# Batch Requests

To avoid the overhead of multiple calls, leverage our Async batch processing endpoint `https://async.scraperapi.com/batchjobs`. Instead of sending a single URL, pass an array of URL strings when calling `/batchjobs`.

{% tabs %}
{% tab title="cURL" %}

```bash
# Submit a batch job: POST a JSON body with your API key and an array of URLs.
# Replace API_KEY with your actual API Key.
curl --request POST \
  --url "https://async.scraperapi.com/batchjobs" \
  --header "Content-Type: application/json" \
  --data '{
    "apiKey": "API_KEY", 
    "urls": [
      "https://wikipedia.org/wiki/Cowboy_boot",
      "https://wikipedia.org/wiki/Web_scraping"
    ]
  }'
```

{% endtab %}

{% tab title="Python" %}

```python
import requests

# Submit a batch job: one POST with an array of URLs to scrape.
response = requests.post(
    url='https://async.scraperapi.com/batchjobs',
    json={
        'apiKey': 'API_KEY',  # Replace API_KEY with your actual API Key.
        'urls': [
            'https://wikipedia.org/wiki/Cowboy_boot',
            'https://wikipedia.org/wiki/Web_scraping'
        ]
    }
)

print(response.text)
```

{% endtab %}

{% tab title="NodeJS" %}

```javascript
import axios from 'axios';

// Submit a batch job: one POST with an array of URLs to scrape.
async function submitBatchJob() {
  const response = await axios.post(
    'https://async.scraperapi.com/batchjobs',
    {
      apiKey: 'API_KEY', // Replace API_KEY with your actual API Key.
      urls: [
        'https://wikipedia.org/wiki/Cowboy_boot',
        'https://wikipedia.org/wiki/Web_scraping'
      ]
    },
    { headers: { 'Content-Type': 'application/json' } }
  );

  console.log(response.data);
}

submitBatchJob();
```

{% endtab %}

{% tab title="PHP" %}

```php
<?php
// Submit a batch job: one POST with an array of URLs to scrape.
// Replace API_KEY with your actual API Key.
$requestBody = json_encode([
    "apiKey" => "API_KEY",
    "urls"   => [
        "https://wikipedia.org/wiki/Cowboy_boot",
        "https://wikipedia.org/wiki/Web_scraping"
    ]
]);

$curl = curl_init();
curl_setopt_array($curl, [
    CURLOPT_URL            => "https://async.scraperapi.com/batchjobs",
    CURLOPT_POST           => true,
    CURLOPT_POSTFIELDS     => $requestBody,
    CURLOPT_HTTPHEADER     => ["Content-Type: application/json"],
    CURLOPT_RETURNTRANSFER => true,
]);

$response = curl_exec($curl);
curl_close($curl);

print_r($response);
```

{% endtab %}

{% tab title="Ruby" %}

```ruby
require 'net/http'
require 'json'

# Submit a batch job: one POST with an array of URLs to scrape.
endpoint = URI.parse('https://async.scraperapi.com/batchjobs')

payload = {
  "apiKey" => "API_KEY", # Replace API_KEY with your actual API Key.
  "urls" => [
    "https://wikipedia.org/wiki/Cowboy_boot",
    "https://wikipedia.org/wiki/Web_scraping"
  ]
}

response = Net::HTTP.post(endpoint, payload.to_json, "Content-Type" => "application/json")

print(response.body)
```

{% endtab %}

{% tab title="Java" %}

```java
import java.io.*;
import java.net.*;
import java.nio.charset.StandardCharsets;

/**
 * Submits a batch scraping job: POSTs a JSON body containing the API key and
 * an array of URLs to the async batchjobs endpoint, then prints the response.
 */
public class Main {
    public static void main(String[] args) {
        try {
            URL url = new URL("https://async.scraperapi.com/batchjobs");
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("POST");
            conn.setRequestProperty("Content-Type", "application/json");
            conn.setDoOutput(true);

            String payload = buildPayload();

            try (OutputStream os = conn.getOutputStream()) {
                os.write(payload.getBytes(StandardCharsets.UTF_8));
            }

            // Read the success stream for any 2xx status, the error stream otherwise.
            // Charset is pinned to UTF-8 so output does not depend on the platform default.
            try (BufferedReader in = new BufferedReader(new InputStreamReader(
                    conn.getResponseCode() / 100 == 2 ? conn.getInputStream() : conn.getErrorStream(),
                    StandardCharsets.UTF_8
            ))) {
                StringBuilder response = new StringBuilder();
                String line;
                while ((line = in.readLine()) != null) response.append(line);
                System.out.println(response);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Builds the JSON request body for the batch job.
     * Replace API_KEY with your actual API Key.
     */
    static String buildPayload() {
        // Fixed: the original had the escape on the wrong side of the closing
        // quote after API_KEY ("\"API_KEY"," instead of "\"API_KEY\","),
        // which made the string literal malformed and the example uncompilable.
        return "{"
                + "\"apiKey\": \"API_KEY\","
                + "\"urls\": ["
                + "\"https://wikipedia.org/wiki/Cowboy_boot\","
                + "\"https://wikipedia.org/wiki/Web_scraping\""
                + "]"
                + "}";
    }
}
```

{% endtab %}
{% endtabs %}

The response returns one job entry per URL, including its ID and status.

```json
[
  {
    "id": "04888c53-e322-4976-969d-8f8b39f016da",
    "attempts": 0,
    "status": "running",
    "statusUrl": "https://async.scraperapi.com/jobs/04888c53-e322-4976-969d-8f8b39f016da",
    "url": "https://wikipedia.org/wiki/Cowboy_boot"
  },
  {
    "id": "946ada9c-2f57-490b-900a-fa14193ae029",
    "attempts": 0,
    "status": "running",
    "statusUrl": "https://async.scraperapi.com/jobs/946ada9c-2f57-490b-900a-fa14193ae029",
    "url": "https://wikipedia.org/wiki/Web_scraping"
  }
]
```

A single batch job can include up to 50,000 URLs. While this is already a large volume that should cover most use cases, it is also the **maximum** allowed per job and cannot be exceeded. This limit helps ensure stability, reliability, and efficient processing. If your workload requires more than 50,000 URLs, we recommend splitting them into multiple batches.


---

# Agent Instructions: Querying This Documentation

If you need additional information that is not directly available in this page, you can query the documentation dynamically by asking a question.

Perform an HTTP GET request on the current page URL with the `ask` query parameter:

```
GET https://docs.scraperapi.com/asynchronous-api/batch-requests.md?ask=<question>
```

The question should be specific, self-contained, and written in natural language.
The response will contain a direct answer to the question and relevant excerpts and sources from the documentation.

Use this mechanism when the answer is not explicitly present in the current page, you need clarification or additional context, or you want to retrieve related documentation sections.
