{
  "generated_at": "2026-04-24T15:01:35.625676+00:00",
  "slug": "www-hirescrape-com-api-tools-reddit",
  "title": "Hirescrape \u00b7 Reddit Data Extraction",
  "url": "https://www.hirescrape.com/api/tools/reddit",
  "category": "data",
  "summary": "Extract Reddit content including posts, comment threads, subreddit listings, search results, and ad data programmatically.",
  "seo": {
    "title": "Hirescrape Reddit Scraper API | x402all.com",
    "description": "Scrape Reddit posts, comments, subreddits, and ads via x402 micropayments. 0.07 USDC per call. Base network. No API keys required."
  },
  "use_cases": [
    "Monitor brand mentions and sentiment across Reddit communities",
    "Build datasets of Reddit discussions for NLP training",
    "Track competitor advertising and organic activity on Reddit"
  ],
  "ideal_buyer": "Social listening platforms, market research firms, and AI training data providers needing scalable Reddit extraction without managing scraper infrastructure.",
  "example_prompt": "Scrape the top 100 posts from r/ethereum from the past week with all comments.",
  "example_request_body": {
    "sort": "top",
    "time": "week",
    "limit": 100,
    "action": "scrape_subreddit",
    "subreddit": "ethereum"
  },
  "risk_notes": [],
  "pricing_sanity": {
    "flag": "expensive",
    "ratio": 3.5,
    "median_category_atomic": 20000
  },
  "pricing_review_required": false,
  "pricing_decimal_suspect": false,
  "trust_tier": "indexed_external",
  "accepts": [
    {
      "scheme": "exact",
      "network": "base",
      "pay_to": "0xb5194a98dbdbb7028b585db26b972e7f0f3f826a",
      "asset": "0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913",
      "max_amount_required_atomic": "70000",
      "max_timeout_seconds": 60,
      "verified": false,
      "hints": {
        "input": {
          "body": {
            "type": "object",
            "title": "🤖 Reddit Deep Scraper",
            "required": [
              "action"
            ],
            "properties": {
              "sort": {
                "enum": [
                  "hot",
                  "new",
                  "top",
                  "rising",
                  "controversial",
                  "relevance",
                  "comments"
                ],
                "type": "string",
                "title": "📊 Sort Order",
                "default": "hot",
                "prefill": "hot",
                "enumTitles": [
                  "🔥 Hot",
                  "✨ New",
                  "🏆 Top",
                  "📈 Rising",
                  "⚡ Controversial",
                  "🎯 Relevance",
                  "💬 Comments"
                ],
                "description": "**How to sort results**\n\nFor Scrape Subreddit:\n• 🔥 **hot** (trending now)\n• ✨ **new** (newest first)\n• 🏆 **top** (highest rated)\n• 📈 **rising** (gaining traction)\n• ⚡ **controversial** (most debated)\n\nFor Search:\n• 🎯 **relevance** (best match)\n• ✨ **new** (newest first)\n• 🏆 **top** (highest rated)\n• 💬 **comments** (most discussed)",
                "sectionCaption": "⚙️ Options",
                "sectionDescription": "Fine-tune your scrape (sensible defaults — change only if needed)"
              },
              "limit": {
                "type": "integer",
                "title": "🔢 Max Results",
                "default": 50,
                "maximum": 500,
                "minimum": 1,
                "prefill": 50,
                "description": "**Maximum number of posts/results to return**\n\n⚠️ Higher = more data but slower and costs more\n💰 Each post with comments costs more than posts only\n\nRecommended:\n• 🟢 Quick test: 10-25\n• 🟡 Normal: 50-100\n• 🔴 Large: 200-500"
              },
              "query": {
                "type": "string",
                "title": "🔍 Search Query / Question",
                "editor": "textfield",
                "example": "best web scraping tools 2026",
                "description": "**Keyword to search or question to ask**\n\n✅ Required for: Search Posts, Search Comments, Find Subreddits, Reddit AI Answers\n❌ Ignore for: Scrape Subreddit, Fetch Post\n\nExamples:\n• `best web scraping tools 2026`\n• `how to learn Python`\n• `AI automation`"
              },
              "action": {
                "enum": [
                  "scrape_subreddit",
                  "search_posts",
                  "search_comments",
                  "search_subreddits",
                  "fetch_post",
                  "reddit_answers",
                  "ads_search",
                  "ad"
                ],
                "type": "string",
                "title": "🎯 What do you want to scrape?",
                "default": "scrape_subreddit",
                "prefill": "scrape_subreddit",
                "enumTitles": [
                  "📚 Scrape Subreddit (Posts + Comments)",
                  "🔍 Search Posts by Keyword",
                  "💬 Search Comments by Keyword",
                  "🏷️ Find Subreddits by Keyword",
                  "🔗 Fetch Single Post by URL",
                  "🤖 Reddit AI Answers",
                  "📢 Search Reddit Ads",
                  "🎯 Ad Detail by ID"
                ],
                "description": "Choose your action — then scroll down and fill only the fields marked for that action:\n\n• **Scrape Subreddit** → ✅ Subreddit\n• **Search Posts** → ✅ Query + ⚠️ Subreddit (optional)\n• **Search Comments** → ✅ Query + ⚠️ Subreddit (optional)\n• **Find Subreddits** → ✅ Query\n• **Fetch Post** → ✅ Post URL\n• **Reddit AI Answers** → ✅ Query",
                "sectionCaption": "🎯 Action",
                "sectionDescription": "Select what you want to scrape, then fill the relevant fields below"
              },
              "postUrl": {
                "type": "string",
                "title": "🔗 Post URL",
                "editor": "textfield",
                "example": "https://www.reddit.com/r/technology/comments/1sdjh66/example/",
                "description": "**Full Reddit post URL**\n\n✅ Required for: Fetch Single Post\n❌ Ignore for: All other actions\n\nExample: `https://www.reddit.com/r/technology/comments/abc123/my_post/`"
              },
              "threads": {
                "type": "integer",
                "title": "⚡ Concurrency (Threads)",
                "default": 10,
                "maximum": 20,
                "minimum": 1,
                "prefill": 10,
                "description": "**Number of parallel workers**\n\n🟢 Low (1-5): Slower but safer, less proxy bandwidth\n🟡 Medium (6-10): Balanced speed and reliability\n🔴 High (11-20): Fastest but uses more proxy bandwidth\n\n⚠️ Only applies to: **Scrape Subreddit**, **Find Subreddits**\n💡 Higher = faster but may trigger rate limits"
              },
              "subreddit": {
                "type": "string",
                "title": "📝 Subreddit Name",
                "editor": "textfield",
                "example": "technology",
                "prefill": "technology",
                "description": "**Subreddit name without the r/ prefix**\n\n✅ Required for: Scrape Subreddit\n⚠️ Optional for: Search Posts, Search Comments\n❌ Ignore for: Find Subreddits, Fetch Post, Reddit AI Answers\n\nExample: `technology` (not `r/technology`)",
                "sectionCaption": "🎯 Target",
                "sectionDescription": "Fill the target for your action (only ONE of these is needed)"
              },
              "timeFilter": {
                "enum": [
                  "hour",
                  "day",
                  "week",
                  "month",
                  "year",
                  "all"
                ],
                "type": "string",
                "title": "⏰ Time Filter",
                "default": "week",
                "prefill": "week",
                "enumTitles": [
                  "🕐 Past Hour",
                  "📅 Past Day",
                  "📆 Past Week",
                  "📊 Past Month",
                  "🗓️ Past Year",
                  "♾️ All Time"
                ],
                "description": "**Time range for results**\n\n⚠️ Only applies when Sort = **top** or **controversial**\n\nOptions: past hour, day, week, month, year, or all time"
              },
              "includeComments": {
                "type": "boolean",
                "title": "💬 Include Comments",
                "default": false,
                "prefill": false,
                "description": "**Fetch full comment trees for each post**\n\n✅ Enabled: Get complete discussions with nested replies\n❌ Disabled: Posts only (faster, cheaper)\n\n⚠️ Only applies to: **Scrape Subreddit**\n💰 Comments significantly increase cost and time\n📊 A post with 500 comments = 500 API calls",
                "groupCaption": "💬 Comment Settings",
                "groupDescription": "Configure comment extraction (only for Scrape Subreddit)"
              },
              "proxyConfiguration": {
                "type": "object",
                "title": "🌐 Proxy Configuration",
                "editor": "proxy",
                "default": {
                  "useApifyProxy": true,
                  "apifyProxyGroups": [
                    "RESIDENTIAL"
                  ]
                },
                "prefill": {
                  "useApifyProxy": true,
                  "apifyProxyGroups": [
                    "RESIDENTIAL"
                  ]
                },
                "description": "**Reddit requires residential proxies for best results**\n\n📌 **Default Setup (Recommended):**\n• ✅ Use Apify Proxy\n• 🏠 Residential proxies (best success rate)\n• 💰 Costs: $8/GB\n\n💡 **Alternative:**\n• 🏢 BUYPROXIES94952 (free datacenter, 5 IPs)\n• ⚠️ Higher chance of Reddit blocks\n• 💵 Free but less reliable",
                "sectionCaption": "🌐 Proxy",
                "sectionDescription": "Proxy settings (default uses residential for best quality)"
              }
            },
            "description": "Scrape Reddit posts, comments, search results, find subreddits, and query Reddit AI Answers. Pick an action and fill only the relevant fields below.",
            "schemaVersion": 1
          },
          "type": "http",
          "method": "POST",
          "schema": {
            "type": "object",
            "title": "🤖 Reddit Deep Scraper",
            "required": [
              "action"
            ],
            "properties": {
              "sort": {
                "enum": [
                  "hot",
                  "new",
                  "top",
                  "rising",
                  "controversial",
                  "relevance",
                  "comments"
                ],
                "type": "string",
                "title": "📊 Sort Order",
                "default": "hot",
                "prefill": "hot",
                "enumTitles": [
                  "🔥 Hot",
                  "✨ New",
                  "🏆 Top",
                  "📈 Rising",
                  "⚡ Controversial",
                  "🎯 Relevance",
                  "💬 Comments"
                ],
                "description": "**How to sort results**\n\nFor Scrape Subreddit:\n• 🔥 **hot** (trending now)\n• ✨ **new** (newest first)\n• 🏆 **top** (highest rated)\n• 📈 **rising** (gaining traction)\n• ⚡ **controversial** (most debated)\n\nFor Search:\n• 🎯 **relevance** (best match)\n• ✨ **new** (newest first)\n• 🏆 **top** (highest rated)\n• 💬 **comments** (most discussed)",
                "sectionCaption": "⚙️ Options",
                "sectionDescription": "Fine-tune your scrape (sensible defaults — change only if needed)"
              },
              "limit": {
                "type": "integer",
                "title": "🔢 Max Results",
                "default": 50,
                "maximum": 500,
                "minimum": 1,
                "prefill": 50,
                "description": "**Maximum number of posts/results to return**\n\n⚠️ Higher = more data but slower and costs more\n💰 Each post with comments costs more than posts only\n\nRecommended:\n• 🟢 Quick test: 10-25\n• 🟡 Normal: 50-100\n• 🔴 Large: 200-500"
              },
              "query": {
                "type": "string",
                "title": "🔍 Search Query / Question",
                "editor": "textfield",
                "example": "best web scraping tools 2026",
                "description": "**Keyword to search or question to ask**\n\n✅ Required for: Search Posts, Search Comments, Find Subreddits, Reddit AI Answers\n❌ Ignore for: Scrape Subreddit, Fetch Post\n\nExamples:\n• `best web scraping tools 2026`\n• `how to learn Python`\n• `AI automation`"
              },
              "action": {
                "enum": [
                  "scrape_subreddit",
                  "search_posts",
                  "search_comments",
                  "search_subreddits",
                  "fetch_post",
                  "reddit_answers",
                  "ads_search",
                  "ad"
                ],
                "type": "string",
                "title": "🎯 What do you want to scrape?",
                "default": "scrape_subreddit",
                "prefill": "scrape_subreddit",
                "enumTitles": [
                  "📚 Scrape Subreddit (Posts + Comments)",
                  "🔍 Search Posts by Keyword",
                  "💬 Search Comments by Keyword",
                  "🏷️ Find Subreddits by Keyword",
                  "🔗 Fetch Single Post by URL",
                  "🤖 Reddit AI Answers",
                  "📢 Search Reddit Ads",
                  "🎯 Ad Detail by ID"
                ],
                "description": "Choose your action — then scroll down and fill only the fields marked for that action:\n\n• **Scrape Subreddit** → ✅ Subreddit\n• **Search Posts** → ✅ Query + ⚠️ Subreddit (optional)\n• **Search Comments** → ✅ Query + ⚠️ Subreddit (optional)\n• **Find Subreddits** → ✅ Query\n• **Fetch Post** → ✅ Post URL\n• **Reddit AI Answers** → ✅ Query",
                "sectionCaption": "🎯 Action",
                "sectionDescription": "Select what you want to scrape, then fill the relevant fields below"
              },
              "postUrl": {
                "type": "string",
                "title": "🔗 Post URL",
                "editor": "textfield",
                "example": "https://www.reddit.com/r/technology/comments/1sdjh66/example/",
                "description": "**Full Reddit post URL**\n\n✅ Required for: Fetch Single Post\n❌ Ignore for: All other actions\n\nExample: `https://www.reddit.com/r/technology/comments/abc123/my_post/`"
              },
              "threads": {
                "type": "integer",
                "title": "⚡ Concurrency (Threads)",
                "default": 10,
                "maximum": 20,
                "minimum": 1,
                "prefill": 10,
                "description": "**Number of parallel workers**\n\n🟢 Low (1-5): Slower but safer, less proxy bandwidth\n🟡 Medium (6-10): Balanced speed and reliability\n🔴 High (11-20): Fastest but uses more proxy bandwidth\n\n⚠️ Only applies to: **Scrape Subreddit**, **Find Subreddits**\n💡 Higher = faster but may trigger rate limits"
              },
              "subreddit": {
                "type": "string",
                "title": "📝 Subreddit Name",
                "editor": "textfield",
                "example": "technology",
                "prefill": "technology",
                "description": "**Subreddit name without the r/ prefix**\n\n✅ Required for: Scrape Subreddit\n⚠️ Optional for: Search Posts, Search Comments\n❌ Ignore for: Find Subreddits, Fetch Post, Reddit AI Answers\n\nExample: `technology` (not `r/technology`)",
                "sectionCaption": "🎯 Target",
                "sectionDescription": "Fill the target for your action (only ONE of these is needed)"
              },
              "timeFilter": {
                "enum": [
                  "hour",
                  "day",
                  "week",
                  "month",
                  "year",
                  "all"
                ],
                "type": "string",
                "title": "⏰ Time Filter",
                "default": "week",
                "prefill": "week",
                "enumTitles": [
                  "🕐 Past Hour",
                  "📅 Past Day",
                  "📆 Past Week",
                  "📊 Past Month",
                  "🗓️ Past Year",
                  "♾️ All Time"
                ],
                "description": "**Time range for results**\n\n⚠️ Only applies when Sort = **top** or **controversial**\n\nOptions: past hour, day, week, month, year, or all time"
              },
              "includeComments": {
                "type": "boolean",
                "title": "💬 Include Comments",
                "default": false,
                "prefill": false,
                "description": "**Fetch full comment trees for each post**\n\n✅ Enabled: Get complete discussions with nested replies\n❌ Disabled: Posts only (faster, cheaper)\n\n⚠️ Only applies to: **Scrape Subreddit**\n💰 Comments significantly increase cost and time\n📊 A post with 500 comments = 500 API calls",
                "groupCaption": "💬 Comment Settings",
                "groupDescription": "Configure comment extraction (only for Scrape Subreddit)"
              },
              "proxyConfiguration": {
                "type": "object",
                "title": "🌐 Proxy Configuration",
                "editor": "proxy",
                "default": {
                  "useApifyProxy": true,
                  "apifyProxyGroups": [
                    "RESIDENTIAL"
                  ]
                },
                "prefill": {
                  "useApifyProxy": true,
                  "apifyProxyGroups": [
                    "RESIDENTIAL"
                  ]
                },
                "description": "**Reddit requires residential proxies for best results**\n\n📌 **Default Setup (Recommended):**\n• ✅ Use Apify Proxy\n• 🏠 Residential proxies (best success rate)\n• 💰 Costs: $8/GB\n\n💡 **Alternative:**\n• 🏢 BUYPROXIES94952 (free datacenter, 5 IPs)\n• ⚠️ Higher chance of Reddit blocks\n• 💵 Free but less reliable",
                "sectionCaption": "🌐 Proxy",
                "sectionDescription": "Proxy settings (default uses residential for best quality)"
              }
            },
            "description": "Scrape Reddit posts, comments, search results, find subreddits, and query Reddit AI Answers. Pick an action and fill only the relevant fields below.",
            "schemaVersion": 1
          },
          "example": {
            "limit": 20,
            "query": "AI agent payments",
            "action": "search_posts"
          },
          "bodyType": "json"
        },
        "output": {
          "type": "json",
          "schema": {
            "type": "object",
            "required": [
              "items"
            ],
            "properties": {
              "items": {
                "type": "array",
                "items": {
                  "type": "object"
                },
                "description": "Scraped items. Shape varies per tool — see each tool's outputSample for a concrete example."
              },
              "runId": {
                "type": "string",
                "description": "Run ID for debugging/audit."
              },
              "payment": {
                "type": "object",
                "description": "Settlement summary (protocol, amount, currency)."
              },
              "duration": {
                "type": "integer",
                "description": "Run duration in seconds."
              }
            }
          },
          "example": {
            "items": [
              {
                "score": 124,
                "title": "Building AI agents that pay per call",
                "author": "ai_dev",
                "permalink": "/r/MachineLearning/comments/abc123/building_ai_agents/",
                "subreddit": "MachineLearning",
                "created_utc": 1776480000,
                "num_comments": 37
              }
            ],
            "runId": "sc_mo4example",
            "payment": {
              "amount": "0.070000",
              "currency": "USD",
              "protocol": "x402"
            },
            "duration": 4
          }
        }
      }
    }
  ],
  "duplicate_cluster_id": "data-cl-09a755d6df5e",
  "origin": {
    "slug": "www-hirescrape-com",
    "host": "www.hirescrape.com",
    "title": "Hirescrape \u2014 Pay-per-call scraper API for AI agents",
    "description": "Pay-per-call web scrapers for AI agents. 28 tools across Reddit, 8-board job search (LinkedIn \u00b7 Indeed \u00b7 Glassdoor \u00b7 Google Jobs \u00b7 +5), TikTok \u00b7 Douyin \u00b7 Bilibili, cross-platform trend research, social media, and ad libraries. No API keys. Agent wallets settle USDC on Tempo or Base via x402 + MPP.",
    "url": "https://www.hirescrape.com",
    "og_image": "https://www.hirescrape.com/opengraph-image?c645bc0ba1f3236d",
    "favicon": "https://hirescrape.com/favicon.ico"
  },
  "json_ld": {
    "@id": "https://x402all.com/resource/www-hirescrape-com-api-tools-reddit",
    "url": "https://x402all.com/resource/www-hirescrape-com-api-tools-reddit",
    "name": "Hirescrape \u00b7 Reddit Data Extraction",
    "@type": "WebAPI",
    "offers": {
      "url": "https://x402all.com/resource/www-hirescrape-com-api-tools-reddit",
      "@type": "Offer",
      "price": "0.07",
      "availability": "https://schema.org/InStock",
      "priceCurrency": "USDC",
      "priceSpecification": {
        "@type": "UnitPriceSpecification",
        "price": "0.070000",
        "unitText": "call",
        "priceCurrency": "USDC"
      },
      "eligibleCustomerType": "Agent",
      "additionalProperty": [
        {
          "@type": "PropertyValue",
          "name": "paymentNetwork",
          "value": "base"
        },
        {
          "@type": "PropertyValue",
          "name": "paymentAsset",
          "value": "0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913"
        }
      ]
    },
    "sameAs": "https://www.hirescrape.com/api/tools/reddit",
    "@context": "https://schema.org",
    "provider": {
      "@id": "https://x402all.com/server/www-hirescrape-com",
      "url": "https://www.hirescrape.com",
      "name": "Hirescrape \u2014 Pay-per-call scraper API for AI agents",
      "@type": "Organization"
    },
    "identifier": "www-hirescrape-com-api-tools-reddit",
    "description": "Scrape Reddit posts, comments, subreddits, and ads via x402 micropayments. 0.07 USDC per call. Base network. No API keys required.",
    "potentialAction": {
      "@type": "BuyAction",
      "target": "https://axon402.com/test-buy?resource=www-hirescrape-com-api-tools-reddit",
      "description": "Test-buy this endpoint on AXON"
    },
    "applicationCategory": "data"
  },
  "axon_deep_link": "https://axon402.com/test-buy?resource=www-hirescrape-com-api-tools-reddit"
}
