{
  "generated_at": "2026-04-24T15:01:35.625676+00:00",
  "slug": "api-venice-ai-api-v1-chat-completions",
  "title": "Venice AI \u00b7 Chat Completions",
  "url": "https://api.venice.ai/api/v1/chat/completions",
  "category": "ai",
  "summary": "Generate text completions using Venice AI's private, uncensored language models. Returns chat completion choices with streaming support.",
  "seo": {
    "title": "Venice AI Chat Completions API | x402 Pay-Per-Call",
    "description": "Private, uncensored LLM inference via x402 micropayments. Text generation with Venice AI. 10 USDC per call on Base."
  },
  "use_cases": [
    "Generate conversational responses for AI agents",
    "Produce uncensored text content for research applications",
    "Stream real-time completions for interactive interfaces"
  ],
  "ideal_buyer": "AI agent developers and researchers needing private, uncensored LLM inference without subscription overhead.",
  "example_prompt": "Explain the concept of recursive self-improvement in AI systems",
  "example_request_body": {
    "model": "default",
    "stream": false,
    "messages": [
      {
        "role": "user",
        "content": "Explain recursive self-improvement in AI"
      }
    ]
  },
  "risk_notes": [],
  "pricing_sanity": {
    "flag": "expensive_outlier",
    "ratio": 1000,
    "median_category_atomic": 10000
  },
  "pricing_review_required": false,
  "pricing_decimal_suspect": false,
  "trust_tier": "indexed_external",
  "accepts": [
    {
      "scheme": "exact",
      "network": "base",
      "pay_to": "0x2670b922ef37c7df47158725c0cc407b5382293f",
      "asset": "0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913",
      "max_amount_required_atomic": "10000000",
      "max_timeout_seconds": 300,
      "verified": false,
      "hints": {
        "input": {
          "type": "http",
          "method": "POST",
          "bodyFields": {
            "n": {
              "type": "integer",
              "description": "How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs."
            },
            "seed": {
              "type": "integer",
              "description": "The random seed used to generate the response. This is useful for reproducibility."
            },
            "stop": {
              "description": "Up to 4 sequences where the API will stop generating further tokens. Defaults to null."
            },
            "text": {
              "type": "object",
              "properties": {
                "verbosity": {
                  "enum": [
                    "low",
                    "medium",
                    "high",
                    "auto"
                  ],
                  "type": "string",
                  "description": "Controls the verbosity of the text response."
                }
              },
              "description": "OpenAI-compatible text configuration parameter."
            },
            "user": {
              "type": "string",
              "description": "This field is discarded on the request but is supported in the Venice API for compatibility with OpenAI clients."
            },
            "min_p": {
              "type": "number",
              "description": "Sets a minimum probability threshold for token selection. Tokens with probabilities below this value are filtered out."
            },
            "model": {
              "type": "string",
              "required": true,
              "description": "The ID of the model you wish to prompt. May also be a model trait, or a model compatibility mapping. See the models endpoint for a list of models available to you. You can use feature suffixes to enable features from the venice_parameters object. Please see \"Model Feature Suffix\" documentation for more details."
            },
            "store": {
              "type": "boolean",
              "description": "This field is accepted for OpenAI compatibility but is not used by Venice."
            },
            "tools": {
              "type": "array",
              "items": {
                "description": "A tool that can be called by the model. Currently, only functions are supported as tools."
              },
              "description": "A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for."
            },
            "top_k": {
              "type": "integer",
              "description": "The number of highest probability vocabulary tokens to keep for top-k-filtering."
            },
            "top_p": {
              "type": "number",
              "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered."
            },
            "stream": {
              "type": "boolean",
              "description": "Whether to stream back partial progress. Defaults to false."
            },
            "include": {
              "type": "array",
              "items": {
                "type": "string"
              },
              "description": "OpenAI-compatible parameter specifying additional data to include in the response."
            },
            "logprobs": {
              "type": "boolean",
              "description": "Whether to include log probabilities in the response. This is not supported by all models."
            },
            "max_temp": {
              "type": "number",
              "description": "Maximum temperature value for dynamic temperature scaling."
            },
            "messages": {
              "type": "array",
              "items": {},
              "required": true,
              "description": "A list of messages comprising the conversation so far. Depending on the model you use, different message types (modalities) are supported, like text and images. Non-multimodal models reject image content. For vision models that support multiple images (supportsMultipleImages), images are preserved across all messages in the conversation history. For single-image vision models, only the last image-containing message retains its images."
            },
            "metadata": {
              "type": "object",
              "description": "OpenAI-compatible metadata parameter for request tracking."
            },
            "min_temp": {
              "type": "number",
              "description": "Minimum temperature value for dynamic temperature scaling."
            },
            "reasoning": {
              "type": "object",
              "properties": {
                "effort": {
                  "enum": [
                    "none",
                    "minimal",
                    "low",
                    "medium",
                    "high",
                    "xhigh",
                    "max"
                  ],
                  "type": "string",
                  "description": "Controls the reasoning effort level for supported models. Higher effort means more thorough reasoning but increased token usage. Defaults to the model configuration if not specified."
                },
                "summary": {
                  "enum": [
                    "auto",
                    "concise",
                    "detailed"
                  ],
                  "type": "string",
                  "description": "Controls whether and how the model generates a summary of its reasoning. \"auto\" lets the model decide, \"concise\" requests a brief summary, \"detailed\" requests a thorough summary."
                }
              },
              "description": "Configuration for reasoning behavior on supported models."
            },
            "max_tokens": {
              "type": "integer",
              "description": "The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API. Values of 0 or less are ignored and the model will use its default maximum. This value is now deprecated in favor of max_completion_tokens."
            },
            "temperature": {
              "type": "number",
              "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both."
            },
            "tool_choice": {},
            "top_logprobs": {
              "type": "integer",
              "description": "The number of highest probability tokens to return for each token position."
            },
            "stop_token_ids": {
              "type": "array",
              "items": {
                "type": "number"
              },
              "description": "Array of token IDs where the API will stop generating further tokens."
            },
            "stream_options": {
              "type": "object",
              "properties": {
                "include_usage": {
                  "type": "boolean",
                  "description": "Whether to include usage information in the stream."
                }
              }
            },
            "response_format": {
              "description": "Format in which the response should be returned."
            },
            "presence_penalty": {
              "type": "number",
              "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
            },
            "prompt_cache_key": {
              "type": "string",
              "description": "When supplied, this field may be used to optimize conversation routing to improve cache performance and thus reduce latency."
            },
            "reasoning_effort": {
              "enum": [
                "none",
                "minimal",
                "low",
                "medium",
                "high",
                "xhigh",
                "max"
              ],
              "type": "string",
              "description": "OpenAI-compatible parameter to control reasoning effort level for supported models. Takes precedence over reasoning.effort if both are provided."
            },
            "frequency_penalty": {
              "type": "number",
              "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
            },
            "venice_parameters": {
              "type": "object",
              "properties": {
                "enable_e2ee": {
                  "type": "boolean",
                  "description": "Enable end-to-end encryption for E2EE-capable models. When true (default), E2EE is used if E2EE headers are present. When false, the model runs in TEE-only mode even if E2EE headers are present. Only applicable to models with E2EE capability."
                },
                "character_slug": {
                  "type": "string",
                  "description": "The character slug of a public Venice character. Discoverable as the \"Public ID\" on the published character page."
                },
                "enable_x_search": {
                  "type": "boolean",
                  "description": "Enable xAI native search (web + X/Twitter) for supported models. When enabled, the model performs web and X searches server-side instead of Venice search augmentation. Only available on models with supportsXSearch capability (e.g., grok-4-20). Additional per-search charges apply (~$0.01/search)."
                },
                "disable_thinking": {
                  "type": "boolean",
                  "description": "On supported reasoning models, will disable thinking and strip the <think></think> blocks from the response. Defaults to false."
                },
                "enable_web_search": {
                  "enum": [
                    "auto",
                    "off",
                    "on"
                  ],
                  "type": "string",
                  "description": "Enable web search for this request. Defaults to off. On will force web search on the request. Auto will enable it based on the model's discretion. Citations will be returned either in the first chunk of a streaming result, or in the non-streaming response."
                },
                "enable_web_scraping": {
                  "type": "boolean",
                  "description": "Enable Venice web scraping of URLs in the latest user message using Firecrawl. Off by default."
                },
                "enable_web_citations": {
                  "type": "boolean",
                  "description": "When web search is enabled, this will request that the LLM cite its sources using a ^index^ or ^i,j^ superscript format (e.g., ^1^). Defaults to false."
                },
                "strip_thinking_response": {
                  "type": "boolean",
                  "description": "Strip <think></think> blocks from the response. Applicable only to reasoning / thinking models. Also available to use as a model feature suffix. Defaults to false."
                },
                "include_venice_system_prompt": {
                  "type": "boolean",
                  "description": "Whether to include the Venice-supplied system prompts alongside specified system prompts. Defaults to true."
                },
                "include_search_results_in_stream": {
                  "type": "boolean",
                  "description": "Experimental feature - When set to true, the LLM will include search results in the stream as the first emitted chunk. Defaults to false."
                },
                "return_search_results_as_documents": {
                  "type": "boolean",
                  "description": "When set, search results are also surfaced in an OpenAI-compatible tool call named \"venice_web_search_documents\" to ease LangChain consumption."
                }
              },
              "description": "Unique parameters to Venice's API implementation. Customize these to control the behavior of the model."
            },
            "repetition_penalty": {
              "type": "number",
              "description": "The parameter for repetition penalty. 1.0 means no penalty. Values > 1.0 discourage repetition."
            },
            "parallel_tool_calls": {
              "type": "boolean",
              "description": "Whether to enable parallel function calling during tool use."
            },
            "max_completion_tokens": {
              "type": "integer",
              "description": "An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens."
            },
            "prompt_cache_retention": {
              "enum": [
                "default",
                "extended",
                "24h"
              ],
              "type": "string",
              "description": "OpenAI-compatible parameter to control prompt cache retention. \"extended\" or \"24h\" extends retention to 24 hours for supported models."
            }
          },
          "headerFields": {
            "Accept-Encoding": {
              "type": "string",
              "description": "Supported compression encodings (gzip, br). Only applied when stream is false."
            }
          }
        },
        "output": {
          "type": "object",
          "example": {
            "id": "chatcmpl-a81fbc2d81a7a083bb83ccf9f44c6e5e",
            "model": "qwen-2.5-vl",
            "usage": {
              "total_tokens": 758,
              "prompt_tokens": 612,
              "completion_tokens": 146
            },
            "object": "chat.completion",
            "choices": [
              {
                "index": 0,
                "message": {
                  "role": "assistant",
                  "content": "The sky appears blue because of the way Earth's atmosphere scatters sunlight. When sunlight reaches Earth's atmosphere, it is made up of various colors of the spectrum, but blue light waves are shorter and scatter more easily when they hit the gases and particles in the atmosphere. This scattering occurs in all directions, but from our perspective on the ground, it appears as a blue hue that dominates the sky's color. This phenomenon is known as Rayleigh scattering. During sunrise and sunset, the sunlight has to travel further through the atmosphere, which allows more time for the blue light to scatter away from our direct line of sight, leaving the longer wavelengths, such as red, yellow, and orange, to dominate the sky's color.",
                  "tool_calls": []
                },
                "finish_reason": "stop"
              }
            ],
            "created": 1739928524,
            "venice_parameters": {
              "enable_e2ee": true,
              "character_slug": "venice",
              "disable_thinking": true,
              "enable_web_search": "auto",
              "enable_web_scraping": false,
              "enable_web_citations": true,
              "web_search_citations": [],
              "strip_thinking_response": true,
              "include_venice_system_prompt": true,
              "include_search_results_in_stream": false,
              "return_search_results_as_documents": false
            }
          },
          "required": [
            "created",
            "id",
            "model",
            "object",
            "usage"
          ],
          "properties": {
            "id": {
              "type": "string",
              "example": "chatcmpl-abc123",
              "description": "The ID of the request."
            },
            "model": {
              "type": "string",
              "example": "zai-org-glm-4.7",
              "description": "The model id used for the request."
            },
            "usage": {
              "type": "object",
              "required": [
                "completion_tokens",
                "prompt_tokens",
                "total_tokens"
              ],
              "properties": {
                "total_tokens": {
                  "type": "integer",
                  "example": 30,
                  "description": "The total number of tokens used in the request."
                },
                "prompt_tokens": {
                  "type": "integer",
                  "example": 10,
                  "description": "The number of tokens in the prompt."
                },
                "completion_tokens": {
                  "type": "integer",
                  "example": 20,
                  "description": "The number of tokens in the completion."
                },
                "prompt_tokens_details": {
                  "type": "object",
                  "nullable": true,
                  "properties": {
                    "cached_tokens": {
                      "type": "integer",
                      "example": 128,
                      "description": "Number of prompt tokens that were served from cache (cache read) which may result in a discounted rate."
                    },
                    "cache_creation_input_tokens": {
                      "type": "integer",
                      "example": 64,
                      "description": "Number of prompt tokens that were written to cache (cache write). For some providers like Anthropic, this may be charged at a premium rate."
                    }
                  },
                  "description": "Breakdown of prompt tokens. Includes cached_tokens (cache read) and cache_creation_input_tokens (cache write) for models that support context caching."
                },
                "completion_tokens_details": {
                  "type": "object",
                  "nullable": true,
                  "properties": {
                    "reasoning_tokens": {
                      "type": "integer",
                      "example": 32,
                      "description": "Number of completion tokens consumed by reasoning/thinking output when the upstream model reports it."
                    }
                  },
                  "description": "Breakdown of completion tokens for providers that expose reasoning token accounting."
                }
              }
            },
            "object": {
              "enum": [
                "chat.completion"
              ],
              "type": "string",
              "example": "chat.completion",
              "description": "The type of the object returned."
            },
            "choices": {
              "type": "array",
              "items": {
                "type": "object",
                "required": [
                  "finish_reason",
                  "index",
                  "logprobs",
                  "message"
                ],
                "properties": {
                  "index": {
                    "type": "integer",
                    "example": 0,
                    "description": "The index of the choice in the list."
                  },
                  "message": {
                    "anyOf": [
                      {
                        "type": "object",
                        "title": "Assistant Message",
                        "required": [
                          "role"
                        ],
                        "properties": {
                          "name": {
                            "type": "string",
                            "nullable": true
                          },
                          "role": {
                            "enum": [
                              "assistant"
                            ],
                            "type": "string"
                          },
                          "content": {
                            "anyOf": [
                              {
                                "type": "string",
                                "title": "String"
                              },
                              {
                                "type": "array",
                                "items": {
                                  "type": "object",
                                  "title": "text",
                                  "example": {
                                    "text": "Why is the sky blue?",
                                    "type": "text"
                                  },
                                  "required": [
                                    "text",
                                    "type"
                                  ],
                                  "properties": {
                                    "text": {
                                      "type": "string",
                                      "title": "Text Content Object",
                                      "example": "Why is the sky blue?",
                                      "minLength": 1,
                                      "description": "The prompt text of the message. Must be at least one character in length"
                                    },
                                    "type": {
                                      "enum": [
                                        "text"
                                      ],
                                      "type": "string",
                                      "title": "Text Content String"
                                    },
                                    "cache_control": {
                                      "type": "object",
                                      "title": "Cache Control",
                                      "example": {
                                        "type": "ephemeral"
                                      },
                                      "required": [
                                        "type"
                                      ],
                                      "properties": {
                                        "ttl": {
                                          "type": "string",
                                          "example": "1h",
                                          "description": "Optional TTL for extended cache duration. Beta feature requiring special header."
                                        },
                                        "type": {
                                          "enum": [
                                            "ephemeral"
                                          ],
                                          "type": "string",
                                          "example": "ephemeral",
                                          "description": "The type of cache control. Currently only \"ephemeral\" is supported."
                                        }
                                      },
                                      "description": "Optional cache control for prompt caching on supported providers."
                                    }
                                  },
                                  "description": "Text message type. Supports optional cache_control for prompt caching on supported providers."
                                },
                                "title": "Objects"
                              },
                              {
                                "title": "null",
                                "nullable": true
                              }
                            ]
                          },
                          "tool_calls": {
                            "type": "array",
                            "items": {
                              "nullable": true
                            },
                            "nullable": true
                          },
                          "reasoning_content": {
                            "type": "string",
                            "nullable": true
                          },
                          "reasoning_details": {
                            "type": "array",
                            "items": {
                              "type": "object",
                              "required": [
                                "type"
                              ],
                              "properties": {
                                "id": {
                                  "type": "string"
                                },
                                "data": {
                                  "type": "string"
                                },
                                "text": {
                                  "type": "string"
                                },
                                "type": {
                                  "type": "string"
                                },
                                "index": {
                                  "type": "number"
                                },
                                "format": {
                                  "type": "string"
                                }
                              }
                            },
                            "description": "Reasoning details returned by certain reasoning models that support this feature (e.g., Gemini 3 Pro). Not all reasoning models return this field. For multi-turn conversations with tool calls on supported models, pass back the reasoning_details exactly as received to preserve thought signatures."
                          }
                        },
                        "description": "The assistant message contains the response from the LLM. Must have either content or tool_calls."
                      },
                      {
                        "type": "object",
                        "title": "Tool Message",
                        "required": [
                          "content",
                          "role",
                          "tool_call_id"
                        ],
                        "properties": {
                          "name": {
                            "type": "string",
                            "nullable": true
                          },
                          "role": {
                            "enum": [
                              "tool"
                            ],
                            "type": "string"
                          },
                          "content": {
                            "type": "string"
                          },
                          "tool_calls": {
                            "type": "array",
                            "items": {
                              "nullable": true
                            },
                            "nullable": true
                          },
                          "tool_call_id": {
                            "type": "string"
                          },
                          "reasoning_content": {
                            "type": "string",
                            "nullable": true
                          }
                        },
                        "description": "The tool message is a special message that is used to call a tool. It is not part of the conversation and is not visible to the user."
                      }
                    ]
                  },
                  "logprobs": {
                    "type": "object",
                    "nullable": true,
                    "required": [
                      "logprob",
                      "token"
                    ],
                    "properties": {
                      "bytes": {
                        "type": "array",
                        "items": {
                          "type": "number"
                        },
                        "example": [
                          104,
                          101,
                          108,
                          108,
                          111
                        ],
                        "description": "Raw bytes of the token"
                      },
                      "token": {
                        "type": "string",
                        "example": "hello",
                        "description": "The token string"
                      },
                      "logprob": {
                        "type": "number",
                        "example": -0.34,
                        "description": "The log probability of this token"
                      },
                      "top_logprobs": {
                        "type": "array",
                        "items": {
                          "type": "object",
                          "required": [
                            "logprob",
                            "token"
                          ],
                          "properties": {
                            "bytes": {
                              "type": "array",
                              "items": {
                                "type": "number"
                              }
                            },
                            "token": {
                              "type": "string"
                            },
                            "logprob": {
                              "type": "number"
                            }
                          }
                        },
                        "description": "Top tokens considered with their log probabilities"
                      }
                    }
                  },
                  "stop_reason": {
                    "enum": [
                      "stop",
                      "length"
                    ],
                    "type": "string",
                    "example": "stop",
                    "nullable": true,
                    "description": "The reason the completion stopped."
                  },
                  "finish_reason": {
                    "enum": [
                      "stop",
                      "length",
                      "tool_calls"
                    ],
                    "type": "string",
                    "example": "stop",
                    "description": "The reason the completion finished."
                  }
                }
              },
              "example": [
                {
                  "index": 0,
                  "message": {
                    "role": "assistant",
                    "content": "The sky appears blue because of the way Earth's atmosphere scatters sunlight. When sunlight reaches Earth's atmosphere, it is made up of various colors of the spectrum, but blue light waves are shorter and scatter more easily when they hit the gases and particles in the atmosphere. This scattering occurs in all directions, but from our perspective on the ground, it appears as a blue hue that dominates the sky's color. This phenomenon is known as Rayleigh scattering. During sunrise and sunset, the sunlight has to travel further through the atmosphere, which allows more time for the blue light to scatter away from our direct line of sight, leaving the longer wavelengths, such as red, yellow, and orange, to dominate the sky's color.",
                    "tool_calls": []
                  },
                  "finish_reason": "stop"
                }
              ],
              "description": "A list of chat completion choices. Can be more than one if n is greater than 1. Certain models may not return this field under certain conditions."
            },
            "created": {
              "type": "integer",
              "example": 1677858240,
              "description": "The time at which the request was created."
            },
            "prompt_logprobs": {
              "anyOf": [
                {
                  "title": "null",
                  "nullable": true
                },
                {
                  "type": "object",
                  "additionalProperties": {
                    "nullable": true
                  }
                }
              ],
              "description": "Log probability information for the prompt."
            },
            "venice_parameters": {
              "type": "object",
              "required": [
                "enable_e2ee",
                "enable_web_search",
                "enable_web_citations",
                "enable_web_scraping",
                "include_venice_system_prompt",
                "include_search_results_in_stream",
                "return_search_results_as_documents",
                "strip_thinking_response",
                "disable_thinking"
              ],
              "properties": {
                "enable_e2ee": {
                  "type": "boolean",
                  "example": true,
                  "description": "Did the request enable end-to-end encryption? Only applicable to E2EE-capable models."
                },
                "character_slug": {
                  "type": "string",
                  "example": "venice",
                  "description": "The character slug of a public Venice character."
                },
                "disable_thinking": {
                  "type": "boolean",
                  "example": true,
                  "description": "Did the request disable thinking?"
                },
                "enable_web_search": {
                  "enum": [
                    "auto",
                    "off",
                    "on"
                  ],
                  "type": "string",
                  "example": "auto",
                  "description": "Did the request enable web search?"
                },
                "enable_web_scraping": {
                  "type": "boolean",
                  "example": false,
                  "description": "Did the request enable web scraping of URLs via Firecrawl?"
                },
                "enable_web_citations": {
                  "type": "boolean",
                  "example": true,
                  "description": "Did the request enable web citations?"
                },
                "web_search_citations": {
                  "type": "array",
                  "items": {
                    "type": "object",
                    "required": [
                      "title",
                      "url"
                    ],
                    "properties": {
                      "url": {
                        "type": "string"
                      },
                      "date": {
                        "type": "string"
                      },
                      "title": {
                        "type": "string"
                      },
                      "content": {
                        "type": "string"
                      }
                    }
                  },
                  "example": [
                    {
                      "url": "https://www.skyatnightmagazine.com/space-science/why-is-the-sky-blue",
                      "date": "2024-08-13T13:45:16",
                      "title": "Why is the sky blue? | BBC Sky at Night Magazine",
                      "content": "What's the scientific reason behind Earth's sky appearing blue to the human eye? And what's the real colour of the sky?\n\nSave 30% on the shop price when you subscribe to BBC Sky at Night Magazine today!\n\nIn this article we'll look at the science behind why the sky is blue, or at least why it appears blue to our eyes.\n\nA beautiful blue sky is the sign of a pleasant day ahead. But what makes the sky appear blue?\n\nSo, the sky appears blue because the molecules of nitrogen and oxygen in the atmosphere scatter light in short wavelengths towards the blue end of the visible spectrum."
                    },
                    {
                      "url": "https://theconversation.com/why-is-the-sky-blue-246393",
                      "date": "2025-04-16T16:55:11",
                      "title": "Why is the sky blue?",
                      "content": "It was around 1870 when the British physicist John William Strutt, better known as Lord Rayleigh, first found an explanation for why the sky is blue: Blue light from the Sun is scattered the most when it passes through the atmosphere.\n\nPublished: January 20, 2025 8:34am EST \u00b7 Daniel Freedman, University of Wisconsin-Stout \u00b7 Daniel Freedman \u00b7 Dean of the College of Science, Technology, Engineering, Mathematics & Management, University of Wisconsin-Stout \u00b7\n\nThe answer has to do with molecules.\n\nIt was around 1870 when the British physicist John William Strutt, better known as Lord Rayleigh, first found an explanation for why the sky is blue: Blue light from the Sun is scattered the most when it passes through the atmosphere.\n\nWhen the Sun is near the horizon, its light passes through a lot more of the atmosphere to reach the Earth\u2019s surface than when it is directly overhead. The blue and green light is scattered so well that you can hardly see it. The sky is colored, instead, with red and orange light."
                    }
                  ],
                  "description": "Citations from web search results."
                },
                "strip_thinking_response": {
                  "type": "boolean",
                  "example": true,
                  "description": "Did the request strip thinking response?"
                },
                "include_venice_system_prompt": {
                  "type": "boolean",
                  "example": true,
                  "description": "Did the request include the Venice system prompt?"
                },
                "include_search_results_in_stream": {
                  "type": "boolean",
                  "example": false,
                  "description": "Did the request include search results in the stream?"
                },
                "return_search_results_as_documents": {
                  "type": "boolean",
                  "example": true,
                  "description": "Did the request also return search results as a tool-call documents block?"
                }
              },
              "description": "Unique parameters to Venice's API implementation."
            }
          }
        }
      }
    }
  ],
  "origin": {
    "slug": "api-venice-ai",
    "host": "api.venice.ai",
    "title": "Venice API Docs",
    "description": "Harness the full capabilities of Venice AI with the Venice API, a private and uncensored AI API enabling the development of advanced applications that generate text and images.",
    "url": "https://api.venice.ai",
    "og_image": "https://venice.ai/images/venice_social_preview.png",
    "favicon": "https://docs.venice.ai/mintlify-assets/_mintlify/favicons/veniceai/HJGBlV4jYrSOrFXh/_generated/favicon/favicon-16x16.png"
  },
  "json_ld": {
    "@id": "https://x402all.com/resource/api-venice-ai-api-v1-chat-completions",
    "url": "https://x402all.com/resource/api-venice-ai-api-v1-chat-completions",
    "name": "Venice AI \u00b7 Chat Completions",
    "@type": "WebAPI",
    "offers": {
      "url": "https://x402all.com/resource/api-venice-ai-api-v1-chat-completions",
      "@type": "Offer",
      "price": "10",
      "availability": "https://schema.org/InStock",
      "priceCurrency": "USDC",
      "priceSpecification": {
        "@type": "UnitPriceSpecification",
        "price": "10.000000",
        "unitText": "call",
        "priceCurrency": "USDC"
      },
      "eligibleCustomerType": "Agent",
      "additionalProperty": [
        {
          "@type": "PropertyValue",
          "name": "paymentNetwork",
          "value": "base"
        },
        {
          "@type": "PropertyValue",
          "name": "paymentAsset",
          "value": "0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913"
        }
      ]
    },
    "sameAs": "https://api.venice.ai/api/v1/chat/completions",
    "@context": "https://schema.org",
    "provider": {
      "@id": "https://x402all.com/server/api-venice-ai",
      "url": "https://api.venice.ai",
      "name": "Venice API Docs",
      "@type": "Organization"
    },
    "identifier": "api-venice-ai-api-v1-chat-completions",
    "description": "Private, uncensored LLM inference via x402 micropayments. Text generation with Venice AI. 10 USDC per call on Base.",
    "potentialAction": {
      "@type": "BuyAction",
      "target": "https://axon402.com/test-buy?resource=api-venice-ai-api-v1-chat-completions",
      "description": "Test-buy this endpoint on AXON"
    },
    "applicationCategory": "ai"
  },
  "axon_deep_link": "https://axon402.com/test-buy?resource=api-venice-ai-api-v1-chat-completions"
}
