{"object":"list","data":[{"id":"prime-intellect/intellect-3","canonical_slug":"prime-intellect/intellect-3-20251126","hugging_face_id":"PrimeIntellect/INTELLECT-3-FP8","name":"Prime Intellect: INTELLECT-3","created":1764212534,"description":"INTELLECT-3 is a 106B-parameter Mixture-of-Experts model (12B active) post-trained from GLM-4.5-Air-Base using supervised fine-tuning (SFT) followed by large-scale reinforcement learning (RL). It offers state-of-the-art performance for its size across math,...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000002","completion":"0.0000011"},"top_provider":{"context_length":131072,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.6,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/prime-intellect/intellect-3-20251126/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":1.1}},{"id":"tngtech/tng-r1t-chimera:free","canonical_slug":"tngtech/tng-r1t-chimera","hugging_face_id":null,"name":"TNG: R1T Chimera (free)","created":1764184161,"description":"TNG-R1T-Chimera is an experimental LLM with a faible for creative storytelling and character interaction. It is a derivate of the original TNG/DeepSeek-R1T-Chimera released in April 2025 and is available exclusively via Chutes and OpenRouter.\n\nCharacteristics and improvements include:\n\nWe think that it has a creative and pleasant personality.\nIt has a preliminary EQ-Bench3 value of about 1305.\nIt is quite a bit more intelligent than the original, albeit a slightly slower.\nIt is much more think-token consistent, i.e. reasoning and answer blocks are properly delineated.\nTool calling is much improved.\n\nTNG Tech, the model authors, ask that users follow the careful guidelines that Microsoft has created for their \"MAI-DS-R1\" DeepSeek-based model. These guidelines are available on Hugging Face (https://huggingface.co/microsoft/MAI-DS-R1).","context_length":163840,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":163840,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"tngtech/tng-r1t-chimera","canonical_slug":"tngtech/tng-r1t-chimera","hugging_face_id":null,"name":"TNG: R1T Chimera","created":1764184161,"description":"TNG-R1T-Chimera is an experimental LLM with a faible for creative storytelling and character interaction. It is a derivate of the original TNG/DeepSeek-R1T-Chimera released in April 2025 and is available exclusively via Chutes and OpenRouter.\n\nCharacteristics and improvements include:\n\nWe think that it has a creative and pleasant personality.\nIt has a preliminary EQ-Bench3 value of about 1305.\nIt is quite a bit more intelligent than the original, albeit a slightly slower.\nIt is much more think-token consistent, i.e. reasoning and answer blocks are properly delineated.\nTool calling is much improved.\n\nTNG Tech, the model authors, ask that users follow the careful guidelines that Microsoft has created for their \"MAI-DS-R1\" DeepSeek-based model. These guidelines are available on Hugging Face (https://huggingface.co/microsoft/MAI-DS-R1).","context_length":163840,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000025","completion":"0.00000085","input_cache_read":"0.000000125"},"top_provider":{"context_length":163840,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.25,"completion":0.85}},{"id":"anthropic/claude-opus-4.5","canonical_slug":"anthropic/claude-4.5-opus-20251124","hugging_face_id":"","name":"Anthropic: Claude Opus 4.5","created":1764010580,"description":"Claude Opus 4.5 is Anthropic’s frontier reasoning model optimized for complex software engineering, agentic workflows, and long-horizon computer use. It offers strong multimodal capabilities, competitive performance across real-world coding and reasoning benchmarks, and improved robustness to prompt injection. The model is designed to operate efficiently across varied effort levels, enabling developers to trade off speed, depth, and token usage depending on task requirements. It comes with a new parameter to control token efficiency, which can be accessed using the OpenRouter Verbosity parameter with low, medium, or high.\r\n\r\nOpus 4.5 supports advanced tool use, extended context management, and coordinated multi-agent setups, making it well-suited for autonomous research, debugging, multi-step planning, and spreadsheet/browser manipulation. It delivers substantial gains in structured reasoning, execution reliability, and alignment compared to prior Opus generations, while reducing token overhead and improving performance on long-running tasks.","context_length":200000,"architecture":{"modality":"text+image+file->text","input_modalities":["file","image","text"],"output_modalities":["text"],"tokenizer":"Claude","instruct_type":null},"pricing":{"prompt":"0.000005","completion":"0.000025","web_search":"0.01","input_cache_read":"0.0000005","input_cache_write":"0.00000625"},"top_provider":{"context_length":200000,"max_completion_tokens":64000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","verbosity"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/anthropic/claude-4.5-opus-20251124/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":5.0,"completion":25.0}},{"id":"openrouter/bert-nebulon-alpha","canonical_slug":"openrouter/bert-nebulon-alpha","hugging_face_id":"","name":"Bert-Nebulon Alpha","created":1764005058,"description":"This is a cloaked model provided to the community to gather feedback. A general-purpose multimodal model (text/image in, text out) designed for reliability, long-context comprehension, and adaptive logic. It is engineered for production-grade assistants, retrieval-augmented systems, science workloads, and complex agentic workflows.\n\n**Note:** All prompts and completions for this model are logged by the provider and may be used to improve the model.","context_length":256000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":256000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":0.0645,"top_p":null,"frequency_penalty":null},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"allenai/olmo-3-32b-think","canonical_slug":"allenai/olmo-3-32b-think-20251121","hugging_face_id":"allenai/Olmo-3-32B-Think","name":"AllenAI: Olmo 3 32B Think","created":1763758276,"description":"Olmo 3 32B Think is a large-scale, 32-billion-parameter model purpose-built for deep reasoning, complex logic chains and advanced instruction-following scenarios. Its capacity enables strong performance on demanding evaluation tasks and...","context_length":65536,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000015","completion":"0.0000005"},"top_provider":{"context_length":65536,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{"temperature":0.6,"top_p":0.95,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/allenai/olmo-3-32b-think-20251121/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.15,"completion":0.5}},{"id":"allenai/olmo-3-7b-instruct","canonical_slug":"allenai/olmo-3-7b-instruct-20251121","hugging_face_id":"allenai/Olmo-3-7B-Instruct","name":"AllenAI: Olmo 3 7B Instruct","created":1763758273,"description":"Olmo 3 7B Instruct is a supervised instruction-fine-tuned variant of the Olmo 3 7B base model, optimized for instruction-following, question-answering, and natural conversational dialogue. By leveraging high-quality instruction data and an open training pipeline, it delivers strong performance across everyday NLP tasks while remaining accessible and easy to integrate. Developed by Ai2 under the Apache 2.0 license, the model offers a transparent, community-friendly option for instruction-driven applications.","context_length":65536,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000001","completion":"0.0000002"},"top_provider":{"context_length":65536,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{"temperature":0.6,"top_p":0.95,"frequency_penalty":null},"expiration_date":"2026-03-23","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.2}},{"id":"allenai/olmo-3-7b-think","canonical_slug":"allenai/olmo-3-7b-think-20251121","hugging_face_id":"allenai/Olmo-3-7B-Think","name":"AllenAI: Olmo 3 7B Think","created":1763758270,"description":"Olmo 3 7B Think is a research-oriented language model in the Olmo family designed for advanced reasoning and instruction-driven tasks. It excels at multi-step problem solving, logical inference, and maintaining coherent conversational context. Developed by Ai2 under the Apache 2.0 license, Olmo 3 7B Think supports transparent, fully open experimentation and provides a lightweight yet capable foundation for academic research and practical NLP workflows.","context_length":65536,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000012","completion":"0.0000002"},"top_provider":{"context_length":65536,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{"temperature":0.6,"top_p":0.95,"frequency_penalty":null},"expiration_date":"2026-03-23","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.12,"completion":0.2}},{"id":"google/gemini-3-pro-image-preview","canonical_slug":"google/gemini-3-pro-image-preview-20251120","hugging_face_id":"","name":"Google: Nano Banana Pro (Gemini 3 Pro Image Preview)","created":1763653797,"description":"Nano Banana Pro is Google’s most advanced image-generation and editing model, built on Gemini 3 Pro. It extends the original Nano Banana with significantly improved multimodal reasoning, real-world grounding, and high-fidelity visual synthesis. The model generates context-rich graphics, from infographics and diagrams to cinematic composites, and can incorporate real-time information via Search grounding.\r\n\r\nIt offers industry-leading text rendering in images (including long passages and multilingual layouts), consistent multi-image blending, and accurate identity preservation across up to five subjects. Nano Banana Pro adds fine-grained creative controls such as localized edits, lighting and focus adjustments, camera transformations, and support for 2K/4K outputs and flexible aspect ratios. It is designed for professional-grade design, product visualization, storyboarding, and complex multi-element compositions while remaining efficient for general image creation workflows.","context_length":65536,"architecture":{"modality":"text+image->text+image","input_modalities":["image","text"],"output_modalities":["image","text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0.000002","completion":"0.000012","image":"0.000002","audio":"0.000002","web_search":"0.014","internal_reasoning":"0.000012","input_cache_read":"0.0000002","input_cache_write":"0.000000375"},"top_provider":{"context_length":65536,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/google/gemini-3-pro-image-preview-20251120/endpoints"},"object":"model","owned_by":"proxy","type":"image","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"x-ai/grok-4.1-fast:free","canonical_slug":"x-ai/grok-4.1-fast","hugging_face_id":"","name":"xAI: Grok 4.1 Fast (free)","created":1763587502,"description":"Grok 4.1 Fast is xAI's best agentic tool calling model that shines in real-world use cases like customer support and deep research. 2M context window.\n\nReasoning can be enabled/disabled using the `reasoning` `enabled` parameter in the API. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#controlling-reasoning-tokens)","context_length":2000000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Grok","instruct_type":null},"pricing":{"prompt":"0","completion":"0","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":2000000,"max_completion_tokens":30000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","logprobs","max_tokens","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{"temperature":0.7,"top_p":0.95,"frequency_penalty":null},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"google/gemini-3-pro-preview","canonical_slug":"google/gemini-3-pro-preview-20251117","hugging_face_id":"","name":"Google: Gemini 3 Pro Preview","created":1763474668,"description":"Gemini 3 Pro is Google’s flagship frontier model for high-precision multimodal reasoning, combining strong performance across text, image, video, audio, and code with a 1M-token context window. Reasoning Details must be preserved when using multi-turn tool calling, see our docs here: https://openrouter.ai/docs/use-cases/reasoning-tokens#preserving-reasoning-blocks. It delivers state-of-the-art benchmark results in general reasoning, STEM problem solving, factual QA, and multimodal understanding, including leading scores on LMArena, GPQA Diamond, MathArena Apex, MMMU-Pro, and Video-MMMU. Interactions emphasize depth and interpretability: the model is designed to infer intent with minimal prompting and produce direct, insight-focused responses.\r\n\r\nBuilt for advanced development and agentic workflows, Gemini 3 Pro provides robust tool-calling, long-horizon planning stability, and strong zero-shot generation for complex UI, visualization, and coding tasks. It excels at agentic coding (SWE-Bench Verified, Terminal-Bench 2.0), multimodal analysis, and structured long-form tasks such as research synthesis, planning, and interactive learning experiences. Suitable applications include autonomous agents, coding assistants, multimodal analytics, scientific reasoning, and high-context information processing.","context_length":1048576,"architecture":{"modality":"text+image+file+audio+video->text","input_modalities":["text","image","file","audio","video"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0.000002","completion":"0.000012","image":"0.000002","audio":"0.000002","internal_reasoning":"0.000012","input_cache_read":"0.0000002","input_cache_write":"0.000000375"},"top_provider":{"context_length":1048576,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"expiration_date":"2026-03-26","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":12.0}},{"id":"deepcogito/cogito-v2.1-671b","canonical_slug":"deepcogito/cogito-v2.1-671b-20251118","hugging_face_id":"","name":"Deep Cogito: Cogito v2.1 671B","created":1763071233,"description":"Cogito v2.1 671B MoE represents one of the strongest open models globally, matching performance of frontier closed and open models. This model is trained using self play with reinforcement learning...","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000125","completion":"0.00000125"},"top_provider":{"context_length":128000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/deepcogito/cogito-v2.1-671b-20251118/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.25,"completion":1.25}},{"id":"openai/gpt-5.1","canonical_slug":"openai/gpt-5.1-20251113","hugging_face_id":"","name":"OpenAI: GPT-5.1","created":1763060305,"description":"GPT-5.1 is the latest frontier-grade model in the GPT-5 series, offering stronger general-purpose reasoning, improved instruction adherence, and a more natural conversational style compared to GPT-5. It uses adaptive reasoning...","context_length":400000,"architecture":{"modality":"text+image+file->text","input_modalities":["image","text","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000125","completion":"0.00001","input_cache_read":"0.00000013"},"top_provider":{"context_length":400000,"max_completion_tokens":128000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5.1-20251113/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.25,"completion":10.0}},{"id":"openai/gpt-5.1-chat","canonical_slug":"openai/gpt-5.1-chat-20251113","hugging_face_id":"","name":"OpenAI: GPT-5.1 Chat","created":1763060302,"description":"GPT-5.1 Chat (AKA Instant is the fast, lightweight member of the 5.1 family, optimized for low-latency chat while retaining strong general intelligence. It uses adaptive reasoning to selectively “think” on harder queries, improving accuracy on math, coding, and multi-step tasks without slowing down typical conversations. The model is warmer and more conversational by default, with better instruction following and more stable short-form reasoning. GPT-5.1 Chat is designed for high-throughput, interactive workloads where responsiveness and consistency matter more than deep deliberation.","context_length":128000,"architecture":{"modality":"text+image+file->text","input_modalities":["file","image","text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000125","completion":"0.00001","web_search":"0.01","input_cache_read":"0.000000125"},"top_provider":{"context_length":128000,"max_completion_tokens":16384,"is_moderated":true},"per_request_limits":null,"supported_parameters":["max_completion_tokens","max_tokens","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5.1-chat-20251113/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.25,"completion":10.0}},{"id":"openai/gpt-5.1-codex","canonical_slug":"openai/gpt-5.1-codex-20251113","hugging_face_id":"","name":"OpenAI: GPT-5.1-Codex","created":1763060298,"description":"GPT-5.1-Codex is a specialized version of GPT-5.1 optimized for software engineering and coding workflows. It is designed for both interactive development sessions and long, independent execution of complex engineering tasks....","context_length":400000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000125","completion":"0.00001","input_cache_read":"0.000000125"},"top_provider":{"context_length":400000,"max_completion_tokens":128000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5.1-codex-20251113/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.25,"completion":10.0}},{"id":"openai/gpt-5.1-codex-mini","canonical_slug":"openai/gpt-5.1-codex-mini-20251113","hugging_face_id":"","name":"OpenAI: GPT-5.1-Codex-Mini","created":1763057820,"description":"GPT-5.1-Codex-Mini is a smaller and faster version of GPT-5.1-Codex","context_length":400000,"architecture":{"modality":"text+image->text","input_modalities":["image","text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000025","completion":"0.000002","input_cache_read":"0.00000003"},"top_provider":{"context_length":400000,"max_completion_tokens":128000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5.1-codex-mini-20251113/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.25,"completion":2.0}},{"id":"kwaipilot/kat-coder-pro:free","canonical_slug":"kwaipilot/kat-coder-pro-v1","hugging_face_id":"","name":"Kwaipilot: KAT-Coder-Pro V1 (free)","created":1762745912,"description":"KAT-Coder-Pro V1 is KwaiKAT's most advanced agentic coding model in the KAT-Coder series. Designed specifically for agentic coding tasks, it excels in real-world software engineering scenarios, achieving 73.4% solve rate on the SWE-Bench Verified benchmark. \n\nThe model has been optimized for tool-use capability, multi-turn interaction, instruction following, generalization, and comprehensive capabilities through a multi-stage training process, including mid-training, supervised fine-tuning (SFT), reinforcement fine-tuning (RFT), and scalable agentic RL.","context_length":256000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":256000,"max_completion_tokens":128000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"moonshotai/kimi-linear-48b-a3b-instruct","canonical_slug":"moonshotai/kimi-linear-48b-a3b-instruct-20251029","hugging_face_id":"moonshotai/Kimi-Linear-48B-A3B-Instruct","name":"MoonshotAI: Kimi Linear 48B A3B Instruct","created":1762565833,"description":"Kimi Linear is a hybrid linear attention architecture that outperforms traditional full attention methods across various contexts, including short, long, and reinforcement learning (RL) scaling regimes. At its core is Kimi Delta Attention (KDA)—a refined version of Gated DeltaNet that introduces a more efficient gating mechanism to optimize the use of finite-state RNN memory.\n\nKimi Linear achieves superior performance and hardware efficiency, especially for long-context tasks. It reduces the need for large KV caches by up to 75% and boosts decoding throughput by up to 6x for contexts as long as 1M tokens.","context_length":1048576,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000007","completion":"0.0000009","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":1048576,"max_completion_tokens":1048576,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.7,"completion":0.9}},{"id":"moonshotai/kimi-k2-thinking","canonical_slug":"moonshotai/kimi-k2-thinking-20251106","hugging_face_id":"moonshotai/Kimi-K2-Thinking","name":"MoonshotAI: Kimi K2 Thinking","created":1762440622,"description":"Kimi K2 Thinking is Moonshot AI’s most advanced open reasoning model to date, extending the K2 series into agentic, long-horizon reasoning. Built on the trillion-parameter Mixture-of-Experts (MoE) architecture introduced in...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000006","completion":"0.0000025","input_cache_read":"0.00000015"},"top_provider":{"context_length":262144,"max_completion_tokens":262144,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/moonshotai/kimi-k2-thinking-20251106/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.6,"completion":2.5}},{"id":"amazon/nova-premier-v1","canonical_slug":"amazon/nova-premier-v1","hugging_face_id":"","name":"Amazon: Nova Premier 1.0","created":1761950332,"description":"Amazon Nova Premier is the most capable of Amazon’s multimodal models for complex reasoning tasks and for use as the best teacher for distilling custom models.","context_length":1000000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Nova","instruct_type":null},"pricing":{"prompt":"0.0000025","completion":"0.0000125","input_cache_read":"0.000000625"},"top_provider":{"context_length":1000000,"max_completion_tokens":32000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["max_tokens","stop","temperature","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/amazon/nova-premier-v1/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.5,"completion":12.5}},{"id":"perplexity/sonar-pro-search","canonical_slug":"perplexity/sonar-pro-search","hugging_face_id":"","name":"Perplexity: Sonar Pro Search","created":1761854366,"description":"Exclusively available on the OpenRouter API, Sonar Pro's new Pro Search mode is Perplexity's most advanced agentic search system. It is designed for deeper reasoning and analysis. Pricing is based...","context_length":200000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.000003","completion":"0.000015","web_search":"0.018"},"top_provider":{"context_length":200000,"max_completion_tokens":8000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","structured_outputs","temperature","top_k","top_p","web_search_options"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/perplexity/sonar-pro-search/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":3.0,"completion":15.0}},{"id":"mistralai/voxtral-small-24b-2507","canonical_slug":"mistralai/voxtral-small-24b-2507","hugging_face_id":"mistralai/Voxtral-Small-24B-2507","name":"Mistral: Voxtral Small 24B 2507","created":1761835144,"description":"Voxtral Small is an enhancement of Mistral Small 3, incorporating state-of-the-art audio input capabilities while retaining best-in-class text performance. It excels at speech transcription, translation and audio understanding. Input audio...","context_length":32000,"architecture":{"modality":"text+audio->text","input_modalities":["text","audio"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.0000001","completion":"0.0000003","audio":"0.0001","input_cache_read":"0.00000001"},"top_provider":{"context_length":32000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.2,"top_p":0.95,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/mistralai/voxtral-small-24b-2507/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.3}},{"id":"openai/gpt-oss-safeguard-20b","canonical_slug":"openai/gpt-oss-safeguard-20b","hugging_face_id":"openai/gpt-oss-safeguard-20b","name":"OpenAI: gpt-oss-safeguard-20b","created":1761752836,"description":"gpt-oss-safeguard-20b is a safety reasoning model from OpenAI built upon gpt-oss-20b. This open-weight, 21B-parameter Mixture-of-Experts (MoE) model offers lower latency for safety tasks like content classification, LLM filtering, and trust...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.000000075","completion":"0.0000003","input_cache_read":"0.000000037"},"top_provider":{"context_length":131072,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-oss-safeguard-20b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.075,"completion":0.3}},{"id":"nvidia/nemotron-nano-12b-v2-vl:free","canonical_slug":"nvidia/nemotron-nano-12b-v2-vl","hugging_face_id":"nvidia/NVIDIA-Nemotron-Nano-12B-v2-VL-BF16","name":"NVIDIA: Nemotron Nano 12B 2 VL (free)","created":1761675565,"description":"NVIDIA Nemotron Nano 2 VL is a 12-billion-parameter open multimodal reasoning model designed for video understanding and document intelligence. It introduces a hybrid Transformer-Mamba architecture, combining transformer-level accuracy with Mamba’s...","context_length":128000,"architecture":{"modality":"text+image+video->text","input_modalities":["image","text","video"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":128000,"max_completion_tokens":128000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","seed","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/nvidia/nemotron-nano-12b-v2-vl/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"nvidia/nemotron-nano-12b-v2-vl","canonical_slug":"nvidia/nemotron-nano-12b-v2-vl","hugging_face_id":"nvidia/NVIDIA-Nemotron-Nano-12B-v2-VL-BF16","name":"NVIDIA: Nemotron Nano 12B 2 VL","created":1761675565,"description":"NVIDIA Nemotron Nano 2 VL is a 12-billion-parameter open multimodal reasoning model designed for video understanding and document intelligence. It introduces a hybrid Transformer-Mamba architecture, combining transformer-level accuracy with Mamba’s...","context_length":131072,"architecture":{"modality":"text+image+video->text","input_modalities":["image","text","video"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000002","completion":"0.0000006"},"top_provider":{"context_length":131072,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":"2026-05-07","links":{"details":"/api/v1/models/nvidia/nemotron-nano-12b-v2-vl/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":0.6}},{"id":"minimax/minimax-m2","canonical_slug":"minimax/minimax-m2","hugging_face_id":"MiniMaxAI/MiniMax-M2","name":"MiniMax: MiniMax M2","created":1761252093,"description":"MiniMax-M2 is a compact, high-efficiency large language model optimized for end-to-end coding and agentic workflows. With 10 billion activated parameters (230 billion total), it delivers near-frontier intelligence across general reasoning,...","context_length":196608,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.000000255","completion":"0.000001","input_cache_read":"0.00000003"},"top_provider":{"context_length":196608,"max_completion_tokens":196608,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/minimax/minimax-m2/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.255,"completion":1.0}},{"id":"liquid/lfm2-8b-a1b","canonical_slug":"liquid/lfm2-8b-a1b","hugging_face_id":"LiquidAI/LFM2-8B-A1B","name":"LiquidAI: LFM2-8B-A1B","created":1760970984,"description":"LFM2-8B-A1B is an efficient on-device Mixture-of-Experts (MoE) model from Liquid AI’s LFM2 family, built for fast, high-quality inference on edge hardware. It uses 8.3B total parameters with only ~1.5B active per token, delivering strong performance while keeping compute and memory usage low—making it ideal for phones, tablets, and laptops.","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000001","completion":"0.00000002"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","min_p","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"knowledge_cutoff":null,"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.01,"completion":0.02}},{"id":"liquid/lfm-2.2-6b","canonical_slug":"liquid/lfm-2.2-6b","hugging_face_id":"LiquidAI/LFM2-2.6B","name":"LiquidAI: LFM2-2.6B","created":1760970889,"description":"LFM2 is a new generation of hybrid models developed by Liquid AI, specifically designed for edge AI and on-device deployment. It sets a new standard in terms of quality, speed, and memory efficiency.","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000001","completion":"0.00000002"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","min_p","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"knowledge_cutoff":null,"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.01,"completion":0.02}},{"id":"ibm-granite/granite-4.0-h-micro","canonical_slug":"ibm-granite/granite-4.0-h-micro","hugging_face_id":"ibm-granite/granite-4.0-h-micro","name":"IBM: Granite 4.0 Micro","created":1760927695,"description":"Granite-4.0-H-Micro is a 3B parameter from the Granite 4 family of models. These models are the latest in a series of models released by IBM. They are fine-tuned for long...","context_length":131000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.000000017","completion":"0.00000011"},"top_provider":{"context_length":131000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","temperature","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/ibm-granite/granite-4.0-h-micro/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.017,"completion":0.11}},{"id":"deepcogito/cogito-v2-preview-llama-405b","canonical_slug":"deepcogito/cogito-v2-preview-llama-405b","hugging_face_id":"deepcogito/cogito-v2-preview-llama-405B","name":"Deep Cogito: Cogito V2 Preview Llama 405B","created":1760709933,"description":"Cogito v2 405B is a dense hybrid reasoning model that combines direct answering capabilities with advanced self-reflection. It represents a significant step toward frontier intelligence with dense architecture delivering performance competitive with leading closed models. This advanced reasoning system combines policy improvement with massive scale for exceptional capabilities.","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":null},"pricing":{"prompt":"0.0000035","completion":"0.0000035"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"expiration_date":"2026-02-04","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":3.5,"completion":3.5}},{"id":"openai/gpt-5-image-mini","canonical_slug":"openai/gpt-5-image-mini","hugging_face_id":"","name":"OpenAI: GPT-5 Image Mini","created":1760624583,"description":"GPT-5 Image Mini combines OpenAI's advanced language capabilities, powered by [GPT-5 Mini](https://openrouter.ai/openai/gpt-5-mini), with GPT Image 1 Mini for efficient image generation. This natively multimodal model features superior instruction following, text...","context_length":400000,"architecture":{"modality":"text+image+file->text+image","input_modalities":["file","image","text"],"output_modalities":["image","text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.0000025","completion":"0.000002","web_search":"0.01","input_cache_read":"0.00000025"},"top_provider":{"context_length":400000,"max_completion_tokens":128000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","presence_penalty","reasoning","response_format","seed","stop","structured_outputs","temperature","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5-image-mini/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.5,"completion":2.0}},{"id":"anthropic/claude-haiku-4.5","canonical_slug":"anthropic/claude-4.5-haiku-20251001","hugging_face_id":"","name":"Anthropic: Claude Haiku 4.5","created":1760547638,"description":"Claude Haiku 4.5 is Anthropic’s fastest and most efficient model, delivering near-frontier intelligence at a fraction of the cost and latency of larger Claude models. Matching Claude Sonnet 4’s performance across reasoning, coding, and computer-use tasks, Haiku 4.5 brings frontier-level capability to real-time and high-volume applications.\r\n\r\nIt introduces extended thinking to the Haiku line; enabling controllable reasoning depth, summarized or interleaved thought output, and tool-assisted workflows with full support for coding, bash, web search, and computer-use tools. Scoring >73% on SWE-bench Verified, Haiku 4.5 ranks among the world’s best coding models while maintaining exceptional responsiveness for sub-agents, parallelized execution, and scaled deployment.","context_length":200000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"Claude","instruct_type":null},"pricing":{"prompt":"0.000001","completion":"0.000005","web_search":"0.01","input_cache_read":"0.0000001","input_cache_write":"0.00000125"},"top_provider":{"context_length":200000,"max_completion_tokens":64000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/anthropic/claude-4.5-haiku-20251001/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.0,"completion":5.0}},{"id":"qwen/qwen3-vl-8b-thinking","canonical_slug":"qwen/qwen3-vl-8b-thinking","hugging_face_id":"Qwen/Qwen3-VL-8B-Thinking","name":"Qwen: Qwen3 VL 8B Thinking","created":1760463746,"description":"Qwen3-VL-8B-Thinking is the reasoning-optimized variant of the Qwen3-VL-8B multimodal model, designed for advanced visual and textual reasoning across complex scenes, documents, and temporal sequences. It integrates enhanced multimodal alignment and...","context_length":131072,"architecture":{"modality":"text+image->text","input_modalities":["image","text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.000000117","completion":"0.000001365"},"top_provider":{"context_length":131072,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":1,"top_p":0.95},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-vl-8b-thinking/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.117,"completion":1.365}},{"id":"qwen/qwen3-vl-8b-instruct","canonical_slug":"qwen/qwen3-vl-8b-instruct","hugging_face_id":"Qwen/Qwen3-VL-8B-Instruct","name":"Qwen: Qwen3 VL 8B Instruct","created":1760463308,"description":"Qwen3-VL-8B-Instruct is a multimodal vision-language model from the Qwen3-VL series, built for high-fidelity understanding and reasoning across text, images, and video. It features improved multimodal fusion with Interleaved-MRoPE for long-horizon...","context_length":131072,"architecture":{"modality":"text+image->text","input_modalities":["image","text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000008","completion":"0.0000005"},"top_provider":{"context_length":131072,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.7,"top_p":0.8,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-vl-8b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.08,"completion":0.5}},{"id":"openai/gpt-5-image","canonical_slug":"openai/gpt-5-image","hugging_face_id":"","name":"OpenAI: GPT-5 Image","created":1760447986,"description":"[GPT-5](https://openrouter.ai/openai/gpt-5) Image combines OpenAI's GPT-5 model with state-of-the-art image generation capabilities. It offers major improvements in reasoning, code quality, and user experience while incorporating GPT Image 1's superior instruction following,...","context_length":400000,"architecture":{"modality":"text+image+file->text+image","input_modalities":["image","text","file"],"output_modalities":["image","text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00001","completion":"0.00001","web_search":"0.01","input_cache_read":"0.00000125"},"top_provider":{"context_length":400000,"max_completion_tokens":128000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","presence_penalty","reasoning","response_format","seed","stop","structured_outputs","temperature","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5-image/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":10.0,"completion":10.0}},{"id":"openai/o3-deep-research","canonical_slug":"openai/o3-deep-research-2025-06-26","hugging_face_id":"","name":"OpenAI: o3 Deep Research","created":1760129661,"description":"o3-deep-research is OpenAI's advanced model for deep research, designed to tackle complex, multi-step research tasks.\n\nNote: This model always uses the 'web_search' tool which adds additional cost.","context_length":200000,"architecture":{"modality":"text+image+file->text","input_modalities":["image","text","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00001","completion":"0.00004","web_search":"0.01","input_cache_read":"0.0000025"},"top_provider":{"context_length":200000,"max_completion_tokens":100000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","presence_penalty","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/o3-deep-research-2025-06-26/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":10.0,"completion":40.0}},{"id":"openai/o4-mini-deep-research","canonical_slug":"openai/o4-mini-deep-research-2025-06-26","hugging_face_id":"","name":"OpenAI: o4 Mini Deep Research","created":1760129642,"description":"o4-mini-deep-research is OpenAI's faster, more affordable deep research model—ideal for tackling complex, multi-step research tasks.\n\nNote: This model always uses the 'web_search' tool which adds additional cost.","context_length":200000,"architecture":{"modality":"text+image+file->text","input_modalities":["file","image","text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.000002","completion":"0.000008","web_search":"0.01","input_cache_read":"0.0000005"},"top_provider":{"context_length":200000,"max_completion_tokens":100000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","presence_penalty","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/o4-mini-deep-research-2025-06-26/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":8.0}},{"id":"nvidia/llama-3.3-nemotron-super-49b-v1.5","canonical_slug":"nvidia/llama-3.3-nemotron-super-49b-v1.5","hugging_face_id":"nvidia/Llama-3_3-Nemotron-Super-49B-v1_5","name":"NVIDIA: Llama 3.3 Nemotron Super 49B V1.5","created":1760101395,"description":"Llama-3.3-Nemotron-Super-49B-v1.5 is a 49B-parameter, English-centric reasoning/chat model derived from Meta’s Llama-3.3-70B-Instruct with a 128K context. It’s post-trained for agentic workflows (RAG, tool calling) via SFT across math, code, science, and...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":null},"pricing":{"prompt":"0.0000001","completion":"0.0000004"},"top_provider":{"context_length":131072,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.6,"top_p":0.95,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-03-31","expiration_date":null,"links":{"details":"/api/v1/models/nvidia/llama-3.3-nemotron-super-49b-v1.5/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.4}},{"id":"baidu/ernie-4.5-21b-a3b-thinking","canonical_slug":"baidu/ernie-4.5-21b-a3b-thinking","hugging_face_id":"baidu/ERNIE-4.5-21B-A3B-Thinking","name":"Baidu: ERNIE 4.5 21B A3B Thinking","created":1760048887,"description":"ERNIE-4.5-21B-A3B-Thinking is Baidu's upgraded lightweight MoE model, refined to boost reasoning depth and quality for top-tier performance in logical puzzles, math, science, coding, text generation, and expert-level academic benchmarks.","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000007","completion":"0.00000028"},"top_provider":{"context_length":131072,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{"temperature":0.6,"top_p":0.95,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/baidu/ernie-4.5-21b-a3b-thinking/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.07,"completion":0.28}},{"id":"google/gemini-2.5-flash-image","canonical_slug":"google/gemini-2.5-flash-image","hugging_face_id":"","name":"Google: Nano Banana (Gemini 2.5 Flash Image)","created":1759870431,"description":"Gemini 2.5 Flash Image, a.k.a. \"Nano Banana,\" is now generally available. It is a state of the art image generation model with contextual understanding. It is capable of image generation,...","context_length":32768,"architecture":{"modality":"text+image->text+image","input_modalities":["image","text"],"output_modalities":["image","text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0.0000003","completion":"0.0000025","image":"0.0000003","audio":"0.000001","web_search":"0.014","internal_reasoning":"0.0000025","input_cache_read":"0.00000003","input_cache_write":"0.00000008333333333333334"},"top_provider":{"context_length":32768,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","seed","stop","structured_outputs","temperature","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-01-31","expiration_date":null,"links":{"details":"/api/v1/models/google/gemini-2.5-flash-image/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.3,"completion":2.5}},{"id":"qwen/qwen3-vl-30b-a3b-thinking","canonical_slug":"qwen/qwen3-vl-30b-a3b-thinking","hugging_face_id":"Qwen/Qwen3-VL-30B-A3B-Thinking","name":"Qwen: Qwen3 VL 30B A3B Thinking","created":1759794479,"description":"Qwen3-VL-30B-A3B-Thinking is a multimodal model that unifies strong text generation with visual understanding for images and videos. Its Thinking variant enhances reasoning in STEM, math, and complex tasks. It excels...","context_length":131072,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000013","completion":"0.00000156"},"top_provider":{"context_length":131072,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.8,"top_p":0.95,"top_k":20,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":1},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-vl-30b-a3b-thinking/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.13,"completion":1.56}},{"id":"qwen/qwen3-vl-30b-a3b-instruct","canonical_slug":"qwen/qwen3-vl-30b-a3b-instruct","hugging_face_id":"Qwen/Qwen3-VL-30B-A3B-Instruct","name":"Qwen: Qwen3 VL 30B A3B Instruct","created":1759794476,"description":"Qwen3-VL-30B-A3B-Instruct is a multimodal model that unifies strong text generation with visual understanding for images and videos. Its Instruct variant optimizes instruction-following for general multimodal tasks. It excels in perception...","context_length":131072,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000013","completion":"0.00000052"},"top_provider":{"context_length":131072,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.7,"top_p":0.8,"top_k":20,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":1},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-vl-30b-a3b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.13,"completion":0.52}},{"id":"openai/gpt-5-pro","canonical_slug":"openai/gpt-5-pro-2025-10-06","hugging_face_id":"","name":"OpenAI: GPT-5 Pro","created":1759776663,"description":"GPT-5 Pro is OpenAI’s most advanced model, offering major improvements in reasoning, code quality, and user experience. It is optimized for complex tasks that require step-by-step reasoning, instruction following, and...","context_length":400000,"architecture":{"modality":"text+image+file->text","input_modalities":["image","text","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.000015","completion":"0.00012","web_search":"0.01"},"top_provider":{"context_length":400000,"max_completion_tokens":128000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-09-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5-pro-2025-10-06/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":15.0,"completion":120.0}},{"id":"z-ai/glm-4.6","canonical_slug":"z-ai/glm-4.6","hugging_face_id":"zai-org/GLM-4.6","name":"Z.ai: GLM 4.6","created":1759235576,"description":"Compared with GLM-4.5, this generation brings several key improvements: Longer context window: The context window has been expanded from 128K to 200K tokens, enabling the model to handle more complex...","context_length":204800,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000039","completion":"0.0000019"},"top_provider":{"context_length":204800,"max_completion_tokens":204800,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.6,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":"2026-05-14","links":{"details":"/api/v1/models/z-ai/glm-4.6/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.39,"completion":1.9}},{"id":"z-ai/glm-4.6:exacto","canonical_slug":"z-ai/glm-4.6","hugging_face_id":"","name":"Z.ai: GLM 4.6 (exacto)","created":1759235576,"description":"Compared with GLM-4.5, this generation brings several key improvements:\n\nLonger context window: The context window has been expanded from 128K to 200K tokens, enabling the model to handle more complex agentic tasks.\nSuperior coding performance: The model achieves higher scores on code benchmarks and demonstrates better real-world performance in applications such as Claude Code、Cline、Roo Code and Kilo Code, including improvements in generating visually polished front-end pages.\nAdvanced reasoning: GLM-4.6 shows a clear improvement in reasoning performance and supports tool use during inference, leading to stronger overall capability.\nMore capable agents: GLM-4.6 exhibits stronger performance in tool using and search-based agents, and integrates more effectively within agent frameworks.\nRefined writing: Better aligns with human preferences in style and readability, and performs more naturally in role-playing scenarios.","context_length":204800,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000044","completion":"0.00000176","input_cache_read":"0.00000011"},"top_provider":{"context_length":204800,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.6,"top_p":null,"frequency_penalty":null},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.44,"completion":1.76}},{"id":"anthropic/claude-sonnet-4.5","canonical_slug":"anthropic/claude-4.5-sonnet-20250929","hugging_face_id":"","name":"Anthropic: Claude Sonnet 4.5","created":1759161676,"description":"Claude Sonnet 4.5 is Anthropic’s most advanced Sonnet model to date, optimized for real-world agents and coding workflows. It delivers state-of-the-art performance on coding benchmarks such as SWE-bench Verified, with...","context_length":1000000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"Claude","instruct_type":null},"pricing":{"prompt":"0.000003","completion":"0.000015","web_search":"0.01","input_cache_read":"0.0000003","input_cache_write":"0.00000375"},"top_provider":{"context_length":1000000,"max_completion_tokens":64000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":1,"top_p":1,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-01-31","expiration_date":null,"links":{"details":"/api/v1/models/anthropic/claude-4.5-sonnet-20250929/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":3.0,"completion":15.0}},{"id":"deepseek/deepseek-v3.2-exp","canonical_slug":"deepseek/deepseek-v3.2-exp","hugging_face_id":"deepseek-ai/DeepSeek-V3.2-Exp","name":"DeepSeek: DeepSeek V3.2 Exp","created":1759150481,"description":"DeepSeek-V3.2-Exp is an experimental large language model released by DeepSeek as an intermediate step between V3.1 and future architectures. It introduces DeepSeek Sparse Attention (DSA), a fine-grained sparse attention mechanism designed to improve training and inference efficiency in long-context scenarios while maintaining output quality. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)\r\n\r\nThe model was trained under conditions aligned with V3.1-Terminus to enable direct comparison. Benchmarking shows performance roughly on par with V3.1 across reasoning, coding, and agentic tool-use tasks, with minor tradeoffs and gains depending on the domain. This release focuses on validating architectural optimizations for extended context lengths rather than advancing raw task accuracy, making it primarily a research-oriented model for exploring efficient transformer designs.","context_length":163840,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":"deepseek-v3.1"},"pricing":{"prompt":"0.00000027","completion":"0.00000041"},"top_provider":{"context_length":163840,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.6,"top_p":0.95,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-07-31","expiration_date":null,"links":{"details":"/api/v1/models/deepseek/deepseek-v3.2-exp/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.21,"completion":0.32}},{"id":"thedrummer/cydonia-24b-v4.1","canonical_slug":"thedrummer/cydonia-24b-v4.1","hugging_face_id":"thedrummer/cydonia-24b-v4.1","name":"TheDrummer: Cydonia 24B V4.1","created":1758931878,"description":"Uncensored and creative writing model based on Mistral Small 3.2 24B with good recall, prompt adherence, and intelligence.","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000003","completion":"0.0000005","input_cache_read":"0.00000015"},"top_provider":{"context_length":131072,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-04-30","expiration_date":null,"links":{"details":"/api/v1/models/thedrummer/cydonia-24b-v4.1/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.3,"completion":0.5}},{"id":"relace/relace-apply-3","canonical_slug":"relace/relace-apply-3","hugging_face_id":"","name":"Relace: Relace Apply 3","created":1758891572,"description":"Relace Apply 3 is a specialized code-patching LLM that merges AI-suggested edits straight into your source files. It can apply updates from GPT-4o, Claude, and others into your files at...","context_length":256000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000085","completion":"0.00000125"},"top_provider":{"context_length":256000,"max_completion_tokens":128000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","seed","stop"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/relace/relace-apply-3/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.85,"completion":1.25}},{"id":"google/gemini-2.5-flash-preview-09-2025","canonical_slug":"google/gemini-2.5-flash-preview-09-2025","hugging_face_id":"","name":"Google: Gemini 2.5 Flash Preview 09-2025","created":1758820178,"description":"Gemini 2.5 Flash Preview September 2025 Checkpoint is Google's state-of-the-art workhorse model, specifically designed for advanced reasoning, coding, mathematics, and scientific tasks. It includes built-in \"thinking\" capabilities, enabling it to provide responses with greater accuracy and nuanced context handling. \n\nAdditionally, Gemini 2.5 Flash is configurable through the \"max tokens for reasoning\" parameter, as described in the documentation (https://openrouter.ai/docs/use-cases/reasoning-tokens#max-tokens-for-reasoning).","context_length":1048576,"architecture":{"modality":"text+image+file+audio+video->text","input_modalities":["image","file","text","audio","video"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0.0000003","completion":"0.0000025","image":"0.0000003","audio":"0.000001","internal_reasoning":"0.0000025","input_cache_read":"0.00000003","input_cache_write":"0.00000008333333333333334"},"top_provider":{"context_length":1048576,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"expiration_date":"2026-02-17","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.3,"completion":2.5}},{"id":"google/gemini-2.5-flash-lite-preview-09-2025","canonical_slug":"google/gemini-2.5-flash-lite-preview-09-2025","hugging_face_id":"","name":"Google: Gemini 2.5 Flash Lite Preview 09-2025","created":1758819686,"description":"Gemini 2.5 Flash-Lite is a lightweight reasoning model in the Gemini 2.5 family, optimized for ultra-low latency and cost efficiency. It offers improved throughput, faster token generation, and better performance...","context_length":1048576,"architecture":{"modality":"text+image+file+audio+video->text","input_modalities":["text","image","file","audio","video"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0.0000001","completion":"0.0000004","image":"0.0000001","audio":"0.0000003","web_search":"0.014","internal_reasoning":"0.0000004","input_cache_read":"0.00000001","input_cache_write":"0.00000008333333333333334"},"top_provider":{"context_length":1048576,"max_completion_tokens":65535,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-01-31","expiration_date":null,"links":{"details":"/api/v1/models/google/gemini-2.5-flash-lite-preview-09-2025/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.4}},{"id":"qwen/qwen3-vl-235b-a22b-thinking","canonical_slug":"qwen/qwen3-vl-235b-a22b-thinking","hugging_face_id":"Qwen/Qwen3-VL-235B-A22B-Thinking","name":"Qwen: Qwen3 VL 235B A22B Thinking","created":1758668690,"description":"Qwen3-VL-235B-A22B Thinking is a multimodal model that unifies strong text generation with visual understanding across images and video. The Thinking model is optimized for multimodal reasoning in STEM and math....","context_length":131072,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000026","completion":"0.0000026"},"top_provider":{"context_length":131072,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.8,"top_p":0.95,"top_k":20,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":1},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-vl-235b-a22b-thinking/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.26,"completion":2.6}},{"id":"qwen/qwen3-vl-235b-a22b-instruct","canonical_slug":"qwen/qwen3-vl-235b-a22b-instruct","hugging_face_id":"Qwen/Qwen3-VL-235B-A22B-Instruct","name":"Qwen: Qwen3 VL 235B A22B Instruct","created":1758668687,"description":"Qwen3-VL-235B-A22B Instruct is an open-weight multimodal model that unifies strong text generation with visual understanding across images and video. The Instruct model targets general vision-language use (VQA, document parsing, chart/table...","context_length":262144,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.0000002","completion":"0.00000088","input_cache_read":"0.00000011"},"top_provider":{"context_length":262144,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.7,"top_p":0.8,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-vl-235b-a22b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":0.88}},{"id":"qwen/qwen3-max","canonical_slug":"qwen/qwen3-max","hugging_face_id":"","name":"Qwen: Qwen3 Max","created":1758662808,"description":"Qwen3-Max is an updated release built on the Qwen3 series, offering major improvements in reasoning, instruction following, multilingual support, and long-tail knowledge coverage compared to the January 2025 version. It...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000078","completion":"0.0000039","input_cache_read":"0.000000156","input_cache_write":"0.000000975"},"top_provider":{"context_length":262144,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","presence_penalty","response_format","seed","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":1,"top_p":1,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-06-30","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-max/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.78,"completion":3.9}},{"id":"qwen/qwen3-coder-plus","canonical_slug":"qwen/qwen3-coder-plus","hugging_face_id":"","name":"Qwen: Qwen3 Coder Plus","created":1758662707,"description":"Qwen3 Coder Plus is Alibaba's proprietary version of the Open Source Qwen3 Coder 480B A35B. It is a powerful coding agent model specializing in autonomous programming via tool calling and...","context_length":1000000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000065","completion":"0.00000325","input_cache_read":"0.00000013","input_cache_write":"0.0000008125"},"top_provider":{"context_length":1000000,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","presence_penalty","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-06-30","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-coder-plus/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.65,"completion":3.25}},{"id":"openai/gpt-5-codex","canonical_slug":"openai/gpt-5-codex","hugging_face_id":"","name":"OpenAI: GPT-5 Codex","created":1758643403,"description":"GPT-5-Codex is a specialized version of GPT-5 optimized for software engineering and coding workflows. It is designed for both interactive development sessions and long, independent execution of complex engineering tasks....","context_length":400000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000125","completion":"0.00001","input_cache_read":"0.000000125"},"top_provider":{"context_length":400000,"max_completion_tokens":128000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-09-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5-codex/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.25,"completion":10.0}},{"id":"deepseek/deepseek-v3.1-terminus:exacto","canonical_slug":"deepseek/deepseek-v3.1-terminus","hugging_face_id":"deepseek-ai/DeepSeek-V3.1-Terminus","name":"DeepSeek: DeepSeek V3.1 Terminus (exacto)","created":1758548275,"description":"DeepSeek-V3.1 Terminus is an update to [DeepSeek V3.1](/deepseek/deepseek-chat-v3.1) that maintains the model's original capabilities while addressing issues reported by users, including language consistency and agent capabilities, further optimizing the model's performance in coding and search agents. It is a large hybrid reasoning model (671B parameters, 37B active) that supports both thinking and non-thinking modes. It extends the DeepSeek-V3 base with a two-phase long-context training process, reaching up to 128K tokens, and uses FP8 microscaling for efficient inference. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)\n\nThe model improves tool use, code generation, and reasoning efficiency, achieving performance comparable to DeepSeek-R1 on difficult benchmarks while responding more quickly. It supports structured tool calling, code agents, and search agents, making it suitable for research, coding, and agentic workflows.","context_length":163840,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":"deepseek-v3.1"},"pricing":{"prompt":"0.00000021","completion":"0.00000079","input_cache_read":"0.000000168"},"top_provider":{"context_length":163840,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.21,"completion":0.79}},{"id":"deepseek/deepseek-v3.1-terminus","canonical_slug":"deepseek/deepseek-v3.1-terminus","hugging_face_id":"deepseek-ai/DeepSeek-V3.1-Terminus","name":"DeepSeek: DeepSeek V3.1 Terminus","created":1758548275,"description":"DeepSeek-V3.1 Terminus is an update to [DeepSeek V3.1](/deepseek/deepseek-chat-v3.1) that maintains the model's original capabilities while addressing issues reported by users, including language consistency and agent capabilities, further optimizing the model's performance in coding and search agents. It is a large hybrid reasoning model (671B parameters, 37B active) that supports both thinking and non-thinking modes. It extends the DeepSeek-V3 base with a two-phase long-context training process, reaching up to 128K tokens, and uses FP8 microscaling for efficient inference. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)\r\n\r\nThe model improves tool use, code generation, and reasoning efficiency, achieving performance comparable to DeepSeek-R1 on difficult benchmarks while responding more quickly. It supports structured tool calling, code agents, and search agents, making it suitable for research, coding, and agentic workflows.","context_length":163840,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":"deepseek-v3.1"},"pricing":{"prompt":"0.00000027","completion":"0.00000095","input_cache_read":"0.00000013"},"top_provider":{"context_length":163840,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/deepseek/deepseek-v3.1-terminus/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.21,"completion":0.79}},{"id":"x-ai/grok-4-fast","canonical_slug":"x-ai/grok-4-fast","hugging_face_id":"","name":"xAI: Grok 4 Fast","created":1758240090,"description":"Grok 4 Fast is xAI's latest multimodal model with SOTA cost-efficiency and a 2M token context window. It comes in two flavors: non-reasoning and reasoning. Read more about the model...","context_length":2000000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"Grok","instruct_type":null},"pricing":{"prompt":"0.0000002","completion":"0.0000005","web_search":"0.005","input_cache_read":"0.00000005"},"top_provider":{"context_length":2000000,"max_completion_tokens":30000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","logprobs","max_tokens","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-09-30","expiration_date":"2026-05-15","links":{"details":"/api/v1/models/x-ai/grok-4-fast/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":0.5}},{"id":"alibaba/tongyi-deepresearch-30b-a3b:free","canonical_slug":"alibaba/tongyi-deepresearch-30b-a3b","hugging_face_id":"Alibaba-NLP/Tongyi-DeepResearch-30B-A3B","name":"Tongyi DeepResearch 30B A3B (free)","created":1758210804,"description":"Tongyi DeepResearch is an agentic large language model developed by Tongyi Lab, with 30 billion total parameters activating only 3 billion per token. It's optimized for long-horizon, deep information-seeking tasks and delivers state-of-the-art performance on benchmarks like Humanity's Last Exam, BrowserComp, BrowserComp-ZH, WebWalkerQA, GAIA, xbench-DeepSearch, and FRAMES. This makes it superior for complex agentic search, reasoning, and multi-step problem-solving compared to prior models.\n\nThe model includes a fully automated synthetic data pipeline for scalable pre-training, fine-tuning, and reinforcement learning. It uses large-scale continual pre-training on diverse agentic data to boost reasoning and stay fresh. It also features end-to-end on-policy RL with a customized Group Relative Policy Optimization, including token-level gradients and negative sample filtering for stable training. The model supports ReAct for core ability checks and an IterResearch-based 'Heavy' mode for max performance through test-time scaling. It's ideal for advanced research agents, tool use, and heavy inference workflows.","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":131072,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"alibaba/tongyi-deepresearch-30b-a3b","canonical_slug":"alibaba/tongyi-deepresearch-30b-a3b","hugging_face_id":"Alibaba-NLP/Tongyi-DeepResearch-30B-A3B","name":"Tongyi DeepResearch 30B A3B","created":1758210804,"description":"Tongyi DeepResearch is an agentic large language model developed by Tongyi Lab, with 30 billion total parameters activating only 3 billion per token. It's optimized for long-horizon, deep information-seeking tasks...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000009","completion":"0.00000045","input_cache_read":"0.00000009"},"top_provider":{"context_length":131072,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/alibaba/tongyi-deepresearch-30b-a3b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.09,"completion":0.45}},{"id":"qwen/qwen3-coder-flash","canonical_slug":"qwen/qwen3-coder-flash","hugging_face_id":"","name":"Qwen: Qwen3 Coder Flash","created":1758115536,"description":"Qwen3 Coder Flash is Alibaba's fast and cost efficient version of their proprietary Qwen3 Coder Plus. It is a powerful coding agent model specializing in autonomous programming via tool calling...","context_length":1000000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.000000195","completion":"0.000000975","input_cache_read":"0.000000039","input_cache_write":"0.00000024375"},"top_provider":{"context_length":1000000,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","presence_penalty","response_format","seed","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-06-30","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-coder-flash/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.195,"completion":0.975}},{"id":"opengvlab/internvl3-78b","canonical_slug":"opengvlab/internvl3-78b","hugging_face_id":"OpenGVLab/InternVL3-78B","name":"OpenGVLab: InternVL3 78B","created":1757962555,"description":"The InternVL3 series is an advanced multimodal large language model (MLLM). Compared to InternVL 2.5, InternVL3 demonstrates stronger multimodal perception and reasoning capabilities. \n\nIn addition, InternVL3 is benchmarked against the Qwen2.5 Chat models, whose pre-trained base models serve as the initialization for its language component. Benefiting from Native Multimodal Pre-Training, the InternVL3 series surpasses the Qwen2.5 series in overall text performance.","context_length":32768,"architecture":{"modality":"text+image->text","input_modalities":["image","text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000015","completion":"0.0000006","input_cache_read":"0.000000075"},"top_provider":{"context_length":32768,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.15,"completion":0.6}},{"id":"qwen/qwen3-next-80b-a3b-thinking","canonical_slug":"qwen/qwen3-next-80b-a3b-thinking-2509","hugging_face_id":"Qwen/Qwen3-Next-80B-A3B-Thinking","name":"Qwen: Qwen3 Next 80B A3B Thinking","created":1757612284,"description":"Qwen3-Next-80B-A3B-Thinking is a reasoning-first chat model in the Qwen3-Next line that outputs structured “thinking” traces by default. It’s designed for hard multi-step problems; math proofs, code synthesis/debugging, logic, and agentic...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.0000000975","completion":"0.00000078"},"top_provider":{"context_length":131072,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-09-30","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-next-80b-a3b-thinking-2509/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0975,"completion":0.78}},{"id":"qwen/qwen3-next-80b-a3b-instruct","canonical_slug":"qwen/qwen3-next-80b-a3b-instruct-2509","hugging_face_id":"Qwen/Qwen3-Next-80B-A3B-Instruct","name":"Qwen: Qwen3 Next 80B A3B Instruct","created":1757612213,"description":"Qwen3-Next-80B-A3B-Instruct is an instruction-tuned chat model in the Qwen3-Next series optimized for fast, stable responses without “thinking” traces. It targets complex tasks across reasoning, code generation, knowledge QA, and multilingual...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000009","completion":"0.0000011"},"top_provider":{"context_length":262144,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-09-30","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-next-80b-a3b-instruct-2509/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.09,"completion":1.1}},{"id":"meituan/longcat-flash-chat:free","canonical_slug":"meituan/longcat-flash-chat","hugging_face_id":"meituan-longcat/LongCat-Flash-Chat","name":"Meituan: LongCat Flash Chat (free)","created":1757427658,"description":"LongCat-Flash-Chat is a large-scale Mixture-of-Experts (MoE) model with 560B total parameters, of which 18.6B–31.3B (≈27B on average) are dynamically activated per input. It introduces a shortcut-connected MoE design to reduce communication overhead and achieve high throughput while maintaining training stability through advanced scaling strategies such as hyperparameter transfer, deterministic computation, and multi-stage optimization.\n\nThis release, LongCat-Flash-Chat, is a non-thinking foundation model optimized for conversational and agentic tasks. It supports long context windows up to 128K tokens and shows competitive performance across reasoning, coding, instruction following, and domain benchmarks, with particular strengths in tool use and complex multi-step interactions.","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":131072,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"meituan/longcat-flash-chat","canonical_slug":"meituan/longcat-flash-chat","hugging_face_id":"meituan-longcat/LongCat-Flash-Chat","name":"Meituan: LongCat Flash Chat","created":1757427658,"description":"LongCat-Flash-Chat is a large-scale Mixture-of-Experts (MoE) model with 560B total parameters, of which 18.6B–31.3B (≈27B on average) are dynamically activated per input. It introduces a shortcut-connected MoE design to reduce...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000002","completion":"0.0000008","input_cache_read":"0.0000002"},"top_provider":{"context_length":131072,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/meituan/longcat-flash-chat/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":0.8}},{"id":"qwen/qwen-plus-2025-07-28","canonical_slug":"qwen/qwen-plus-2025-07-28","hugging_face_id":"","name":"Qwen: Qwen Plus 0728","created":1757347599,"description":"Qwen Plus 0728, based on the Qwen3 foundation model, is a 1 million context hybrid reasoning model with a balanced performance, speed, and cost combination.","context_length":1000000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000026","completion":"0.00000078","input_cache_write":"0.000000325"},"top_provider":{"context_length":1000000,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","presence_penalty","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen-plus-2025-07-28/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.26,"completion":0.78}},{"id":"qwen/qwen-plus-2025-07-28:thinking","canonical_slug":"qwen/qwen-plus-2025-07-28","hugging_face_id":"","name":"Qwen: Qwen Plus 0728 (thinking)","created":1757347599,"description":"Qwen Plus 0728, based on the Qwen3 foundation model, is a 1 million context hybrid reasoning model with a balanced performance, speed, and cost combination.","context_length":1000000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000026","completion":"0.00000078","input_cache_write":"0.000000325"},"top_provider":{"context_length":1000000,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen-plus-2025-07-28/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.26,"completion":0.78}},{"id":"nvidia/nemotron-nano-9b-v2:free","canonical_slug":"nvidia/nemotron-nano-9b-v2","hugging_face_id":"nvidia/NVIDIA-Nemotron-Nano-9B-v2","name":"NVIDIA: Nemotron Nano 9B V2 (free)","created":1757106807,"description":"NVIDIA-Nemotron-Nano-9B-v2 is a large language model (LLM) trained from scratch by NVIDIA, and designed as a unified model for both reasoning and non-reasoning tasks. It responds to user queries and...","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":128000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/nvidia/nemotron-nano-9b-v2/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"nvidia/nemotron-nano-9b-v2","canonical_slug":"nvidia/nemotron-nano-9b-v2","hugging_face_id":"nvidia/NVIDIA-Nemotron-Nano-9B-v2","name":"NVIDIA: Nemotron Nano 9B V2","created":1757106807,"description":"NVIDIA-Nemotron-Nano-9B-v2 is a large language model (LLM) trained from scratch by NVIDIA, and designed as a unified model for both reasoning and non-reasoning tasks. It responds to user queries and...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000004","completion":"0.00000016"},"top_provider":{"context_length":131072,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/nvidia/nemotron-nano-9b-v2/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.04,"completion":0.16}},{"id":"moonshotai/kimi-k2-0905","canonical_slug":"moonshotai/kimi-k2-0905","hugging_face_id":"moonshotai/Kimi-K2-Instruct-0905","name":"MoonshotAI: Kimi K2 0905","created":1757021147,"description":"Kimi K2 0905 is the September update of [Kimi K2 0711](moonshotai/kimi-k2). It is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000004","completion":"0.000002"},"top_provider":{"context_length":262144,"max_completion_tokens":262144,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-12-31","expiration_date":"2026-05-14","links":{"details":"/api/v1/models/moonshotai/kimi-k2-0905/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.4,"completion":2.0}},{"id":"moonshotai/kimi-k2-0905:exacto","canonical_slug":"moonshotai/kimi-k2-0905","hugging_face_id":"moonshotai/Kimi-K2-Instruct-0905","name":"MoonshotAI: Kimi K2 0905 (exacto)","created":1757021147,"description":"Kimi K2 0905 is the September update of [Kimi K2 0711](moonshotai/kimi-k2). It is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32 billion active per forward pass. It supports long-context inference up to 256k tokens, extended from the previous 128k.\n\nThis update improves agentic coding with higher accuracy and better generalization across scaffolds, and enhances frontend coding with more aesthetic and functional outputs for web, 3D, and related tasks. Kimi K2 is optimized for agentic capabilities, including advanced tool use, reasoning, and code synthesis. It excels across coding (LiveCodeBench, SWE-bench), reasoning (ZebraLogic, GPQA), and tool-use (Tau2, AceBench) benchmarks. The model is trained with a novel stack incorporating the MuonClip optimizer for stable large-scale MoE training.","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000006","completion":"0.0000025"},"top_provider":{"context_length":262144,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.6,"completion":2.5}},{"id":"deepcogito/cogito-v2-preview-llama-70b","canonical_slug":"deepcogito/cogito-v2-preview-llama-70b","hugging_face_id":"deepcogito/cogito-v2-preview-llama-70B","name":"Deep Cogito: Cogito V2 Preview Llama 70B","created":1756831784,"description":"Cogito v2 70B is a dense hybrid reasoning model that combines direct answering capabilities with advanced self-reflection. Built with iterative policy improvement, it delivers strong performance across reasoning tasks while maintaining efficiency through shorter reasoning chains and improved intuition.","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":null},"pricing":{"prompt":"0.00000088","completion":"0.00000088"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"expiration_date":"2026-02-04","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.88,"completion":0.88}},{"id":"deepcogito/cogito-v2-preview-llama-109b-moe","canonical_slug":"deepcogito/cogito-v2-preview-llama-109b-moe","hugging_face_id":"deepcogito/cogito-v2-preview-llama-109B-MoE","name":"Cogito V2 Preview Llama 109B","created":1756831568,"description":"An instruction-tuned, hybrid-reasoning Mixture-of-Experts model built on Llama-4-Scout-17B-16E. Cogito v2 can answer directly or engage an extended “thinking” phase, with alignment guided by Iterated Distillation & Amplification (IDA). It targets coding, STEM, instruction following, and general helpfulness, with stronger multilingual, tool-calling, and reasoning performance than size-equivalent baselines. The model supports long-context use (up to 10M tokens) and standard Transformers workflows. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)","context_length":32767,"architecture":{"modality":"text+image->text","input_modalities":["image","text"],"output_modalities":["text"],"tokenizer":"Llama4","instruct_type":null},"pricing":{"prompt":"0.00000018","completion":"0.00000059"},"top_provider":{"context_length":32767,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"expiration_date":"2026-02-04","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.18,"completion":0.59}},{"id":"deepcogito/cogito-v2-preview-deepseek-671b","canonical_slug":"deepcogito/cogito-v2-preview-deepseek-671b","hugging_face_id":"deepcogito/cogito-v2-preview-deepseek-671B-MoE","name":"Deep Cogito: Cogito V2 Preview Deepseek 671B","created":1756830949,"description":"Cogito v2 is a multilingual, instruction-tuned Mixture of Experts (MoE) large language model with 671 billion parameters. It supports both standard and reasoning-based generation modes. The model introduces hybrid reasoning via Iterated Distillation and Amplification (IDA)—an iterative self-improvement strategy designed to scale alignment with general intelligence. Cogito v2 has been optimized for STEM, programming, instruction following, and tool use. It supports 128k context length and offers strong performance in both multilingual and code-heavy environments. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)","context_length":163840,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":null},"pricing":{"prompt":"0.00000125","completion":"0.00000125","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":163840,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","stop","temperature","top_k","top_p"],"default_parameters":{},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.25,"completion":1.25}},{"id":"stepfun-ai/step3","canonical_slug":"stepfun-ai/step3","hugging_face_id":"stepfun-ai/step3","name":"StepFun: Step3","created":1756415375,"description":"Step3 is a cutting-edge multimodal reasoning model—built on a Mixture-of-Experts architecture with 321B total parameters and 38B active. It is designed end-to-end to minimize decoding costs while delivering top-tier performance in vision–language reasoning. Through the co-design of Multi-Matrix Factorization Attention (MFA) and Attention-FFN Disaggregation (AFD), Step3 maintains exceptional efficiency across both flagship and low-end accelerators.","context_length":65536,"architecture":{"modality":"text+image->text","input_modalities":["image","text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000057","completion":"0.00000142"},"top_provider":{"context_length":65536,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","reasoning","response_format","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"expiration_date":"2026-02-08","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.57,"completion":1.42}},{"id":"qwen/qwen3-30b-a3b-thinking-2507","canonical_slug":"qwen/qwen3-30b-a3b-thinking-2507","hugging_face_id":"Qwen/Qwen3-30B-A3B-Thinking-2507","name":"Qwen: Qwen3 30B A3B Thinking 2507","created":1756399192,"description":"Qwen3-30B-A3B-Thinking-2507 is a 30B parameter Mixture-of-Experts reasoning model optimized for complex tasks requiring extended multi-step thinking. The model is designed specifically for “thinking mode,” where internal reasoning traces are separated...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000008","completion":"0.0000004","input_cache_read":"0.00000008"},"top_provider":{"context_length":131072,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-06-30","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-30b-a3b-thinking-2507/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.08,"completion":0.4}},{"id":"x-ai/grok-code-fast-1","canonical_slug":"x-ai/grok-code-fast-1","hugging_face_id":"","name":"xAI: Grok Code Fast 1","created":1756238927,"description":"Grok Code Fast 1 is a speedy and economical reasoning model that excels at agentic coding. With reasoning traces visible in the response, developers can steer Grok Code for high-quality...","context_length":256000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Grok","instruct_type":null},"pricing":{"prompt":"0.0000002","completion":"0.0000015","web_search":"0.005","input_cache_read":"0.00000002"},"top_provider":{"context_length":256000,"max_completion_tokens":10000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","logprobs","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-09-30","expiration_date":"2026-05-15","links":{"details":"/api/v1/models/x-ai/grok-code-fast-1/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":1.5}},{"id":"nousresearch/hermes-4-70b","canonical_slug":"nousresearch/hermes-4-70b","hugging_face_id":"NousResearch/Hermes-4-70B","name":"Nous: Hermes 4 70B","created":1756236182,"description":"Hermes 4 70B is a hybrid reasoning model from Nous Research, built on Meta-Llama-3.1-70B. It introduces the same hybrid mode as the larger 405B release, allowing the model to either...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":null},"pricing":{"prompt":"0.00000013","completion":"0.0000004"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-08-31","expiration_date":null,"links":{"details":"/api/v1/models/nousresearch/hermes-4-70b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.13,"completion":0.4}},{"id":"nousresearch/hermes-4-405b","canonical_slug":"nousresearch/hermes-4-405b","hugging_face_id":"NousResearch/Hermes-4-405B","name":"Nous: Hermes 4 405B","created":1756235463,"description":"Hermes 4 is a large-scale reasoning model built on Meta-Llama-3.1-405B and released by Nous Research. It introduces a hybrid reasoning mode, where the model can choose to deliberate internally with...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.000001","completion":"0.000003"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-08-31","expiration_date":null,"links":{"details":"/api/v1/models/nousresearch/hermes-4-405b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.0,"completion":3.0}},{"id":"google/gemini-2.5-flash-image-preview","canonical_slug":"google/gemini-2.5-flash-image-preview","hugging_face_id":"","name":"Google: Gemini 2.5 Flash Image Preview (Nano Banana)","created":1756218977,"description":"Gemini 2.5 Flash Image Preview, a.k.a. \"Nano Banana,\" is a state of the art image generation model with contextual understanding. It is capable of image generation, edits, and multi-turn conversations.","context_length":32768,"architecture":{"modality":"text+image->text+image","input_modalities":["image","text"],"output_modalities":["image","text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0.0000003","completion":"0.0000025","request":"0","image":"0.001238","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":32768,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","seed","structured_outputs","temperature","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.3,"completion":2.5}},{"id":"deepseek/deepseek-chat-v3.1","canonical_slug":"deepseek/deepseek-chat-v3.1","hugging_face_id":"deepseek-ai/DeepSeek-V3.1","name":"DeepSeek: DeepSeek V3.1","created":1755779628,"description":"DeepSeek-V3.1 is a large hybrid reasoning model (671B parameters, 37B active) that supports both thinking and non-thinking modes via prompt templates. It extends the DeepSeek-V3 base with a two-phase long-context training process, reaching up to 128K tokens, and uses FP8 microscaling for efficient inference. Users can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)\r\n\r\nThe model improves tool use, code generation, and reasoning efficiency, achieving performance comparable to DeepSeek-R1 on difficult benchmarks while responding more quickly. It supports structured tool calling, code agents, and search agents, making it suitable for research, coding, and agentic workflows. \r\n\r\nIt succeeds the [DeepSeek V3-0324](/deepseek/deepseek-chat-v3-0324) model and performs well on a variety of tasks.","context_length":163840,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":"deepseek-v3.1"},"pricing":{"prompt":"0.00000021","completion":"0.00000079","input_cache_read":"0.00000013"},"top_provider":{"context_length":163840,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/deepseek/deepseek-chat-v3.1/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.15,"completion":0.75}},{"id":"openai/gpt-4o-audio-preview","canonical_slug":"openai/gpt-4o-audio-preview","hugging_face_id":"","name":"OpenAI: GPT-4o Audio","created":1755233061,"description":"The gpt-4o-audio-preview model adds support for audio inputs as prompts. This enhancement allows the model to detect nuances within audio recordings and add depth to generated user experiences. Audio outputs...","context_length":128000,"architecture":{"modality":"text+audio->text+audio","input_modalities":["audio","text"],"output_modalities":["text","audio"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.0000025","completion":"0.00001","audio":"0.00004"},"top_provider":{"context_length":128000,"max_completion_tokens":16384,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2023-10-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-4o-audio-preview/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.5,"completion":10.0}},{"id":"mistralai/mistral-medium-3.1","canonical_slug":"mistralai/mistral-medium-3.1","hugging_face_id":"","name":"Mistral: Mistral Medium 3.1","created":1755095639,"description":"Mistral Medium 3.1 is an updated version of Mistral Medium 3, which is a high-performance enterprise-grade language model designed to deliver frontier-level capabilities at significantly reduced operational cost. It balances...","context_length":131072,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.0000004","completion":"0.000002","input_cache_read":"0.00000004"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3},"supported_voices":null,"knowledge_cutoff":"2025-06-30","expiration_date":null,"links":{"details":"/api/v1/models/mistralai/mistral-medium-3.1/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.4,"completion":2.0}},{"id":"baidu/ernie-4.5-21b-a3b","canonical_slug":"baidu/ernie-4.5-21b-a3b","hugging_face_id":"baidu/ERNIE-4.5-21B-A3B-PT","name":"Baidu: ERNIE 4.5 21B A3B","created":1755034167,"description":"A sophisticated text-based Mixture-of-Experts (MoE) model featuring 21B total parameters with 3B activated per token, delivering exceptional multimodal understanding and generation through heterogeneous MoE structures and modality-isolated routing. Supporting an...","context_length":120000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000007","completion":"0.00000028"},"top_provider":{"context_length":120000,"max_completion_tokens":8000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.8,"top_p":0.8,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/baidu/ernie-4.5-21b-a3b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.07,"completion":0.28}},{"id":"baidu/ernie-4.5-vl-28b-a3b","canonical_slug":"baidu/ernie-4.5-vl-28b-a3b","hugging_face_id":"baidu/ERNIE-4.5-VL-28B-A3B-PT","name":"Baidu: ERNIE 4.5 VL 28B A3B","created":1755032836,"description":"A powerful multimodal Mixture-of-Experts chat model featuring 28B total parameters with 3B activated per token, delivering exceptional text and vision understanding through its innovative heterogeneous MoE structure with modality-isolated routing....","context_length":30000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000014","completion":"0.00000056"},"top_provider":{"context_length":30000,"max_completion_tokens":8000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/baidu/ernie-4.5-vl-28b-a3b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.14,"completion":0.56}},{"id":"z-ai/glm-4.5v","canonical_slug":"z-ai/glm-4.5v","hugging_face_id":"zai-org/GLM-4.5V","name":"Z.ai: GLM 4.5V","created":1754922288,"description":"GLM-4.5V is a vision-language foundation model for multimodal agent applications. Built on a Mixture-of-Experts (MoE) architecture with 106B parameters and 12B activated parameters, it achieves state-of-the-art results in video understanding,...","context_length":65536,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000006","completion":"0.0000018","input_cache_read":"0.00000011"},"top_provider":{"context_length":65536,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.75,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-12-31","expiration_date":null,"links":{"details":"/api/v1/models/z-ai/glm-4.5v/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.6,"completion":1.8}},{"id":"ai21/jamba-mini-1.7","canonical_slug":"ai21/jamba-mini-1.7","hugging_face_id":"ai21labs/AI21-Jamba-Mini-1.7","name":"AI21: Jamba Mini 1.7","created":1754670601,"description":"Jamba Mini 1.7 is a compact and efficient member of the Jamba open model family, incorporating key improvements in grounding and instruction-following while maintaining the benefits of the SSM-Transformer hybrid architecture and 256K context window. Despite its compact size, it delivers accurate, contextually grounded responses and improved steerability.","context_length":256000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000002","completion":"0.0000004"},"top_provider":{"context_length":256000,"max_completion_tokens":4096,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","stop","temperature","tool_choice","tools","top_p"],"default_parameters":{},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":0.4}},{"id":"ai21/jamba-large-1.7","canonical_slug":"ai21/jamba-large-1.7","hugging_face_id":"ai21labs/AI21-Jamba-Large-1.7","name":"AI21: Jamba Large 1.7","created":1754669020,"description":"Jamba Large 1.7 is the latest model in the Jamba open family, offering improvements in grounding, instruction-following, and overall efficiency. Built on a hybrid SSM-Transformer architecture with a 256K context...","context_length":256000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.000002","completion":"0.000008"},"top_provider":{"context_length":256000,"max_completion_tokens":4096,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","stop","temperature","tool_choice","tools","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-08-31","expiration_date":null,"links":{"details":"/api/v1/models/ai21/jamba-large-1.7/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":8.0}},{"id":"openai/gpt-5-chat","canonical_slug":"openai/gpt-5-chat-2025-08-07","hugging_face_id":"","name":"OpenAI: GPT-5 Chat","created":1754587837,"description":"GPT-5 Chat is designed for advanced, natural, multimodal, and context-aware conversations for enterprise applications.","context_length":128000,"architecture":{"modality":"text+image+file->text","input_modalities":["file","image","text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000125","completion":"0.00001","web_search":"0.01","input_cache_read":"0.000000125"},"top_provider":{"context_length":128000,"max_completion_tokens":16384,"is_moderated":true},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","seed","structured_outputs"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-09-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5-chat-2025-08-07/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.25,"completion":10.0}},{"id":"openai/gpt-5","canonical_slug":"openai/gpt-5-2025-08-07","hugging_face_id":"","name":"OpenAI: GPT-5","created":1754587413,"description":"GPT-5 is OpenAI’s most advanced model, offering major improvements in reasoning, code quality, and user experience. It is optimized for complex tasks that require step-by-step reasoning, instruction following, and accuracy in high-stakes use cases. It supports test-time routing features and advanced prompt understanding, including user-specified intent like \"think hard about this.\" Improvements include reductions in hallucination, sycophancy, and better performance in coding, writing, and health-related tasks.","context_length":400000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000125","completion":"0.00001","input_cache_read":"0.000000125"},"top_provider":{"context_length":400000,"max_completion_tokens":128000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-09-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5-2025-08-07/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.25,"completion":10.0}},{"id":"openai/gpt-5-mini","canonical_slug":"openai/gpt-5-mini-2025-08-07","hugging_face_id":"","name":"OpenAI: GPT-5 Mini","created":1754587407,"description":"GPT-5 Mini is a compact version of GPT-5, designed to handle lighter-weight reasoning tasks. It provides the same instruction-following and safety-tuning benefits as GPT-5, but with reduced latency and cost....","context_length":400000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000025","completion":"0.000002","web_search":"0.01","input_cache_read":"0.000000025"},"top_provider":{"context_length":400000,"max_completion_tokens":128000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-05-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5-mini-2025-08-07/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.25,"completion":2.0}},{"id":"openai/gpt-5-nano","canonical_slug":"openai/gpt-5-nano-2025-08-07","hugging_face_id":"","name":"OpenAI: GPT-5 Nano","created":1754587402,"description":"GPT-5-Nano is the smallest and fastest variant in the GPT-5 system, optimized for developer tools, rapid interactions, and ultra-low latency environments. While limited in reasoning depth compared to its larger...","context_length":400000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000005","completion":"0.0000004","input_cache_read":"0.00000001"},"top_provider":{"context_length":400000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-05-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5-nano-2025-08-07/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.05,"completion":0.4}},{"id":"openai/gpt-oss-120b:exacto","canonical_slug":"openai/gpt-oss-120b","hugging_face_id":"openai/gpt-oss-120b","name":"OpenAI: gpt-oss-120b (exacto)","created":1754414231,"description":"gpt-oss-120b is an open-weight, 117B-parameter Mixture-of-Experts (MoE) language model from OpenAI designed for high-reasoning, agentic, and general-purpose production use cases. It activates 5.1B parameters per forward pass and is optimized to run on a single H100 GPU with native MXFP4 quantization. The model supports configurable reasoning depth, full chain-of-thought access, and native tool use, including function calling, browsing, and structured output generation.","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.000000039","completion":"0.00000019"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.039,"completion":0.19}},{"id":"openai/gpt-oss-120b","canonical_slug":"openai/gpt-oss-120b","hugging_face_id":"openai/gpt-oss-120b","name":"OpenAI: gpt-oss-120b","created":1754414231,"description":"gpt-oss-120b is an open-weight, 117B-parameter Mixture-of-Experts (MoE) language model from OpenAI designed for high-reasoning, agentic, and general-purpose production use cases. It activates 5.1B parameters per forward pass and is optimized to run on a single H100 GPU with native MXFP4 quantization. The model supports configurable reasoning depth, full chain-of-thought access, and native tool use, including function calling, browsing, and structured output generation.","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.000000039","completion":"0.00000018"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-oss-120b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.02,"completion":0.1}},{"id":"openai/gpt-oss-20b:free","canonical_slug":"openai/gpt-oss-20b","hugging_face_id":"openai/gpt-oss-20b","name":"OpenAI: gpt-oss-20b (free)","created":1754414229,"description":"gpt-oss-20b is an open-weight 21B parameter model released by OpenAI under the Apache 2.0 license. It uses a Mixture-of-Experts (MoE) architecture with 3.6B active parameters per forward pass, optimized for...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":131072,"max_completion_tokens":8192,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","seed","stop","temperature","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-oss-20b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"openai/gpt-oss-20b","canonical_slug":"openai/gpt-oss-20b","hugging_face_id":"openai/gpt-oss-20b","name":"OpenAI: gpt-oss-20b","created":1754414229,"description":"gpt-oss-20b is an open-weight 21B parameter model released by OpenAI under the Apache 2.0 license. It uses a Mixture-of-Experts (MoE) architecture with 3.6B active parameters per forward pass, optimized for lower-latency inference and deployability on consumer or single-GPU hardware. The model is trained in OpenAI’s Harmony response format and supports reasoning level configuration, fine-tuning, and agentic capabilities including function calling, tool use, and structured outputs.","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000003","completion":"0.00000014"},"top_provider":{"context_length":131072,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-oss-20b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.02,"completion":0.1}},{"id":"anthropic/claude-opus-4.1","canonical_slug":"anthropic/claude-4.1-opus-20250805","hugging_face_id":"","name":"Anthropic: Claude Opus 4.1","created":1754411591,"description":"Claude Opus 4.1 is an updated version of Anthropic’s flagship model, offering improved performance in coding, reasoning, and agentic tasks. It achieves 74.5% on SWE-bench Verified and shows notable gains...","context_length":200000,"architecture":{"modality":"text+image+file->text","input_modalities":["image","text","file"],"output_modalities":["text"],"tokenizer":"Claude","instruct_type":null},"pricing":{"prompt":"0.000015","completion":"0.000075","web_search":"0.01","input_cache_read":"0.0000015","input_cache_write":"0.00001875"},"top_provider":{"context_length":200000,"max_completion_tokens":32000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-01-31","expiration_date":null,"links":{"details":"/api/v1/models/anthropic/claude-4.1-opus-20250805/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":15.0,"completion":75.0}},{"id":"mistralai/codestral-2508","canonical_slug":"mistralai/codestral-2508","hugging_face_id":"","name":"Mistral: Codestral 2508","created":1754079630,"description":"Mistral's cutting-edge language model for coding released end of July 2025. Codestral specializes in low-latency, high-frequency tasks such as fill-in-the-middle (FIM), code correction and test generation.\n\n[Blog Post](https://mistral.ai/news/codestral-25-08)","context_length":256000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.0000003","completion":"0.0000009","input_cache_read":"0.00000003"},"top_provider":{"context_length":256000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/mistralai/codestral-2508/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.3,"completion":0.9}},{"id":"qwen/qwen3-coder-30b-a3b-instruct","canonical_slug":"qwen/qwen3-coder-30b-a3b-instruct","hugging_face_id":"Qwen/Qwen3-Coder-30B-A3B-Instruct","name":"Qwen: Qwen3 Coder 30B A3B Instruct","created":1753972379,"description":"Qwen3-Coder-30B-A3B-Instruct is a 30.5B parameter Mixture-of-Experts (MoE) model with 128 experts (8 active per forward pass), designed for advanced code generation, repository-scale understanding, and agentic tool use. Built on the...","context_length":160000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000007","completion":"0.00000027"},"top_provider":{"context_length":160000,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-06-30","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-coder-30b-a3b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.07,"completion":0.27}},{"id":"qwen/qwen3-30b-a3b-instruct-2507","canonical_slug":"qwen/qwen3-30b-a3b-instruct-2507","hugging_face_id":"Qwen/Qwen3-30B-A3B-Instruct-2507","name":"Qwen: Qwen3 30B A3B Instruct 2507","created":1753806965,"description":"Qwen3-30B-A3B-Instruct-2507 is a 30.5B-parameter mixture-of-experts language model from Qwen, with 3.3B active parameters per inference. It operates in non-thinking mode and is designed for high-quality instruction following, multilingual understanding, and...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000009","completion":"0.0000003"},"top_provider":{"context_length":262144,"max_completion_tokens":262144,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-06-30","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-30b-a3b-instruct-2507/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.09,"completion":0.3}},{"id":"z-ai/glm-4.5","canonical_slug":"z-ai/glm-4.5","hugging_face_id":"zai-org/GLM-4.5","name":"Z.ai: GLM 4.5","created":1753471347,"description":"GLM-4.5 is our latest flagship foundation model, purpose-built for agent-based applications. It leverages a Mixture-of-Experts (MoE) architecture and supports a context length of up to 128k tokens. GLM-4.5 delivers significantly...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000006","completion":"0.0000022","input_cache_read":"0.00000011"},"top_provider":{"context_length":131072,"max_completion_tokens":98304,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.75,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-12-31","expiration_date":null,"links":{"details":"/api/v1/models/z-ai/glm-4.5/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.6,"completion":2.2}},{"id":"z-ai/glm-4.5-air:free","canonical_slug":"z-ai/glm-4.5-air","hugging_face_id":"zai-org/GLM-4.5-Air","name":"Z.ai: GLM 4.5 Air (free)","created":1753471258,"description":"GLM-4.5-Air is the lightweight variant of our latest flagship model family, also purpose-built for agent-centric applications. Like GLM-4.5, it adopts the Mixture-of-Experts (MoE) architecture but with a more compact parameter...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":131072,"max_completion_tokens":96000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.75,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-12-31","expiration_date":null,"links":{"details":"/api/v1/models/z-ai/glm-4.5-air/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"z-ai/glm-4.5-air","canonical_slug":"z-ai/glm-4.5-air","hugging_face_id":"zai-org/GLM-4.5-Air","name":"Z.ai: GLM 4.5 Air","created":1753471258,"description":"GLM-4.5-Air is the lightweight variant of our latest flagship model family, also purpose-built for agent-centric applications. Like GLM-4.5, it adopts the Mixture-of-Experts (MoE) architecture but with a more compact parameter...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000013","completion":"0.00000085","input_cache_read":"0.000000025"},"top_provider":{"context_length":131072,"max_completion_tokens":98304,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.75,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-12-31","expiration_date":null,"links":{"details":"/api/v1/models/z-ai/glm-4.5-air/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.13,"completion":0.85}},{"id":"qwen/qwen3-235b-a22b-thinking-2507","canonical_slug":"qwen/qwen3-235b-a22b-thinking-2507","hugging_face_id":"Qwen/Qwen3-235B-A22B-Thinking-2507","name":"Qwen: Qwen3 235B A22B Thinking 2507","created":1753449557,"description":"Qwen3-235B-A22B-Thinking-2507 is a high-performance, open-weight Mixture-of-Experts (MoE) language model optimized for complex reasoning tasks. It activates 22B of its 235B parameters per forward pass and natively supports up to 262,144...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":"qwen3"},"pricing":{"prompt":"0.0000001495","completion":"0.000001495"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-06-30","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-235b-a22b-thinking-2507/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1495,"completion":1.495}},{"id":"z-ai/glm-4-32b","canonical_slug":"z-ai/glm-4-32b-0414","hugging_face_id":"","name":"Z.ai: GLM 4 32B ","created":1753376617,"description":"GLM 4 32B is a cost-effective foundation language model. It can efficiently perform complex tasks and has significantly enhanced capabilities in tool use, online search, and code-related intelligent tasks. It...","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000001","completion":"0.0000001"},"top_provider":{"context_length":128000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.75,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/z-ai/glm-4-32b-0414/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.1}},{"id":"qwen/qwen3-coder:free","canonical_slug":"qwen/qwen3-coder-480b-a35b-07-25","hugging_face_id":"Qwen/Qwen3-Coder-480B-A35B-Instruct","name":"Qwen: Qwen3 Coder 480B A35B (free)","created":1753230546,"description":"Qwen3-Coder-480B-A35B-Instruct is a Mixture-of-Experts (MoE) code generation model developed by the Qwen team. It is optimized for agentic coding tasks such as function calling, tool use, and long-context reasoning over...","context_length":262000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":262000,"max_completion_tokens":262000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-06-30","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-coder-480b-a35b-07-25/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"qwen/qwen3-coder","canonical_slug":"qwen/qwen3-coder-480b-a35b-07-25","hugging_face_id":"Qwen/Qwen3-Coder-480B-A35B-Instruct","name":"Qwen: Qwen3 Coder 480B A35B","created":1753230546,"description":"Qwen3-Coder-480B-A35B-Instruct is a Mixture-of-Experts (MoE) code generation model developed by the Qwen team. It is optimized for agentic coding tasks such as function calling, tool use, and long-context reasoning over...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000022","completion":"0.0000018"},"top_provider":{"context_length":262144,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-06-30","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-coder-480b-a35b-07-25/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.22,"completion":1.8}},{"id":"qwen/qwen3-coder:exacto","canonical_slug":"qwen/qwen3-coder-480b-a35b-07-25","hugging_face_id":"Qwen/Qwen3-Coder-480B-A35B-Instruct","name":"Qwen: Qwen3 Coder 480B A35B (exacto)","created":1753230546,"description":"Qwen3-Coder-480B-A35B-Instruct is a Mixture-of-Experts (MoE) code generation model developed by the Qwen team. It is optimized for agentic coding tasks such as function calling, tool use, and long-context reasoning over repositories. The model features 480 billion total parameters, with 35 billion active per forward pass (8 out of 160 experts).\n\nPricing for the Alibaba endpoints varies by context length. Once a request is greater than 128k input tokens, the higher pricing is used.","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000022","completion":"0.0000018","input_cache_read":"0.000000022"},"top_provider":{"context_length":262144,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","repetition_penalty","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.22,"completion":1.8}},{"id":"bytedance/ui-tars-1.5-7b","canonical_slug":"bytedance/ui-tars-1.5-7b","hugging_face_id":"ByteDance-Seed/UI-TARS-1.5-7B","name":"ByteDance: UI-TARS 7B ","created":1753205056,"description":"UI-TARS-1.5 is a multimodal vision-language agent optimized for GUI-based environments, including desktop interfaces, web browsers, mobile systems, and games. Built by ByteDance, it builds upon the UI-TARS framework with reinforcement...","context_length":128000,"architecture":{"modality":"text+image->text","input_modalities":["image","text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000001","completion":"0.0000002","input_cache_read":"0.0000001"},"top_provider":{"context_length":128000,"max_completion_tokens":2048,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-01-31","expiration_date":null,"links":{"details":"/api/v1/models/bytedance/ui-tars-1.5-7b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.2}},{"id":"google/gemini-2.5-flash-lite","canonical_slug":"google/gemini-2.5-flash-lite","hugging_face_id":"","name":"Google: Gemini 2.5 Flash Lite","created":1753200276,"description":"Gemini 2.5 Flash-Lite is a lightweight reasoning model in the Gemini 2.5 family, optimized for ultra-low latency and cost efficiency. It offers improved throughput, faster token generation, and better performance...","context_length":1048576,"architecture":{"modality":"text+image+file+audio+video->text","input_modalities":["text","image","file","audio","video"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0.0000001","completion":"0.0000004","image":"0.0000001","audio":"0.0000003","web_search":"0.014","internal_reasoning":"0.0000004","input_cache_read":"0.00000001","input_cache_write":"0.00000008333333333333334"},"top_provider":{"context_length":1048576,"max_completion_tokens":65535,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-01-31","expiration_date":null,"links":{"details":"/api/v1/models/google/gemini-2.5-flash-lite/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.4}},{"id":"qwen/qwen3-235b-a22b-2507","canonical_slug":"qwen/qwen3-235b-a22b-07-25","hugging_face_id":"Qwen/Qwen3-235B-A22B-Instruct-2507","name":"Qwen: Qwen3 235B A22B Instruct 2507","created":1753119555,"description":"Qwen3-235B-A22B-Instruct-2507 is a multilingual, instruction-tuned mixture-of-experts language model based on the Qwen3-235B architecture, with 22B active parameters per forward pass. It is optimized for general-purpose text generation, including instruction following,...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.000000071","completion":"0.0000001"},"top_provider":{"context_length":262144,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-06-30","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-235b-a22b-07-25/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.071,"completion":0.1}},{"id":"switchpoint/router","canonical_slug":"switchpoint/router","hugging_face_id":"","name":"Switchpoint Router","created":1752272899,"description":"Switchpoint AI's router instantly analyzes your request and directs it to the optimal AI from an ever-evolving library. As the world of LLMs advances, our router gets smarter, ensuring you...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000085","completion":"0.0000034"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/switchpoint/router/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.85,"completion":3.4}},{"id":"moonshotai/kimi-k2:free","canonical_slug":"moonshotai/kimi-k2","hugging_face_id":"moonshotai/Kimi-K2-Instruct","name":"MoonshotAI: Kimi K2 0711 (free)","created":1752263252,"description":"Kimi K2 Instruct is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32 billion active per forward pass. It is optimized for agentic capabilities, including advanced tool use, reasoning, and code synthesis. Kimi K2 excels across a broad range of benchmarks, particularly in coding (LiveCodeBench, SWE-bench), reasoning (ZebraLogic, GPQA), and tool-use (Tau2, AceBench) tasks. It supports long-context inference up to 128K tokens and is designed with a novel training stack that includes the MuonClip optimizer for stable large-scale MoE training.","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":32768,"max_completion_tokens":4096,"is_moderated":true},"per_request_limits":null,"supported_parameters":["max_tokens","seed","stop","temperature"],"default_parameters":{},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"moonshotai/kimi-k2","canonical_slug":"moonshotai/kimi-k2","hugging_face_id":"moonshotai/Kimi-K2-Instruct","name":"MoonshotAI: Kimi K2 0711","created":1752263252,"description":"Kimi K2 Instruct is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32 billion active per forward pass. It is optimized for...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000057","completion":"0.0000023"},"top_provider":{"context_length":131072,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-12-31","expiration_date":null,"links":{"details":"/api/v1/models/moonshotai/kimi-k2/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.57,"completion":2.3}},{"id":"thudm/glm-4.1v-9b-thinking","canonical_slug":"thudm/glm-4.1v-9b-thinking","hugging_face_id":"THUDM/GLM-4.1V-9B-Thinking","name":"THUDM: GLM 4.1V 9B Thinking","created":1752244385,"description":"GLM-4.1V-9B-Thinking is a 9B parameter vision-language model developed by THUDM, based on the GLM-4-9B foundation. It introduces a reasoning-centric \"thinking paradigm\" enhanced with reinforcement learning to improve multimodal reasoning, long-context understanding (up to 64K tokens), and complex problem solving. It achieves state-of-the-art performance among models in its class, outperforming even larger models like Qwen-2.5-VL-72B on a majority of benchmark tasks.","context_length":65536,"architecture":{"modality":"text+image->text","input_modalities":["image","text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.000000035","completion":"0.000000138","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":65536,"max_completion_tokens":8000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.035,"completion":0.138}},{"id":"mistralai/devstral-medium","canonical_slug":"mistralai/devstral-medium-2507","hugging_face_id":"","name":"Mistral: Devstral Medium","created":1752161321,"description":"Devstral Medium is a high-performance code generation and agentic reasoning model developed jointly by Mistral AI and All Hands AI. Positioned as a step up from Devstral Small, it achieves...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.0000004","completion":"0.000002","input_cache_read":"0.00000004"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3},"supported_voices":null,"knowledge_cutoff":"2025-06-30","expiration_date":null,"links":{"details":"/api/v1/models/mistralai/devstral-medium-2507/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.4,"completion":2.0}},{"id":"mistralai/devstral-small","canonical_slug":"mistralai/devstral-small-2507","hugging_face_id":"mistralai/Devstral-Small-2507","name":"Mistral: Devstral Small 1.1","created":1752160751,"description":"Devstral Small 1.1 is a 24B parameter open-weight language model for software engineering agents, developed by Mistral AI in collaboration with All Hands AI. Finetuned from Mistral Small 3.1 and...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.0000001","completion":"0.0000003","input_cache_read":"0.00000001"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/mistralai/devstral-small-2507/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.3}},{"id":"cognitivecomputations/dolphin-mistral-24b-venice-edition:free","canonical_slug":"venice/uncensored","hugging_face_id":"cognitivecomputations/Dolphin-Mistral-24B-Venice-Edition","name":"Venice: Uncensored (free)","created":1752094966,"description":"Venice Uncensored Dolphin Mistral 24B Venice Edition is a fine-tuned variant of Mistral-Small-24B-Instruct-2501, developed by dphn.ai in collaboration with Venice.ai. This model is designed as an “uncensored” instruct-tuned LLM, preserving...","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-04-30","expiration_date":null,"links":{"details":"/api/v1/models/venice/uncensored/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"x-ai/grok-4","canonical_slug":"x-ai/grok-4-07-09","hugging_face_id":"","name":"xAI: Grok 4","created":1752087689,"description":"Grok 4 is xAI's latest reasoning model with a 256k context window. It supports parallel tool calling, structured outputs, and both image and text inputs. Note that reasoning is not...","context_length":256000,"architecture":{"modality":"text+image+file->text","input_modalities":["image","text","file"],"output_modalities":["text"],"tokenizer":"Grok","instruct_type":null},"pricing":{"prompt":"0.000003","completion":"0.000015","web_search":"0.005","input_cache_read":"0.00000075"},"top_provider":{"context_length":256000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","logprobs","max_tokens","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-07-31","expiration_date":"2026-05-15","links":{"details":"/api/v1/models/x-ai/grok-4-07-09/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":3.0,"completion":15.0}},{"id":"google/gemma-3n-e2b-it:free","canonical_slug":"google/gemma-3n-e2b-it","hugging_face_id":"google/gemma-3n-E2B-it","name":"Google: Gemma 3n 2B (free)","created":1752074904,"description":"Gemma 3n E2B IT is a multimodal, instruction-tuned model developed by Google DeepMind, designed to operate efficiently at an effective parameter size of 2B while leveraging a 6B architecture. Based...","context_length":8192,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":8192,"max_completion_tokens":2048,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","seed","temperature","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-08-31","expiration_date":null,"links":{"details":"/api/v1/models/google/gemma-3n-e2b-it/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"tencent/hunyuan-a13b-instruct","canonical_slug":"tencent/hunyuan-a13b-instruct","hugging_face_id":"tencent/Hunyuan-A13B-Instruct","name":"Tencent: Hunyuan A13B Instruct","created":1751987664,"description":"Hunyuan-A13B is a 13B active parameter Mixture-of-Experts (MoE) language model developed by Tencent, with a total parameter count of 80B and support for reasoning via Chain-of-Thought. It offers competitive benchmark...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000014","completion":"0.00000057"},"top_provider":{"context_length":131072,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","reasoning","response_format","structured_outputs","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/tencent/hunyuan-a13b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.14,"completion":0.57}},{"id":"tngtech/deepseek-r1t2-chimera:free","canonical_slug":"tngtech/deepseek-r1t2-chimera","hugging_face_id":"tngtech/DeepSeek-TNG-R1T2-Chimera","name":"TNG: DeepSeek R1T2 Chimera (free)","created":1751986985,"description":"DeepSeek-TNG-R1T2-Chimera is the second-generation Chimera model from TNG Tech. It is a 671 B-parameter mixture-of-experts text-generation model assembled from DeepSeek-AI’s R1-0528, R1, and V3-0324 checkpoints with an Assembly-of-Experts merge. The tri-parent design yields strong reasoning performance while running roughly 20 % faster than the original R1 and more than 2× faster than R1-0528 under vLLM, giving a favorable cost-to-intelligence trade-off. The checkpoint supports contexts up to 60 k tokens in standard use (tested to ~130 k) and maintains consistent <think> token behaviour, making it suitable for long-context analysis, dialogue and other open-ended generation tasks.","context_length":163840,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":163840,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"tngtech/deepseek-r1t2-chimera","canonical_slug":"tngtech/deepseek-r1t2-chimera","hugging_face_id":"tngtech/DeepSeek-TNG-R1T2-Chimera","name":"TNG: DeepSeek R1T2 Chimera","created":1751986985,"description":"DeepSeek-TNG-R1T2-Chimera is the second-generation Chimera model from TNG Tech. It is a 671 B-parameter mixture-of-experts text-generation model assembled from DeepSeek-AI’s R1-0528, R1, and V3-0324 checkpoints with an Assembly-of-Experts merge. The...","context_length":163840,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":null},"pricing":{"prompt":"0.0000003","completion":"0.0000011","input_cache_read":"0.00000015"},"top_provider":{"context_length":163840,"max_completion_tokens":163840,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-07-31","expiration_date":null,"links":{"details":"/api/v1/models/tngtech/deepseek-r1t2-chimera/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.3,"completion":1.1}},{"id":"morph/morph-v3-large","canonical_slug":"morph/morph-v3-large","hugging_face_id":"","name":"Morph: Morph V3 Large","created":1751910858,"description":"Morph's high-accuracy apply model for complex code edits. ~4,500 tokens/sec with 98% accuracy for precise code transformations. The model requires the prompt to be in the following format: <instruction>{instruction}</instruction> <code>{initial_code}</code>...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000009","completion":"0.0000019"},"top_provider":{"context_length":262144,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","stop","temperature"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/morph/morph-v3-large/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.9,"completion":1.9}},{"id":"morph/morph-v3-fast","canonical_slug":"morph/morph-v3-fast","hugging_face_id":"","name":"Morph: Morph V3 Fast","created":1751910002,"description":"Morph's fastest apply model for code edits. ~10,500 tokens/sec with 96% accuracy for rapid code transformations. The model requires the prompt to be in the following format: <instruction>{instruction}</instruction> <code>{initial_code}</code> <update>{edit_snippet}</update>...","context_length":81920,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000008","completion":"0.0000012"},"top_provider":{"context_length":81920,"max_completion_tokens":38000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","stop","temperature"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/morph/morph-v3-fast/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.8,"completion":1.2}},{"id":"baidu/ernie-4.5-vl-424b-a47b","canonical_slug":"baidu/ernie-4.5-vl-424b-a47b","hugging_face_id":"baidu/ERNIE-4.5-VL-424B-A47B-PT","name":"Baidu: ERNIE 4.5 VL 424B A47B ","created":1751300903,"description":"ERNIE-4.5-VL-424B-A47B is a multimodal Mixture-of-Experts (MoE) model from Baidu’s ERNIE 4.5 series, featuring 424B total parameters with 47B active per token. It is trained jointly on text and image data...","context_length":123000,"architecture":{"modality":"text+image->text","input_modalities":["image","text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000042","completion":"0.00000125"},"top_provider":{"context_length":123000,"max_completion_tokens":16000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/baidu/ernie-4.5-vl-424b-a47b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.42,"completion":1.25}},{"id":"baidu/ernie-4.5-300b-a47b","canonical_slug":"baidu/ernie-4.5-300b-a47b","hugging_face_id":"baidu/ERNIE-4.5-300B-A47B-PT","name":"Baidu: ERNIE 4.5 300B A47B ","created":1751300139,"description":"ERNIE-4.5-300B-A47B is a 300B parameter Mixture-of-Experts (MoE) language model developed by Baidu as part of the ERNIE 4.5 series. It activates 47B parameters per token and supports text generation in...","context_length":123000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000028","completion":"0.0000011"},"top_provider":{"context_length":123000,"max_completion_tokens":12000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/baidu/ernie-4.5-300b-a47b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.28,"completion":1.1}},{"id":"thedrummer/anubis-70b-v1.1","canonical_slug":"thedrummer/anubis-70b-v1.1","hugging_face_id":"TheDrummer/Anubis-70B-v1.1","name":"TheDrummer: Anubis 70B V1.1","created":1751208347,"description":"TheDrummer's Anubis v1.1 is an unaligned, creative Llama 3.3 70B model focused on providing character-driven roleplay & stories. It excels at gritty, visceral prose, unique character adherence, and coherent narratives, while maintaining the instruction following Llama 3.3 70B is known for.","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":null},"pricing":{"prompt":"0.00000075","completion":"0.000001","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":131072,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.75,"completion":1.0}},{"id":"inception/mercury","canonical_slug":"inception/mercury","hugging_face_id":"","name":"Inception: Mercury","created":1750973026,"description":"Mercury is the first diffusion large language model (dLLM). Applying a breakthrough discrete diffusion approach, the model runs 5-10x faster than even speed optimized models like GPT-4.1 Nano and Claude...","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000025","completion":"0.00000075","input_cache_read":"0.000000025"},"top_provider":{"context_length":128000,"max_completion_tokens":32000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","stop","structured_outputs","temperature","tool_choice","tools"],"default_parameters":{"temperature":0,"top_p":null,"frequency_penalty":null},"knowledge_cutoff":"2025-01-31","expiration_date":"2026-04-15","links":{"details":"/api/v1/models/inception/mercury/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.25,"completion":0.75}},{"id":"mistralai/mistral-small-3.2-24b-instruct","canonical_slug":"mistralai/mistral-small-3.2-24b-instruct-2506","hugging_face_id":"mistralai/Mistral-Small-3.2-24B-Instruct-2506","name":"Mistral: Mistral Small 3.2 24B","created":1750443016,"description":"Mistral-Small-3.2-24B-Instruct-2506 is an updated 24B parameter model from Mistral optimized for instruction following, repetition reduction, and improved function calling. Compared to the 3.1 release, version 3.2 significantly improves accuracy on...","context_length":128000,"architecture":{"modality":"text+image->text","input_modalities":["image","text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.000000075","completion":"0.0000002"},"top_provider":{"context_length":128000,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.3},"supported_voices":null,"knowledge_cutoff":"2023-10-31","expiration_date":null,"links":{"details":"/api/v1/models/mistralai/mistral-small-3.2-24b-instruct-2506/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.075,"completion":0.2}},{"id":"minimax/minimax-m1","canonical_slug":"minimax/minimax-m1","hugging_face_id":"","name":"MiniMax: MiniMax M1","created":1750200414,"description":"MiniMax-M1 is a large-scale, open-weight reasoning model designed for extended context and high-efficiency inference. It leverages a hybrid Mixture-of-Experts (MoE) architecture paired with a custom \"lightning attention\" mechanism, allowing it...","context_length":1000000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000004","completion":"0.0000022"},"top_provider":{"context_length":1000000,"max_completion_tokens":40000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/minimax/minimax-m1/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.4,"completion":2.2}},{"id":"google/gemini-2.5-flash","canonical_slug":"google/gemini-2.5-flash","hugging_face_id":"","name":"Google: Gemini 2.5 Flash","created":1750172488,"description":"Gemini 2.5 Flash is Google's state-of-the-art workhorse model, specifically designed for advanced reasoning, coding, mathematics, and scientific tasks. It includes built-in \"thinking\" capabilities, enabling it to provide responses with greater...","context_length":1048576,"architecture":{"modality":"text+image+file+audio+video->text","input_modalities":["file","image","text","audio","video"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0.0000003","completion":"0.0000025","image":"0.0000003","audio":"0.000001","web_search":"0.014","internal_reasoning":"0.0000025","input_cache_read":"0.00000003","input_cache_write":"0.00000008333333333333334"},"top_provider":{"context_length":1048576,"max_completion_tokens":65535,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-01-31","expiration_date":null,"links":{"details":"/api/v1/models/google/gemini-2.5-flash/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.3,"completion":2.5}},{"id":"google/gemini-2.5-pro","canonical_slug":"google/gemini-2.5-pro","hugging_face_id":"","name":"Google: Gemini 2.5 Pro","created":1750169544,"description":"Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs “thinking” capabilities, enabling it to reason through responses with enhanced accuracy...","context_length":1048576,"architecture":{"modality":"text+image+file+audio+video->text","input_modalities":["text","image","file","audio","video"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0.00000125","completion":"0.00001","image":"0.00000125","audio":"0.00000125","web_search":"0.014","internal_reasoning":"0.00001","input_cache_read":"0.000000125","input_cache_write":"0.000000375"},"top_provider":{"context_length":1048576,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-01-31","expiration_date":null,"links":{"details":"/api/v1/models/google/gemini-2.5-pro/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.25,"completion":10.0}},{"id":"moonshotai/kimi-dev-72b","canonical_slug":"moonshotai/kimi-dev-72b","hugging_face_id":"moonshotai/Kimi-Dev-72B","name":"MoonshotAI: Kimi Dev 72B","created":1750115909,"description":"Kimi-Dev-72B is an open-source large language model fine-tuned for software engineering and issue resolution tasks. Based on Qwen2.5-72B, it is optimized using large-scale reinforcement learning that applies code patches in real repositories and validates them via full test suite execution—rewarding only correct, robust completions. The model achieves 60.4% on SWE-bench Verified, setting a new benchmark among open-source models for software bug fixing and code reasoning.","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000029","completion":"0.00000115"},"top_provider":{"context_length":131072,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","reasoning","response_format","structured_outputs","temperature","top_k","top_p"],"default_parameters":{},"expiration_date":"2026-02-08","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.29,"completion":1.15}},{"id":"openai/o3-pro","canonical_slug":"openai/o3-pro-2025-06-10","hugging_face_id":"","name":"OpenAI: o3 Pro","created":1749598352,"description":"The o-series of models are trained with reinforcement learning to think before they answer and perform complex reasoning. The o3-pro model uses more compute to think harder and provide consistently...","context_length":200000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","file","image"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00002","completion":"0.00008","web_search":"0.01"},"top_provider":{"context_length":200000,"max_completion_tokens":100000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/o3-pro-2025-06-10/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":20.0,"completion":80.0}},{"id":"x-ai/grok-3-mini","canonical_slug":"x-ai/grok-3-mini","hugging_face_id":"","name":"xAI: Grok 3 Mini","created":1749583245,"description":"A lightweight model that thinks before responding. Fast, smart, and great for logic-based tasks that do not require deep domain knowledge. The raw thinking traces are accessible.","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Grok","instruct_type":null},"pricing":{"prompt":"0.0000003","completion":"0.0000005","web_search":"0.005","input_cache_read":"0.000000075"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","logprobs","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-02-28","expiration_date":"2026-05-15","links":{"details":"/api/v1/models/x-ai/grok-3-mini/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.3,"completion":0.5}},{"id":"x-ai/grok-3","canonical_slug":"x-ai/grok-3","hugging_face_id":"","name":"xAI: Grok 3","created":1749582908,"description":"Grok 3 is the latest model from xAI. It's their flagship model that excels at enterprise use cases like data extraction, coding, and text summarization. Possesses deep domain knowledge in...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Grok","instruct_type":null},"pricing":{"prompt":"0.000003","completion":"0.000015","web_search":"0.005","input_cache_read":"0.00000075"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-02-28","expiration_date":"2026-05-15","links":{"details":"/api/v1/models/x-ai/grok-3/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":3.0,"completion":15.0}},{"id":"mistralai/magistral-small-2506","canonical_slug":"mistralai/magistral-small-2506","hugging_face_id":"mistralai/Magistral-Small-2506","name":"Mistral: Magistral Small 2506","created":1749569561,"description":"Magistral Small is a 24B parameter instruction-tuned model based on Mistral-Small-3.1 (2503), enhanced through supervised fine-tuning on traces from Magistral Medium and further refined via reinforcement learning. It is optimized for reasoning and supports a wide multilingual range, including over 20 languages.","context_length":40000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.0000005","completion":"0.0000015","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":40000,"max_completion_tokens":40000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.5,"completion":1.5}},{"id":"mistralai/magistral-medium-2506:thinking","canonical_slug":"mistralai/magistral-medium-2506","hugging_face_id":"","name":"Mistral: Magistral Medium 2506 (thinking)","created":1749354054,"description":"Magistral is Mistral's first reasoning model. It is ideal for general purpose use requiring longer thought processing and better accuracy than with non-reasoning LLMs. From legal research and financial forecasting to software development and creative storytelling — this model solves multi-step challenges where transparency and precision are critical.","context_length":40960,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.000002","completion":"0.000005","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":40960,"max_completion_tokens":40000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":5.0}},{"id":"mistralai/magistral-medium-2506","canonical_slug":"mistralai/magistral-medium-2506","hugging_face_id":"","name":"Mistral: Magistral Medium 2506","created":1749354054,"description":"Magistral is Mistral's first reasoning model. It is ideal for general purpose use requiring longer thought processing and better accuracy than with non-reasoning LLMs. From legal research and financial forecasting to software development and creative storytelling — this model solves multi-step challenges where transparency and precision are critical.","context_length":40960,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.000002","completion":"0.000005","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":40960,"max_completion_tokens":40000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":5.0}},{"id":"google/gemini-2.5-pro-preview","canonical_slug":"google/gemini-2.5-pro-preview-06-05","hugging_face_id":"","name":"Google: Gemini 2.5 Pro Preview 06-05","created":1749137257,"description":"Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs “thinking” capabilities, enabling it to reason through responses with enhanced accuracy...","context_length":1048576,"architecture":{"modality":"text+image+file+audio->text","input_modalities":["file","image","text","audio"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0.00000125","completion":"0.00001","image":"0.00000125","audio":"0.00000125","web_search":"0.014","internal_reasoning":"0.00001","input_cache_read":"0.000000125","input_cache_write":"0.000000375"},"top_provider":{"context_length":1048576,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-01-31","expiration_date":null,"links":{"details":"/api/v1/models/google/gemini-2.5-pro-preview-06-05/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.25,"completion":10.0}},{"id":"deepseek/deepseek-r1-0528-qwen3-8b","canonical_slug":"deepseek/deepseek-r1-0528-qwen3-8b","hugging_face_id":"deepseek-ai/deepseek-r1-0528-qwen3-8b","name":"DeepSeek: DeepSeek R1 0528 Qwen3 8B","created":1748538543,"description":"DeepSeek-R1-0528 is a lightly upgraded release of DeepSeek R1 that taps more compute and smarter post-training tricks, pushing its reasoning and inference to the brink of flagship models like O3 and Gemini 2.5 Pro.\nIt now tops math, programming, and logic leaderboards, showcasing a step-change in depth-of-thought.\nThe distilled variant, DeepSeek-R1-0528-Qwen3-8B, transfers this chain-of-thought into an 8 B-parameter form, beating standard Qwen3 8B by +10 pp and tying the 235 B “thinking” giant on AIME 2024.","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":"deepseek-r1"},"pricing":{"prompt":"0.00000006","completion":"0.00000009","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":128000,"max_completion_tokens":32000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.06,"completion":0.09}},{"id":"deepseek/deepseek-r1-0528","canonical_slug":"deepseek/deepseek-r1-0528","hugging_face_id":"deepseek-ai/DeepSeek-R1-0528","name":"DeepSeek: R1 0528","created":1748455170,"description":"May 28th update to the [original DeepSeek R1](/deepseek/deepseek-r1) Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass.\r\n\r\nFully open-source model.","context_length":163840,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":"deepseek-r1"},"pricing":{"prompt":"0.0000005","completion":"0.00000215","input_cache_read":"0.00000035"},"top_provider":{"context_length":163840,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/deepseek/deepseek-r1-0528/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.4,"completion":1.75}},{"id":"anthropic/claude-opus-4","canonical_slug":"anthropic/claude-4-opus-20250522","hugging_face_id":"","name":"Anthropic: Claude Opus 4","created":1747931245,"description":"Claude Opus 4 is benchmarked as the world’s best coding model, at time of release, bringing sustained performance on complex, long-running tasks and agent workflows. It sets new benchmarks in software engineering, achieving leading results on SWE-bench (72.5%) and Terminal-bench (43.2%). Opus 4 supports extended, agentic workflows, handling thousands of task steps continuously for hours without degradation. \r\n\r\nRead more at the [blog post here](https://www.anthropic.com/news/claude-4)","context_length":200000,"architecture":{"modality":"text+image+file->text","input_modalities":["image","text","file"],"output_modalities":["text"],"tokenizer":"Claude","instruct_type":null},"pricing":{"prompt":"0.000015","completion":"0.000075","web_search":"0.01","input_cache_read":"0.0000015","input_cache_write":"0.00001875"},"top_provider":{"context_length":200000,"max_completion_tokens":32000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-01-31","expiration_date":null,"links":{"details":"/api/v1/models/anthropic/claude-4-opus-20250522/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":15.0,"completion":75.0}},{"id":"anthropic/claude-sonnet-4","canonical_slug":"anthropic/claude-4-sonnet-20250522","hugging_face_id":"","name":"Anthropic: Claude Sonnet 4","created":1747930371,"description":"Claude Sonnet 4 significantly enhances the capabilities of its predecessor, Sonnet 3.7, excelling in both coding and reasoning tasks with improved precision and controllability. Achieving state-of-the-art performance on SWE-bench (72.7%),...","context_length":1000000,"architecture":{"modality":"text+image+file->text","input_modalities":["image","text","file"],"output_modalities":["text"],"tokenizer":"Claude","instruct_type":null},"pricing":{"prompt":"0.000003","completion":"0.000015","web_search":"0.01","input_cache_read":"0.0000003","input_cache_write":"0.00000375"},"top_provider":{"context_length":1000000,"max_completion_tokens":64000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-01-31","expiration_date":null,"links":{"details":"/api/v1/models/anthropic/claude-4-sonnet-20250522/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":3.0,"completion":15.0}},{"id":"mistralai/devstral-small-2505","canonical_slug":"mistralai/devstral-small-2505","hugging_face_id":"mistralai/Devstral-Small-2505","name":"Mistral: Devstral Small 2505","created":1747837379,"description":"Devstral-Small-2505 is a 24B parameter agentic LLM fine-tuned from Mistral-Small-3.1, jointly developed by Mistral AI and All Hands AI for advanced software engineering tasks. It is optimized for codebase exploration, multi-file editing, and integration into coding agents, achieving state-of-the-art results on SWE-Bench Verified (46.8%).\n\nDevstral supports a 128k context window and uses a custom Tekken tokenizer. It is text-only, with the vision encoder removed, and is suitable for local deployment on high-end consumer hardware (e.g., RTX 4090, 32GB RAM Macs). Devstral is best used in agentic workflows via the OpenHands scaffold and is compatible with inference frameworks like vLLM, Transformers, and Ollama. It is released under the Apache 2.0 license.","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.00000006","completion":"0.00000012","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":128000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","top_k","top_p"],"default_parameters":{"temperature":0.3},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.06,"completion":0.12}},{"id":"google/gemma-3n-e4b-it:free","canonical_slug":"google/gemma-3n-e4b-it","hugging_face_id":"google/gemma-3n-E4B-it","name":"Google: Gemma 3n 4B (free)","created":1747776824,"description":"Gemma 3n E4B-it is optimized for efficient execution on mobile and low-resource devices, such as phones, laptops, and tablets. It supports multimodal inputs—including text, visual data, and audio—enabling diverse tasks...","context_length":8192,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":8192,"max_completion_tokens":2048,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","seed","temperature","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-08-31","expiration_date":null,"links":{"details":"/api/v1/models/google/gemma-3n-e4b-it/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"google/gemma-3n-e4b-it","canonical_slug":"google/gemma-3n-e4b-it","hugging_face_id":"google/gemma-3n-E4B-it","name":"Google: Gemma 3n 4B","created":1747776824,"description":"Gemma 3n E4B-it is optimized for efficient execution on mobile and low-resource devices, such as phones, laptops, and tablets. It supports multimodal inputs—including text, visual data, and audio—enabling diverse tasks...","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000006","completion":"0.00000012"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","stop","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-08-31","expiration_date":null,"links":{"details":"/api/v1/models/google/gemma-3n-e4b-it/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.06,"completion":0.12}},{"id":"openai/codex-mini","canonical_slug":"openai/codex-mini","hugging_face_id":"","name":"OpenAI: Codex Mini","created":1747409761,"description":"codex-mini-latest is a fine-tuned version of o4-mini specifically for use in Codex CLI. For direct use in the API, we recommend starting with gpt-4.1.","context_length":200000,"architecture":{"modality":"text+image->text","input_modalities":["image","text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.0000015","completion":"0.000006","input_cache_read":"0.000000375"},"top_provider":{"context_length":200000,"max_completion_tokens":100000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.5,"completion":6.0}},{"id":"nousresearch/deephermes-3-mistral-24b-preview","canonical_slug":"nousresearch/deephermes-3-mistral-24b-preview","hugging_face_id":"NousResearch/DeepHermes-3-Mistral-24B-Preview","name":"Nous: DeepHermes 3 Mistral 24B Preview","created":1746830904,"description":"DeepHermes 3 (Mistral 24B Preview) is an instruction-tuned language model by Nous Research based on Mistral-Small-24B, designed for chat, function calling, and advanced multi-turn reasoning. It introduces a dual-mode system that toggles between intuitive chat responses and structured “deep reasoning” mode using special system prompts. Fine-tuned via distillation from R1, it supports structured output (JSON mode) and function call syntax for agent-based applications.\n\nDeepHermes 3 supports a **reasoning toggle via system prompt**, allowing users to switch between fast, intuitive responses and deliberate, multi-step reasoning. When activated with the following specific system instruction, the model enters a *\"deep thinking\"* mode—generating extended chains of thought wrapped in `<think></think>` tags before delivering a final answer. \n\nSystem Prompt: You are a deep thinking AI, you may use extremely long chains of thought to deeply consider the problem and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. You should enclose your thoughts and internal monologue inside <think> </think> tags, and then provide your solution or response to the problem.","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000002","completion":"0.0000001","input_cache_read":"0.00000001"},"top_provider":{"context_length":32768,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.02,"completion":0.1}},{"id":"mistralai/mistral-medium-3","canonical_slug":"mistralai/mistral-medium-3","hugging_face_id":"","name":"Mistral: Mistral Medium 3","created":1746627341,"description":"Mistral Medium 3 is a high-performance enterprise-grade language model designed to deliver frontier-level capabilities at significantly reduced operational cost. It balances state-of-the-art reasoning and multimodal performance with 8× lower cost...","context_length":131072,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.0000004","completion":"0.000002","input_cache_read":"0.00000004"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/mistralai/mistral-medium-3/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.4,"completion":2.0}},{"id":"google/gemini-2.5-pro-preview-05-06","canonical_slug":"google/gemini-2.5-pro-preview-03-25","hugging_face_id":"","name":"Google: Gemini 2.5 Pro Preview 05-06","created":1746578513,"description":"Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs “thinking” capabilities, enabling it to reason through responses with enhanced accuracy...","context_length":1048576,"architecture":{"modality":"text+image+file+audio+video->text","input_modalities":["text","image","file","audio","video"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0.00000125","completion":"0.00001","image":"0.00000125","audio":"0.00000125","web_search":"0.014","internal_reasoning":"0.00001","input_cache_read":"0.000000125","input_cache_write":"0.000000375"},"top_provider":{"context_length":1048576,"max_completion_tokens":65535,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-01-31","expiration_date":null,"links":{"details":"/api/v1/models/google/gemini-2.5-pro-preview-03-25/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.25,"completion":10.0}},{"id":"arcee-ai/spotlight","canonical_slug":"arcee-ai/spotlight","hugging_face_id":"","name":"Arcee AI: Spotlight","created":1746481552,"description":"Spotlight is a 7‑billion‑parameter vision‑language model derived from Qwen 2.5‑VL and fine‑tuned by Arcee AI for tight image‑text grounding tasks. It offers a 32 k‑token context window, enabling rich multimodal...","context_length":131072,"architecture":{"modality":"text+image->text","input_modalities":["image","text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000018","completion":"0.00000018"},"top_provider":{"context_length":131072,"max_completion_tokens":65537,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","stop","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/arcee-ai/spotlight/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.18,"completion":0.18}},{"id":"arcee-ai/maestro-reasoning","canonical_slug":"arcee-ai/maestro-reasoning","hugging_face_id":"","name":"Arcee AI: Maestro Reasoning","created":1746481269,"description":"Maestro Reasoning is Arcee's flagship analysis model: a 32 B‑parameter derivative of Qwen 2.5‑32 B tuned with DPO and chain‑of‑thought RL for step‑by‑step logic. Compared to the earlier 7 B...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000009","completion":"0.0000033"},"top_provider":{"context_length":131072,"max_completion_tokens":32000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","stop","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/arcee-ai/maestro-reasoning/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.9,"completion":3.3}},{"id":"arcee-ai/virtuoso-large","canonical_slug":"arcee-ai/virtuoso-large","hugging_face_id":"","name":"Arcee AI: Virtuoso Large","created":1746478885,"description":"Virtuoso‑Large is Arcee's top‑tier general‑purpose LLM at 72 B parameters, tuned to tackle cross‑domain reasoning, creative writing and enterprise QA. Unlike many 70 B peers, it retains the 128 k...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000075","completion":"0.0000012"},"top_provider":{"context_length":131072,"max_completion_tokens":64000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/arcee-ai/virtuoso-large/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.75,"completion":1.2}},{"id":"arcee-ai/coder-large","canonical_slug":"arcee-ai/coder-large","hugging_face_id":"","name":"Arcee AI: Coder Large","created":1746478663,"description":"Coder‑Large is a 32 B‑parameter offspring of Qwen 2.5‑Instruct that has been further trained on permissively‑licensed GitHub, CodeSearchNet and synthetic bug‑fix corpora. It supports a 32k context window, enabling multi‑file...","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000005","completion":"0.0000008"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","stop","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/arcee-ai/coder-large/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.5,"completion":0.8}},{"id":"microsoft/phi-4-reasoning-plus","canonical_slug":"microsoft/phi-4-reasoning-plus-04-30","hugging_face_id":"microsoft/Phi-4-reasoning-plus","name":"Microsoft: Phi 4 Reasoning Plus","created":1746130961,"description":"Phi-4-reasoning-plus is an enhanced 14B parameter model from Microsoft, fine-tuned from Phi-4 with additional reinforcement learning to boost accuracy on math, science, and code reasoning tasks. It uses the same dense decoder-only transformer architecture as Phi-4, but generates longer, more comprehensive outputs structured into a step-by-step reasoning trace and final answer.\n\nWhile it offers improved benchmark scores over Phi-4-reasoning across tasks like AIME, OmniMath, and HumanEvalPlus, its responses are typically ~50% longer, resulting in higher latency. Designed for English-only applications, it is well-suited for structured reasoning workflows where output quality takes priority over response speed.","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000007","completion":"0.00000035","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.07,"completion":0.35}},{"id":"inception/mercury-coder","canonical_slug":"inception/mercury-coder-small-beta","hugging_face_id":"","name":"Inception: Mercury Coder","created":1746033880,"description":"Mercury Coder is the first diffusion large language model (dLLM). Applying a breakthrough discrete diffusion approach, the model runs 5-10x faster than even speed optimized models like Claude 3.5 Haiku...","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000025","completion":"0.00000075","input_cache_read":"0.000000025"},"top_provider":{"context_length":128000,"max_completion_tokens":32000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","stop","structured_outputs","temperature","tool_choice","tools"],"default_parameters":{"temperature":0,"top_p":null,"frequency_penalty":null},"knowledge_cutoff":"2025-01-31","expiration_date":"2026-04-15","links":{"details":"/api/v1/models/inception/mercury-coder-small-beta/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.25,"completion":0.75}},{"id":"qwen/qwen3-4b:free","canonical_slug":"qwen/qwen3-4b-04-28","hugging_face_id":"Qwen/Qwen3-4B","name":"Qwen: Qwen3 4B (free)","created":1746031104,"description":"Qwen3-4B is a 4 billion parameter dense language model from the Qwen3 series, designed to support both general-purpose and reasoning-intensive tasks. It introduces a dual-mode architecture—thinking and non-thinking—allowing dynamic switching between high-precision logical reasoning and efficient dialogue generation. This makes it well-suited for multi-turn chat, instruction following, and complex agent workflows.","context_length":40960,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":"qwen3"},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":40960,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"knowledge_cutoff":"2025-03-31","expiration_date":"2026-03-29","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"deepseek/deepseek-prover-v2","canonical_slug":"deepseek/deepseek-prover-v2","hugging_face_id":"deepseek-ai/DeepSeek-Prover-V2-671B","name":"DeepSeek: DeepSeek Prover V2","created":1746013094,"description":"DeepSeek Prover V2 is a 671B parameter model, speculated to be geared towards logic and mathematics. Likely an upgrade from [DeepSeek-Prover-V1.5](https://huggingface.co/deepseek-ai/DeepSeek-Prover-V1.5-RL) Not much is known about the model yet, as DeepSeek released it on Hugging Face without an announcement or description.","context_length":163840,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":null},"pricing":{"prompt":"0.0000005","completion":"0.00000218","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":163840,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.5,"completion":2.18}},{"id":"meta-llama/llama-guard-4-12b","canonical_slug":"meta-llama/llama-guard-4-12b","hugging_face_id":"meta-llama/Llama-Guard-4-12B","name":"Meta: Llama Guard 4 12B","created":1745975193,"description":"Llama Guard 4 is a Llama 4 Scout-derived multimodal pretrained model, fine-tuned for content safety classification. Similar to previous versions, it can be used to classify content in both LLM...","context_length":163840,"architecture":{"modality":"text+image->text","input_modalities":["image","text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000018","completion":"0.00000018"},"top_provider":{"context_length":163840,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-08-31","expiration_date":null,"links":{"details":"/api/v1/models/meta-llama/llama-guard-4-12b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.18,"completion":0.18}},{"id":"qwen/qwen3-30b-a3b","canonical_slug":"qwen/qwen3-30b-a3b-04-28","hugging_face_id":"Qwen/Qwen3-30B-A3B","name":"Qwen: Qwen3 30B A3B","created":1745878604,"description":"Qwen3, the latest generation in the Qwen large language model series, features both dense and mixture-of-experts (MoE) architectures to excel in reasoning, multilingual support, and advanced agent tasks. Its unique...","context_length":40960,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":"qwen3"},"pricing":{"prompt":"0.00000009","completion":"0.00000045"},"top_provider":{"context_length":40960,"max_completion_tokens":20000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-30b-a3b-04-28/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.09,"completion":0.45}},{"id":"qwen/qwen3-8b","canonical_slug":"qwen/qwen3-8b-04-28","hugging_face_id":"Qwen/Qwen3-8B","name":"Qwen: Qwen3 8B","created":1745876632,"description":"Qwen3-8B is a dense 8.2B parameter causal language model from the Qwen3 series, designed for both reasoning-heavy tasks and efficient dialogue. It supports seamless switching between \"thinking\" mode for math,...","context_length":40960,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":"qwen3"},"pricing":{"prompt":"0.00000005","completion":"0.0000004","input_cache_read":"0.00000005"},"top_provider":{"context_length":40960,"max_completion_tokens":8192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.6,"top_p":0.95,"top_k":20,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-8b-04-28/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.05,"completion":0.4}},{"id":"qwen/qwen3-14b","canonical_slug":"qwen/qwen3-14b-04-28","hugging_face_id":"Qwen/Qwen3-14B","name":"Qwen: Qwen3 14B","created":1745876478,"description":"Qwen3-14B is a dense 14.8B parameter causal language model from the Qwen3 series, designed for both complex reasoning and efficient dialogue. It supports seamless switching between a \"thinking\" mode for...","context_length":40960,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":"qwen3"},"pricing":{"prompt":"0.00000006","completion":"0.00000024"},"top_provider":{"context_length":40960,"max_completion_tokens":40960,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-14b-04-28/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.06,"completion":0.24}},{"id":"qwen/qwen3-32b","canonical_slug":"qwen/qwen3-32b-04-28","hugging_face_id":"Qwen/Qwen3-32B","name":"Qwen: Qwen3 32B","created":1745875945,"description":"Qwen3-32B is a dense 32.8B parameter causal language model from the Qwen3 series, optimized for both complex reasoning and efficient dialogue. It supports seamless switching between a \"thinking\" mode for...","context_length":40960,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":"qwen3"},"pricing":{"prompt":"0.00000008","completion":"0.00000028"},"top_provider":{"context_length":40960,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-32b-04-28/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.08,"completion":0.28}},{"id":"qwen/qwen3-235b-a22b:free","canonical_slug":"qwen/qwen3-235b-a22b-04-28","hugging_face_id":"Qwen/Qwen3-235B-A22B","name":"Qwen: Qwen3 235B A22B (free)","created":1745875757,"description":"Qwen3-235B-A22B is a 235B parameter mixture-of-experts (MoE) model developed by Qwen, activating 22B parameters per forward pass. It supports seamless switching between a \"thinking\" mode for complex reasoning, math, and code tasks, and a \"non-thinking\" mode for general conversational efficiency. The model demonstrates strong reasoning ability, multilingual support (100+ languages and dialects), advanced instruction-following, and agent tool-calling capabilities. It natively handles a 32K token context window and extends up to 131K tokens using YaRN-based scaling.","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":"qwen3"},"pricing":{"prompt":"0","completion":"0","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"qwen/qwen3-235b-a22b","canonical_slug":"qwen/qwen3-235b-a22b-04-28","hugging_face_id":"Qwen/Qwen3-235B-A22B","name":"Qwen: Qwen3 235B A22B","created":1745875757,"description":"Qwen3-235B-A22B is a 235B parameter mixture-of-experts (MoE) model developed by Qwen, activating 22B parameters per forward pass. It supports seamless switching between a \"thinking\" mode for complex reasoning, math, and...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":"qwen3"},"pricing":{"prompt":"0.000000455","completion":"0.00000182"},"top_provider":{"context_length":131072,"max_completion_tokens":8192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","temperature","tool_choice","tools","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-235b-a22b-04-28/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.455,"completion":1.82}},{"id":"tngtech/deepseek-r1t-chimera:free","canonical_slug":"tngtech/deepseek-r1t-chimera","hugging_face_id":"tngtech/DeepSeek-R1T-Chimera","name":"TNG: DeepSeek R1T Chimera (free)","created":1745760875,"description":"DeepSeek-R1T-Chimera is created by merging DeepSeek-R1 and DeepSeek-V3 (0324), combining the reasoning capabilities of R1 with the token efficiency improvements of V3. It is based on a DeepSeek-MoE Transformer architecture and is optimized for general text generation tasks.\n\nThe model merges pretrained weights from both source models to balance performance across reasoning, efficiency, and instruction-following tasks. It is released under the MIT license and intended for research and commercial use.","context_length":163840,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":163840,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"tngtech/deepseek-r1t-chimera","canonical_slug":"tngtech/deepseek-r1t-chimera","hugging_face_id":"tngtech/DeepSeek-R1T-Chimera","name":"TNG: DeepSeek R1T Chimera","created":1745760875,"description":"DeepSeek-R1T-Chimera is created by merging DeepSeek-R1 and DeepSeek-V3 (0324), combining the reasoning capabilities of R1 with the token efficiency improvements of V3. It is based on a DeepSeek-MoE Transformer architecture and is optimized for general text generation tasks.\n\nThe model merges pretrained weights from both source models to balance performance across reasoning, efficiency, and instruction-following tasks. It is released under the MIT license and intended for research and commercial use.","context_length":163840,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":null},"pricing":{"prompt":"0.0000003","completion":"0.0000012","input_cache_read":"0.00000015"},"top_provider":{"context_length":163840,"max_completion_tokens":163840,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.3,"completion":1.2}},{"id":"microsoft/mai-ds-r1","canonical_slug":"microsoft/mai-ds-r1","hugging_face_id":"microsoft/MAI-DS-R1","name":"Microsoft: MAI DS R1","created":1745194100,"description":"MAI-DS-R1 is a post-trained variant of DeepSeek-R1 developed by the Microsoft AI team to improve the model’s responsiveness on previously blocked topics while enhancing its safety profile. Built on top of DeepSeek-R1’s reasoning foundation, it integrates 110k examples from the Tulu-3 SFT dataset and 350k internally curated multilingual safety-alignment samples. The model retains strong reasoning, coding, and problem-solving capabilities, while unblocking a wide range of prompts previously restricted in R1.\n\nMAI-DS-R1 demonstrates improved performance on harm mitigation benchmarks and maintains competitive results across general reasoning tasks. It surpasses R1-1776 in satisfaction metrics for blocked queries and reduces leakage in harmful content categories. The model is based on a transformer MoE architecture and is suitable for general-purpose use cases, excluding high-stakes domains such as legal, medical, or autonomous systems.","context_length":163840,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":"deepseek-r1"},"pricing":{"prompt":"0.0000003","completion":"0.0000012","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":163840,"max_completion_tokens":163840,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.3,"completion":1.2}},{"id":"openai/o4-mini-high","canonical_slug":"openai/o4-mini-high-2025-04-16","hugging_face_id":"","name":"OpenAI: o4 Mini High","created":1744824212,"description":"OpenAI o4-mini-high is the same model as [o4-mini](/openai/o4-mini) with reasoning_effort set to high. OpenAI o4-mini is a compact reasoning model in the o-series, optimized for fast, cost-efficient performance while retaining...","context_length":200000,"architecture":{"modality":"text+image+file->text","input_modalities":["image","text","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.0000011","completion":"0.0000044","web_search":"0.01","input_cache_read":"0.000000275"},"top_provider":{"context_length":200000,"max_completion_tokens":100000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/o4-mini-high-2025-04-16/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.1,"completion":4.4}},{"id":"openai/o3","canonical_slug":"openai/o3-2025-04-16","hugging_face_id":"","name":"OpenAI: o3","created":1744823457,"description":"o3 is a well-rounded and powerful model across domains. It sets a new standard for math, science, coding, and visual reasoning tasks. It also excels at technical writing and instruction-following....","context_length":200000,"architecture":{"modality":"text+image+file->text","input_modalities":["image","text","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.000002","completion":"0.000008","web_search":"0.01","input_cache_read":"0.0000005"},"top_provider":{"context_length":200000,"max_completion_tokens":100000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/o3-2025-04-16/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":8.0}},{"id":"openai/o4-mini","canonical_slug":"openai/o4-mini-2025-04-16","hugging_face_id":"","name":"OpenAI: o4 Mini","created":1744820942,"description":"OpenAI o4-mini is a compact reasoning model in the o-series, optimized for fast, cost-efficient performance while retaining strong multimodal and agentic capabilities. It supports tool use and demonstrates competitive reasoning...","context_length":200000,"architecture":{"modality":"text+image+file->text","input_modalities":["image","text","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.0000011","completion":"0.0000044","web_search":"0.01","input_cache_read":"0.000000275"},"top_provider":{"context_length":200000,"max_completion_tokens":100000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/o4-mini-2025-04-16/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.1,"completion":4.4}},{"id":"qwen/qwen2.5-coder-7b-instruct","canonical_slug":"qwen/qwen2.5-coder-7b-instruct","hugging_face_id":"Qwen/Qwen2.5-Coder-7B-Instruct","name":"Qwen: Qwen2.5 Coder 7B Instruct","created":1744734887,"description":"Qwen2.5-Coder-7B-Instruct is a 7B parameter instruction-tuned language model optimized for code-related tasks such as code generation, reasoning, and bug fixing. Based on the Qwen2.5 architecture, it incorporates enhancements like RoPE,...","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":null},"pricing":{"prompt":"0.00000003","completion":"0.00000009"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","response_format","structured_outputs","temperature","top_k","top_p"],"default_parameters":{},"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen2.5-coder-7b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.03,"completion":0.09}},{"id":"openai/gpt-4.1","canonical_slug":"openai/gpt-4.1-2025-04-14","hugging_face_id":"","name":"OpenAI: GPT-4.1","created":1744651385,"description":"GPT-4.1 is a flagship large language model optimized for advanced instruction following, real-world software engineering, and long-context reasoning. It supports a 1 million token context window and outperforms GPT-4o and GPT-4.5 across coding (54.6% SWE-bench Verified), instruction compliance (87.4% IFEval), and multimodal understanding benchmarks. It is tuned for precise code diffs, agent reliability, and high recall in large document contexts, making it ideal for agents, IDE tooling, and enterprise knowledge retrieval.","context_length":1047576,"architecture":{"modality":"text+image+file->text","input_modalities":["image","text","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.000002","completion":"0.000008","input_cache_read":"0.0000005"},"top_provider":{"context_length":1047576,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_completion_tokens","max_tokens","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-4.1-2025-04-14/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":8.0}},{"id":"openai/gpt-4.1-mini","canonical_slug":"openai/gpt-4.1-mini-2025-04-14","hugging_face_id":"","name":"OpenAI: GPT-4.1 Mini","created":1744651381,"description":"GPT-4.1 Mini is a mid-sized model delivering performance competitive with GPT-4o at substantially lower latency and cost. It retains a 1 million token context window and scores 45.1% on hard...","context_length":1047576,"architecture":{"modality":"text+image+file->text","input_modalities":["image","text","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.0000004","completion":"0.0000016","web_search":"0.01","input_cache_read":"0.0000001"},"top_provider":{"context_length":1047576,"max_completion_tokens":32768,"is_moderated":true},"per_request_limits":null,"supported_parameters":["max_completion_tokens","max_tokens","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-4.1-mini-2025-04-14/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.4,"completion":1.6}},{"id":"openai/gpt-4.1-nano","canonical_slug":"openai/gpt-4.1-nano-2025-04-14","hugging_face_id":"","name":"OpenAI: GPT-4.1 Nano","created":1744651369,"description":"For tasks that demand low latency, GPT‑4.1 nano is the fastest and cheapest model in the GPT-4.1 series. It delivers exceptional performance at a small size with its 1 million...","context_length":1047576,"architecture":{"modality":"text+image+file->text","input_modalities":["image","text","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.0000001","completion":"0.0000004","web_search":"0.01","input_cache_read":"0.000000025"},"top_provider":{"context_length":1047576,"max_completion_tokens":32768,"is_moderated":true},"per_request_limits":null,"supported_parameters":["max_completion_tokens","max_tokens","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-4.1-nano-2025-04-14/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.4}},{"id":"eleutherai/llemma_7b","canonical_slug":"eleutherai/llemma_7b","hugging_face_id":"EleutherAI/llemma_7b","name":"EleutherAI: Llemma 7b","created":1744643225,"description":"Llemma 7B is a language model for mathematics. It was initialized with Code Llama 7B weights, and trained on the Proof-Pile-2 for 200B tokens. Llemma models are particularly strong at chain-of-thought mathematical reasoning and using computational tools for mathematics, such as Python and formal theorem provers.","context_length":4096,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":"code-llama"},"pricing":{"prompt":"0.0000008","completion":"0.0000012"},"top_provider":{"context_length":4096,"max_completion_tokens":4096,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","min_p","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"knowledge_cutoff":"2023-04-30","expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.8,"completion":1.2}},{"id":"alfredpros/codellama-7b-instruct-solidity","canonical_slug":"alfredpros/codellama-7b-instruct-solidity","hugging_face_id":"AlfredPros/CodeLlama-7b-Instruct-Solidity","name":"AlfredPros: CodeLLaMa 7B Instruct Solidity","created":1744641874,"description":"A finetuned 7 billion parameters Code LLaMA - Instruct model to generate Solidity smart contract using 4-bit QLoRA finetuning provided by PEFT library.","context_length":4096,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":"alpaca"},"pricing":{"prompt":"0.0000008","completion":"0.0000012"},"top_provider":{"context_length":4096,"max_completion_tokens":4096,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","min_p","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-06-30","expiration_date":null,"links":{"details":"/api/v1/models/alfredpros/codellama-7b-instruct-solidity/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.8,"completion":1.2}},{"id":"arliai/qwq-32b-arliai-rpr-v1","canonical_slug":"arliai/qwq-32b-arliai-rpr-v1","hugging_face_id":"ArliAI/QwQ-32B-ArliAI-RpR-v1","name":"ArliAI: QwQ 32B RpR v1","created":1744555982,"description":"QwQ-32B-ArliAI-RpR-v1 is a 32B parameter model fine-tuned from Qwen/QwQ-32B using a curated creative writing and roleplay dataset originally developed for the RPMax series. It is designed to maintain coherence and reasoning across long multi-turn conversations by introducing explicit reasoning steps per dialogue turn, generated and refined using the base model itself.\n\nThe model was trained using RS-QLORA+ on 8K sequence lengths and supports up to 128K context windows (with practical performance around 32K). It is optimized for creative roleplay and dialogue generation, with an emphasis on minimizing cross-context repetition while preserving stylistic diversity.","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":"deepseek-r1"},"pricing":{"prompt":"0.00000003","completion":"0.00000011","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":32768,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.03,"completion":0.11}},{"id":"x-ai/grok-3-mini-beta","canonical_slug":"x-ai/grok-3-mini-beta","hugging_face_id":"","name":"xAI: Grok 3 Mini Beta","created":1744240195,"description":"Grok 3 Mini is a lightweight, smaller thinking model. Unlike traditional models that generate answers immediately, Grok 3 Mini thinks before responding. It’s ideal for reasoning-heavy tasks that don’t demand...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Grok","instruct_type":null},"pricing":{"prompt":"0.0000003","completion":"0.0000005","web_search":"0.005","input_cache_read":"0.000000075"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","logprobs","max_tokens","reasoning","response_format","seed","stop","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-02-28","expiration_date":"2026-05-15","links":{"details":"/api/v1/models/x-ai/grok-3-mini-beta/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.3,"completion":0.5}},{"id":"x-ai/grok-3-beta","canonical_slug":"x-ai/grok-3-beta","hugging_face_id":"","name":"xAI: Grok 3 Beta","created":1744240068,"description":"Grok 3 is the latest model from xAI. It's their flagship model that excels at enterprise use cases like data extraction, coding, and text summarization. Possesses deep domain knowledge in...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Grok","instruct_type":null},"pricing":{"prompt":"0.000003","completion":"0.000015","web_search":"0.005","input_cache_read":"0.00000075"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logprobs","max_tokens","presence_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-02-28","expiration_date":"2026-05-15","links":{"details":"/api/v1/models/x-ai/grok-3-beta/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":3.0,"completion":15.0}},{"id":"nvidia/llama-3.1-nemotron-ultra-253b-v1","canonical_slug":"nvidia/llama-3.1-nemotron-ultra-253b-v1","hugging_face_id":"nvidia/Llama-3_1-Nemotron-Ultra-253B-v1","name":"NVIDIA: Llama 3.1 Nemotron Ultra 253B v1","created":1744115059,"description":"Llama-3.1-Nemotron-Ultra-253B-v1 is a large language model (LLM) optimized for advanced reasoning, human-interactive chat, retrieval-augmented generation (RAG), and tool-calling tasks. Derived from Meta’s Llama-3.1-405B-Instruct, it has been significantly customized using Neural...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":null},"pricing":{"prompt":"0.0000006","completion":"0.0000018"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","structured_outputs","temperature","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"knowledge_cutoff":"2024-03-31","expiration_date":null,"links":{"details":"/api/v1/models/nvidia/llama-3.1-nemotron-ultra-253b-v1/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.6,"completion":1.8}},{"id":"meta-llama/llama-4-maverick","canonical_slug":"meta-llama/llama-4-maverick-17b-128e-instruct","hugging_face_id":"meta-llama/Llama-4-Maverick-17B-128E-Instruct","name":"Meta: Llama 4 Maverick","created":1743881822,"description":"Llama 4 Maverick 17B Instruct (128E) is a high-capacity multimodal language model from Meta, built on a mixture-of-experts (MoE) architecture with 128 experts and 17 billion active parameters per forward...","context_length":1048576,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Llama4","instruct_type":null},"pricing":{"prompt":"0.00000015","completion":"0.0000006"},"top_provider":{"context_length":1048576,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-08-31","expiration_date":null,"links":{"details":"/api/v1/models/meta-llama/llama-4-maverick-17b-128e-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.15,"completion":0.6}},{"id":"meta-llama/llama-4-scout","canonical_slug":"meta-llama/llama-4-scout-17b-16e-instruct","hugging_face_id":"meta-llama/Llama-4-Scout-17B-16E-Instruct","name":"Meta: Llama 4 Scout","created":1743881519,"description":"Llama 4 Scout 17B Instruct (16E) is a mixture-of-experts (MoE) language model developed by Meta, activating 17 billion parameters out of a total of 109B. It supports native multimodal input...","context_length":327680,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Llama4","instruct_type":null},"pricing":{"prompt":"0.00000008","completion":"0.0000003"},"top_provider":{"context_length":327680,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-08-31","expiration_date":null,"links":{"details":"/api/v1/models/meta-llama/llama-4-scout-17b-16e-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.08,"completion":0.3}},{"id":"qwen/qwen2.5-vl-32b-instruct","canonical_slug":"qwen/qwen2.5-vl-32b-instruct","hugging_face_id":"Qwen/Qwen2.5-VL-32B-Instruct","name":"Qwen: Qwen2.5 VL 32B Instruct","created":1742839838,"description":"Qwen2.5-VL-32B is a multimodal vision-language model fine-tuned through reinforcement learning for enhanced mathematical reasoning, structured outputs, and visual problem-solving capabilities. It excels at visual analysis tasks, including object recognition, textual...","context_length":128000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":null},"pricing":{"prompt":"0.0000002","completion":"0.0000006"},"top_provider":{"context_length":128000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen2.5-vl-32b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":0.6}},{"id":"deepseek/deepseek-chat-v3-0324","canonical_slug":"deepseek/deepseek-chat-v3-0324","hugging_face_id":"deepseek-ai/DeepSeek-V3-0324","name":"DeepSeek: DeepSeek V3 0324","created":1742824755,"description":"DeepSeek V3, a 685B-parameter, mixture-of-experts model, is the latest iteration of the flagship chat model family from the DeepSeek team.\r\n\r\nIt succeeds the [DeepSeek V3](/deepseek/deepseek-chat-v3) model and performs really well on a variety of tasks.","context_length":163840,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":null},"pricing":{"prompt":"0.0000002","completion":"0.00000077","input_cache_read":"0.000000135"},"top_provider":{"context_length":163840,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-07-31","expiration_date":null,"links":{"details":"/api/v1/models/deepseek/deepseek-chat-v3-0324/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.19,"completion":0.87}},{"id":"openai/o1-pro","canonical_slug":"openai/o1-pro","hugging_face_id":"","name":"OpenAI: o1-pro","created":1742423211,"description":"The o1 series of models are trained with reinforcement learning to think before they answer and perform complex reasoning. The o1-pro model uses more compute to think harder and provide...","context_length":200000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00015","completion":"0.0006"},"top_provider":{"context_length":200000,"max_completion_tokens":100000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-10-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/o1-pro/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":150.0,"completion":600.0}},{"id":"mistralai/mistral-small-3.1-24b-instruct:free","canonical_slug":"mistralai/mistral-small-3.1-24b-instruct-2503","hugging_face_id":"mistralai/Mistral-Small-3.1-24B-Instruct-2503","name":"Mistral: Mistral Small 3.1 24B (free)","created":1742238937,"description":"Mistral Small 3.1 24B Instruct is an upgraded variant of Mistral Small 3 (2501), featuring 24 billion parameters with advanced multimodal capabilities. It provides state-of-the-art performance in text-based reasoning and vision tasks, including image analysis, programming, mathematical reasoning, and multilingual support across dozens of languages. Equipped with an extensive 128k token context window and optimized for efficient local inference, it supports use cases such as conversational agents, function calling, long-document comprehension, and privacy-sensitive deployments. The updated version is [Mistral Small 3.2](mistralai/mistral-small-3.2-24b-instruct)","context_length":128000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":128000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.3},"knowledge_cutoff":"2023-10-31","expiration_date":"2026-03-29","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"mistralai/mistral-small-3.1-24b-instruct","canonical_slug":"mistralai/mistral-small-3.1-24b-instruct-2503","hugging_face_id":"mistralai/Mistral-Small-3.1-24B-Instruct-2503","name":"Mistral: Mistral Small 3.1 24B","created":1742238937,"description":"Mistral Small 3.1 24B Instruct is an upgraded variant of Mistral Small 3 (2501), featuring 24 billion parameters with advanced multimodal capabilities. It provides state-of-the-art performance in text-based reasoning and...","context_length":128000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.00000035","completion":"0.00000056"},"top_provider":{"context_length":128000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","temperature","top_k","top_p"],"default_parameters":{"temperature":0.3},"supported_voices":null,"knowledge_cutoff":"2023-10-31","expiration_date":null,"links":{"details":"/api/v1/models/mistralai/mistral-small-3.1-24b-instruct-2503/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.35,"completion":0.56}},{"id":"allenai/olmo-2-0325-32b-instruct","canonical_slug":"allenai/olmo-2-0325-32b-instruct","hugging_face_id":"allenai/OLMo-2-0325-32B-Instruct","name":"AllenAI: Olmo 2 32B Instruct","created":1741988556,"description":"OLMo-2 32B Instruct is a supervised instruction-finetuned variant of the OLMo-2 32B March 2025 base model. It excels in complex reasoning and instruction-following tasks across diverse benchmarks such as GSM8K,...","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000005","completion":"0.0000002"},"top_provider":{"context_length":128000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":[],"default_parameters":{},"knowledge_cutoff":"2024-10-31","expiration_date":null,"links":{"details":"/api/v1/models/allenai/olmo-2-0325-32b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.05,"completion":0.2}},{"id":"google/gemma-3-4b-it:free","canonical_slug":"google/gemma-3-4b-it","hugging_face_id":"google/gemma-3-4b-it","name":"Google: Gemma 3 4B (free)","created":1741905510,"description":"Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,...","context_length":32768,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":"gemma"},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":32768,"max_completion_tokens":8192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","seed","stop","temperature","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-08-31","expiration_date":null,"links":{"details":"/api/v1/models/google/gemma-3-4b-it/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"google/gemma-3-4b-it","canonical_slug":"google/gemma-3-4b-it","hugging_face_id":"google/gemma-3-4b-it","name":"Google: Gemma 3 4B","created":1741905510,"description":"Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,...","context_length":131072,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":"gemma"},"pricing":{"prompt":"0.00000004","completion":"0.00000008"},"top_provider":{"context_length":131072,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-08-31","expiration_date":null,"links":{"details":"/api/v1/models/google/gemma-3-4b-it/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.04,"completion":0.08}},{"id":"google/gemma-3-12b-it:free","canonical_slug":"google/gemma-3-12b-it","hugging_face_id":"google/gemma-3-12b-it","name":"Google: Gemma 3 12B (free)","created":1741902625,"description":"Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,...","context_length":32768,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":"gemma"},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":32768,"max_completion_tokens":8192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","seed","stop","temperature","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-08-31","expiration_date":null,"links":{"details":"/api/v1/models/google/gemma-3-12b-it/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"google/gemma-3-12b-it","canonical_slug":"google/gemma-3-12b-it","hugging_face_id":"google/gemma-3-12b-it","name":"Google: Gemma 3 12B","created":1741902625,"description":"Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,...","context_length":131072,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":"gemma"},"pricing":{"prompt":"0.00000004","completion":"0.00000013"},"top_provider":{"context_length":131072,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-08-31","expiration_date":null,"links":{"details":"/api/v1/models/google/gemma-3-12b-it/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.04,"completion":0.13}},{"id":"cohere/command-a","canonical_slug":"cohere/command-a-03-2025","hugging_face_id":"CohereForAI/c4ai-command-a-03-2025","name":"Cohere: Command A","created":1741894342,"description":"Command A is an open-weights 111B parameter model with a 256k context window focused on delivering great performance across agentic, multilingual, and coding use cases. Compared to other leading proprietary...","context_length":256000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000025","completion":"0.00001"},"top_provider":{"context_length":256000,"max_completion_tokens":8192,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-08-31","expiration_date":null,"links":{"details":"/api/v1/models/cohere/command-a-03-2025/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.5,"completion":10.0}},{"id":"openai/gpt-4o-mini-search-preview","canonical_slug":"openai/gpt-4o-mini-search-preview-2025-03-11","hugging_face_id":"","name":"OpenAI: GPT-4o-mini Search Preview","created":1741818122,"description":"GPT-4o mini Search Preview is a specialized model for web search in Chat Completions. It is trained to understand and execute web search queries.","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000015","completion":"0.0000006","web_search":"0.0275"},"top_provider":{"context_length":128000,"max_completion_tokens":16384,"is_moderated":true},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","structured_outputs","web_search_options"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-10-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-4o-mini-search-preview-2025-03-11/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.15,"completion":0.6}},{"id":"openai/gpt-4o-search-preview","canonical_slug":"openai/gpt-4o-search-preview-2025-03-11","hugging_face_id":"","name":"OpenAI: GPT-4o Search Preview","created":1741817949,"description":"GPT-4o Search Previewis a specialized model for web search in Chat Completions. It is trained to understand and execute web search queries.","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.0000025","completion":"0.00001","web_search":"0.035"},"top_provider":{"context_length":128000,"max_completion_tokens":16384,"is_moderated":true},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","structured_outputs","web_search_options"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-10-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-4o-search-preview-2025-03-11/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.5,"completion":10.0}},{"id":"google/gemma-3-27b-it:free","canonical_slug":"google/gemma-3-27b-it","hugging_face_id":"google/gemma-3-27b-it","name":"Google: Gemma 3 27B (free)","created":1741756359,"description":"Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,...","context_length":131072,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":"gemma"},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":131072,"max_completion_tokens":8192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","seed","stop","temperature","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-08-31","expiration_date":null,"links":{"details":"/api/v1/models/google/gemma-3-27b-it/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"google/gemma-3-27b-it","canonical_slug":"google/gemma-3-27b-it","hugging_face_id":"google/gemma-3-27b-it","name":"Google: Gemma 3 27B","created":1741756359,"description":"Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,...","context_length":131072,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":"gemma"},"pricing":{"prompt":"0.00000008","completion":"0.00000016"},"top_provider":{"context_length":131072,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-08-31","expiration_date":null,"links":{"details":"/api/v1/models/google/gemma-3-27b-it/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.08,"completion":0.16}},{"id":"thedrummer/skyfall-36b-v2","canonical_slug":"thedrummer/skyfall-36b-v2","hugging_face_id":"TheDrummer/Skyfall-36B-v2","name":"TheDrummer: Skyfall 36B V2","created":1741636566,"description":"Skyfall 36B v2 is an enhanced iteration of Mistral Small 2501, specifically fine-tuned for improved creativity, nuanced writing, role-playing, and coherent storytelling.","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000055","completion":"0.0000008","input_cache_read":"0.00000025"},"top_provider":{"context_length":32768,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/thedrummer/skyfall-36b-v2/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.55,"completion":0.8}},{"id":"microsoft/phi-4-multimodal-instruct","canonical_slug":"microsoft/phi-4-multimodal-instruct","hugging_face_id":"microsoft/Phi-4-multimodal-instruct","name":"Microsoft: Phi 4 Multimodal Instruct","created":1741396284,"description":"Phi-4 Multimodal Instruct is a versatile 5.6B parameter foundation model that combines advanced reasoning and instruction-following capabilities across both text and visual inputs, providing accurate text outputs. The unified architecture enables efficient, low-latency inference, suitable for edge and mobile deployments. Phi-4 Multimodal Instruct supports text inputs in multiple languages including Arabic, Chinese, English, French, German, Japanese, Spanish, and more, with visual input optimized primarily for English. It delivers impressive performance on multimodal tasks involving mathematical, scientific, and document reasoning, providing developers and enterprises a powerful yet compact model for sophisticated interactive applications. For more information, see the [Phi-4 Multimodal blog post](https://azure.microsoft.com/en-us/blog/empowering-innovation-the-next-generation-of-the-phi-family/).","context_length":131072,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000005","completion":"0.0000001","request":"0","image":"0.00017685","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.05,"completion":0.1}},{"id":"perplexity/sonar-reasoning-pro","canonical_slug":"perplexity/sonar-reasoning-pro","hugging_face_id":"","name":"Perplexity: Sonar Reasoning Pro","created":1741313308,"description":"Note: Sonar Pro pricing includes Perplexity search pricing. See [details here](https://docs.perplexity.ai/guides/pricing#detailed-pricing-breakdown-for-sonar-reasoning-pro-and-sonar-pro) Sonar Reasoning Pro is a premier reasoning model powered by DeepSeek R1 with Chain of Thought (CoT). Designed for...","context_length":128000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":"deepseek-r1"},"pricing":{"prompt":"0.000002","completion":"0.000008","web_search":"0.005"},"top_provider":{"context_length":128000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","temperature","top_k","top_p","web_search_options"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/perplexity/sonar-reasoning-pro/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":8.0}},{"id":"perplexity/sonar-pro","canonical_slug":"perplexity/sonar-pro","hugging_face_id":"","name":"Perplexity: Sonar Pro","created":1741312423,"description":"Note: Sonar Pro pricing includes Perplexity search pricing. See [details here](https://docs.perplexity.ai/guides/pricing#detailed-pricing-breakdown-for-sonar-reasoning-pro-and-sonar-pro) For enterprises seeking more advanced capabilities, the Sonar Pro API can handle in-depth, multi-step queries with added extensibility, like...","context_length":200000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.000003","completion":"0.000015","web_search":"0.005"},"top_provider":{"context_length":200000,"max_completion_tokens":8000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","temperature","top_k","top_p","web_search_options"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/perplexity/sonar-pro/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":3.0,"completion":15.0}},{"id":"perplexity/sonar-deep-research","canonical_slug":"perplexity/sonar-deep-research","hugging_face_id":"","name":"Perplexity: Sonar Deep Research","created":1741311246,"description":"Sonar Deep Research is a research-focused model designed for multi-step retrieval, synthesis, and reasoning across complex topics. It autonomously searches, reads, and evaluates sources, refining its approach as it gathers...","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":"deepseek-r1"},"pricing":{"prompt":"0.000002","completion":"0.000008","web_search":"0.005","internal_reasoning":"0.000003"},"top_provider":{"context_length":128000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","temperature","top_k","top_p","web_search_options"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/perplexity/sonar-deep-research/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":8.0}},{"id":"qwen/qwq-32b","canonical_slug":"qwen/qwq-32b","hugging_face_id":"Qwen/QwQ-32B","name":"Qwen: QwQ 32B","created":1741208814,"description":"QwQ is the reasoning model of the Qwen series. Compared with conventional instruction-tuned models, QwQ, which is capable of thinking and reasoning, can achieve significantly enhanced performance in downstream tasks,...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":"qwq"},"pricing":{"prompt":"0.00000015","completion":"0.00000058"},"top_provider":{"context_length":131072,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","reasoning","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"knowledge_cutoff":"2024-06-30","expiration_date":"2026-04-29","links":{"details":"/api/v1/models/qwen/qwq-32b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.15,"completion":0.58}},{"id":"google/gemini-2.0-flash-lite-001","canonical_slug":"google/gemini-2.0-flash-lite-001","hugging_face_id":"","name":"Google: Gemini 2.0 Flash Lite","created":1740506212,"description":"Gemini 2.0 Flash Lite offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5),...","context_length":1048576,"architecture":{"modality":"text+image+file+audio+video->text","input_modalities":["text","image","file","audio","video"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0.000000075","completion":"0.0000003","image":"0.000000075","audio":"0.000000075","web_search":"0.014","internal_reasoning":"0.0000003"},"top_provider":{"context_length":1048576,"max_completion_tokens":8192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-08-31","expiration_date":"2026-06-01","links":{"details":"/api/v1/models/google/gemini-2.0-flash-lite-001/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.075,"completion":0.3}},{"id":"anthropic/claude-3.7-sonnet:thinking","canonical_slug":"anthropic/claude-3-7-sonnet-20250219","hugging_face_id":"","name":"Anthropic: Claude 3.7 Sonnet (thinking)","created":1740422110,"description":"Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and...","context_length":200000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"Claude","instruct_type":null},"pricing":{"prompt":"0.000003","completion":"0.000015","web_search":"0.01","input_cache_read":"0.0000003","input_cache_write":"0.00000375"},"top_provider":{"context_length":200000,"max_completion_tokens":64000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","stop","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-10-31","expiration_date":"2026-05-11","links":{"details":"/api/v1/models/anthropic/claude-3-7-sonnet-20250219/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":3.0,"completion":15.0}},{"id":"anthropic/claude-3.7-sonnet","canonical_slug":"anthropic/claude-3-7-sonnet-20250219","hugging_face_id":"","name":"Anthropic: Claude 3.7 Sonnet","created":1740422110,"description":"Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and...","context_length":200000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"Claude","instruct_type":null},"pricing":{"prompt":"0.000003","completion":"0.000015","web_search":"0.01","input_cache_read":"0.0000003","input_cache_write":"0.00000375"},"top_provider":{"context_length":200000,"max_completion_tokens":64000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","stop","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-10-31","expiration_date":"2026-05-11","links":{"details":"/api/v1/models/anthropic/claude-3-7-sonnet-20250219/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":3.0,"completion":15.0}},{"id":"mistralai/mistral-saba","canonical_slug":"mistralai/mistral-saba-2502","hugging_face_id":"","name":"Mistral: Saba","created":1739803239,"description":"Mistral Saba is a 24B-parameter language model specifically designed for the Middle East and South Asia, delivering accurate and contextually relevant responses while maintaining efficient performance. Trained on curated regional...","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.0000002","completion":"0.0000006","input_cache_read":"0.00000002"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3},"supported_voices":null,"knowledge_cutoff":"2024-09-30","expiration_date":null,"links":{"details":"/api/v1/models/mistralai/mistral-saba-2502/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":0.6}},{"id":"meta-llama/llama-guard-3-8b","canonical_slug":"meta-llama/llama-guard-3-8b","hugging_face_id":"meta-llama/Llama-Guard-3-8B","name":"Llama Guard 3 8B","created":1739401318,"description":"Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification. Similar to previous versions, it can be used to classify content in both LLM inputs (prompt classification)...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"none"},"pricing":{"prompt":"0.00000048","completion":"0.00000003"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/meta-llama/llama-guard-3-8b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.48,"completion":0.03}},{"id":"openai/o3-mini-high","canonical_slug":"openai/o3-mini-high-2025-01-31","hugging_face_id":"","name":"OpenAI: o3 Mini High","created":1739372611,"description":"OpenAI o3-mini-high is the same model as [o3-mini](/openai/o3-mini) with reasoning_effort set to high. o3-mini is a cost-efficient language model optimized for STEM reasoning tasks, particularly excelling in science, mathematics, and...","context_length":200000,"architecture":{"modality":"text+file->text","input_modalities":["text","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.0000011","completion":"0.0000044","input_cache_read":"0.00000055"},"top_provider":{"context_length":200000,"max_completion_tokens":100000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2023-10-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/o3-mini-high-2025-01-31/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.1,"completion":4.4}},{"id":"google/gemini-2.0-flash-001","canonical_slug":"google/gemini-2.0-flash-001","hugging_face_id":"","name":"Google: Gemini 2.0 Flash","created":1738769413,"description":"Gemini Flash 2.0 offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5). It...","context_length":1048576,"architecture":{"modality":"text+image+file+audio+video->text","input_modalities":["text","image","file","audio","video"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0.0000001","completion":"0.0000004","image":"0.0000001","audio":"0.0000007","web_search":"0.014","internal_reasoning":"0.0000004","input_cache_read":"0.000000025","input_cache_write":"0.00000008333333333333334"},"top_provider":{"context_length":1048576,"max_completion_tokens":8192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-08-31","expiration_date":"2026-06-01","links":{"details":"/api/v1/models/google/gemini-2.0-flash-001/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.4}},{"id":"qwen/qwen-vl-plus","canonical_slug":"qwen/qwen-vl-plus","hugging_face_id":"","name":"Qwen: Qwen VL Plus","created":1738731255,"description":"Qwen's Enhanced Large Visual Language Model. Significantly upgraded for detailed recognition capabilities and text recognition abilities, supporting ultra-high pixel resolutions up to millions of pixels and extreme aspect ratios for...","context_length":131072,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":null},"pricing":{"prompt":"0.0000001365","completion":"0.0000004095","input_cache_read":"0.0000000273"},"top_provider":{"context_length":131072,"max_completion_tokens":8192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","presence_penalty","response_format","seed","temperature","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":"2026-05-13","links":{"details":"/api/v1/models/qwen/qwen-vl-plus/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1365,"completion":0.4095}},{"id":"aion-labs/aion-1.0","canonical_slug":"aion-labs/aion-1.0","hugging_face_id":"","name":"AionLabs: Aion-1.0","created":1738697557,"description":"Aion-1.0 is a multi-model system designed for high performance across various tasks, including reasoning and coding. It is built on DeepSeek-R1, augmented with additional models and techniques such as Tree...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.000004","completion":"0.000008"},"top_provider":{"context_length":131072,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","temperature","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/aion-labs/aion-1.0/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":4.0,"completion":8.0}},{"id":"aion-labs/aion-1.0-mini","canonical_slug":"aion-labs/aion-1.0-mini","hugging_face_id":"FuseAI/FuseO1-DeepSeekR1-QwQ-SkyT1-32B-Preview","name":"AionLabs: Aion-1.0-Mini","created":1738697107,"description":"Aion-1.0-Mini 32B parameter model is a distilled version of the DeepSeek-R1 model, designed for strong performance in reasoning domains such as mathematics, coding, and logic. It is a modified variant...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000007","completion":"0.0000014"},"top_provider":{"context_length":131072,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","temperature","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/aion-labs/aion-1.0-mini/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.7,"completion":1.4}},{"id":"aion-labs/aion-rp-llama-3.1-8b","canonical_slug":"aion-labs/aion-rp-llama-3.1-8b","hugging_face_id":"","name":"AionLabs: Aion-RP 1.0 (8B)","created":1738696718,"description":"Aion-RP-Llama-3.1-8B ranks the highest in the character evaluation portion of the RPBench-Auto benchmark, a roleplaying-specific variant of Arena-Hard-Auto, where LLMs evaluate each other’s responses. It is a fine-tuned base model...","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000008","completion":"0.0000016"},"top_provider":{"context_length":32768,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","temperature","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/aion-labs/aion-rp-llama-3.1-8b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.8,"completion":1.6}},{"id":"qwen/qwen-vl-max","canonical_slug":"qwen/qwen-vl-max-2025-01-25","hugging_face_id":"","name":"Qwen: Qwen VL Max","created":1738434304,"description":"Qwen VL Max is a visual understanding model with 7500 tokens context length. It excels in delivering optimal performance for a broader spectrum of complex tasks.","context_length":131072,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":null},"pricing":{"prompt":"0.00000052","completion":"0.00000208"},"top_provider":{"context_length":131072,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","presence_penalty","response_format","seed","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":"2026-05-13","links":{"details":"/api/v1/models/qwen/qwen-vl-max-2025-01-25/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.52,"completion":2.08}},{"id":"qwen/qwen-turbo","canonical_slug":"qwen/qwen-turbo-2024-11-01","hugging_face_id":"","name":"Qwen: Qwen-Turbo","created":1738410974,"description":"Qwen-Turbo, based on Qwen2.5, is a 1M context model that provides fast speed and low cost, suitable for simple tasks.","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":null},"pricing":{"prompt":"0.0000000325","completion":"0.00000013","input_cache_read":"0.0000000065"},"top_provider":{"context_length":131072,"max_completion_tokens":8192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","presence_penalty","response_format","seed","temperature","tool_choice","tools","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":"2026-05-13","links":{"details":"/api/v1/models/qwen/qwen-turbo-2024-11-01/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0325,"completion":0.13}},{"id":"qwen/qwen2.5-vl-72b-instruct","canonical_slug":"qwen/qwen2.5-vl-72b-instruct","hugging_face_id":"Qwen/Qwen2.5-VL-72B-Instruct","name":"Qwen: Qwen2.5 VL 72B Instruct","created":1738410311,"description":"Qwen2.5-VL is proficient in recognizing common objects such as flowers, birds, fish, and insects. It is also highly capable of analyzing texts, charts, icons, graphics, and layouts within images.","context_length":32000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":null},"pricing":{"prompt":"0.00000025","completion":"0.00000075"},"top_provider":{"context_length":32000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen2.5-vl-72b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.25,"completion":0.75}},{"id":"qwen/qwen-plus","canonical_slug":"qwen/qwen-plus-2025-01-25","hugging_face_id":"","name":"Qwen: Qwen-Plus","created":1738409840,"description":"Qwen-Plus, based on the Qwen2.5 foundation model, is a 131K context model with a balanced performance, speed, and cost combination.","context_length":1000000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":null},"pricing":{"prompt":"0.00000026","completion":"0.00000078","input_cache_read":"0.000000052","input_cache_write":"0.000000325"},"top_provider":{"context_length":1000000,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","presence_penalty","response_format","seed","temperature","tool_choice","tools","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen-plus-2025-01-25/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.26,"completion":0.78}},{"id":"qwen/qwen-max","canonical_slug":"qwen/qwen-max-2025-01-25","hugging_face_id":"","name":"Qwen: Qwen-Max ","created":1738402289,"description":"Qwen-Max, based on Qwen2.5, provides the best inference performance among [Qwen models](/qwen), especially for complex multi-step tasks. It's a large-scale MoE model that has been pretrained on over 20 trillion...","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":null},"pricing":{"prompt":"0.00000104","completion":"0.00000416","input_cache_read":"0.000000208"},"top_provider":{"context_length":32768,"max_completion_tokens":8192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","presence_penalty","response_format","seed","temperature","tool_choice","tools","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-03-31","expiration_date":"2026-05-13","links":{"details":"/api/v1/models/qwen/qwen-max-2025-01-25/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.04,"completion":4.16}},{"id":"openai/o3-mini","canonical_slug":"openai/o3-mini-2025-01-31","hugging_face_id":"","name":"OpenAI: o3 Mini","created":1738351721,"description":"OpenAI o3-mini is a cost-efficient language model optimized for STEM reasoning tasks, particularly excelling in science, mathematics, and coding. This model supports the `reasoning_effort` parameter, which can be set to...","context_length":200000,"architecture":{"modality":"text+file->text","input_modalities":["text","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.0000011","completion":"0.0000044","input_cache_read":"0.00000055"},"top_provider":{"context_length":200000,"max_completion_tokens":100000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2023-10-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/o3-mini-2025-01-31/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.1,"completion":4.4}},{"id":"mistralai/mistral-small-24b-instruct-2501","canonical_slug":"mistralai/mistral-small-24b-instruct-2501","hugging_face_id":"mistralai/Mistral-Small-24B-Instruct-2501","name":"Mistral: Mistral Small 3","created":1738255409,"description":"Mistral Small 3 is a 24B-parameter language model optimized for low-latency performance across common AI tasks. Released under the Apache 2.0 license, it features both pre-trained and instruction-tuned versions designed...","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.00000005","completion":"0.00000008"},"top_provider":{"context_length":32768,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{"temperature":0.3,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2023-10-31","expiration_date":null,"links":{"details":"/api/v1/models/mistralai/mistral-small-24b-instruct-2501/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.05,"completion":0.08}},{"id":"deepseek/deepseek-r1-distill-qwen-32b","canonical_slug":"deepseek/deepseek-r1-distill-qwen-32b","hugging_face_id":"deepseek-ai/DeepSeek-R1-Distill-Qwen-32B","name":"DeepSeek: R1 Distill Qwen 32B","created":1738194830,"description":"DeepSeek R1 Distill Qwen 32B is a distilled large language model based on [Qwen 2.5 32B](https://huggingface.co/Qwen/Qwen2.5-32B), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). It outperforms OpenAI's o1-mini across various benchmarks, achieving new...","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":"deepseek-r1"},"pricing":{"prompt":"0.00000029","completion":"0.00000029"},"top_provider":{"context_length":32768,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logprobs","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-07-31","expiration_date":null,"links":{"details":"/api/v1/models/deepseek/deepseek-r1-distill-qwen-32b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.29,"completion":0.29}},{"id":"deepseek/deepseek-r1-distill-qwen-14b","canonical_slug":"deepseek/deepseek-r1-distill-qwen-14b","hugging_face_id":"deepseek-ai/DeepSeek-R1-Distill-Qwen-14B","name":"DeepSeek: R1 Distill Qwen 14B","created":1738193940,"description":"DeepSeek R1 Distill Qwen 14B is a distilled large language model based on [Qwen 2.5 14B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). It outperforms OpenAI's o1-mini across various benchmarks, achieving new state-of-the-art results for dense models.\n\nOther benchmark results include:\n\n- AIME 2024 pass@1: 69.7\n- MATH-500 pass@1: 93.9\n- CodeForces Rating: 1481\n\nThe model leverages fine-tuning from DeepSeek R1's outputs, enabling competitive performance comparable to larger frontier models.","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":"deepseek-r1"},"pricing":{"prompt":"0.00000015","completion":"0.00000015","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":32768,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.15,"completion":0.15}},{"id":"perplexity/sonar-reasoning","canonical_slug":"perplexity/sonar-reasoning","hugging_face_id":"","name":"Perplexity: Sonar Reasoning","created":1738131107,"description":"Sonar Reasoning is a reasoning model provided by Perplexity based on [DeepSeek R1](/deepseek/deepseek-r1).\n\nIt allows developers to utilize long chain of thought with built-in web search. Sonar Reasoning is uncensored and hosted in US datacenters. ","context_length":127000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":"deepseek-r1"},"pricing":{"prompt":"0.000001","completion":"0.000005","request":"0.005","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":127000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","temperature","top_k","top_p","web_search_options"],"default_parameters":{},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.0,"completion":5.0}},{"id":"perplexity/sonar","canonical_slug":"perplexity/sonar","hugging_face_id":"","name":"Perplexity: Sonar","created":1738013808,"description":"Sonar is lightweight, affordable, fast, and simple to use — now featuring citations and the ability to customize sources. It is designed for companies seeking to integrate lightweight question-and-answer features...","context_length":127072,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.000001","completion":"0.000001","web_search":"0.005"},"top_provider":{"context_length":127072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","temperature","top_k","top_p","web_search_options"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/perplexity/sonar/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.0,"completion":1.0}},{"id":"deepseek/deepseek-r1-distill-llama-70b","canonical_slug":"deepseek/deepseek-r1-distill-llama-70b","hugging_face_id":"deepseek-ai/DeepSeek-R1-Distill-Llama-70B","name":"DeepSeek: R1 Distill Llama 70B","created":1737663169,"description":"DeepSeek R1 Distill Llama 70B is a distilled large language model based on [Llama-3.3-70B-Instruct](/meta-llama/llama-3.3-70b-instruct), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). The model combines advanced distillation techniques to achieve high performance across...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"deepseek-r1"},"pricing":{"prompt":"0.0000007","completion":"0.0000008"},"top_provider":{"context_length":131072,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-07-31","expiration_date":null,"links":{"details":"/api/v1/models/deepseek/deepseek-r1-distill-llama-70b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.7,"completion":0.8}},{"id":"deepseek/deepseek-r1","canonical_slug":"deepseek/deepseek-r1","hugging_face_id":"deepseek-ai/DeepSeek-R1","name":"DeepSeek: R1","created":1737381095,"description":"DeepSeek R1 is here: Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass.\r\n\r\nFully open-source model & [technical report](https://api-docs.deepseek.com/news/news250120).\r\n\r\nMIT licensed: Distill & commercialize freely!","context_length":64000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":"deepseek-r1"},"pricing":{"prompt":"0.0000007","completion":"0.0000025"},"top_provider":{"context_length":64000,"max_completion_tokens":16000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_completion_tokens","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-07-31","expiration_date":null,"links":{"details":"/api/v1/models/deepseek/deepseek-r1/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.7,"completion":2.5}},{"id":"minimax/minimax-01","canonical_slug":"minimax/minimax-01","hugging_face_id":"MiniMaxAI/MiniMax-Text-01","name":"MiniMax: MiniMax-01","created":1736915462,"description":"MiniMax-01 is a combines MiniMax-Text-01 for text generation and MiniMax-VL-01 for image understanding. It has 456 billion parameters, with 45.9 billion parameters activated per inference, and can handle a context...","context_length":1000192,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000002","completion":"0.0000011"},"top_provider":{"context_length":1000192,"max_completion_tokens":1000192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","temperature","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-03-31","expiration_date":null,"links":{"details":"/api/v1/models/minimax/minimax-01/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":1.1}},{"id":"mistralai/codestral-2501","canonical_slug":"mistralai/codestral-2501","hugging_face_id":"","name":"Mistral: Codestral 2501","created":1736895522,"description":"[Mistral](/mistralai)'s cutting-edge language model for coding. Codestral specializes in low-latency, high-frequency tasks such as fill-in-the-middle (FIM), code correction and test generation. \n\nLearn more on their blog post: https://mistral.ai/news/codestral-2501/","context_length":256000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.0000003","completion":"0.0000009","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":256000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.3,"completion":0.9}},{"id":"microsoft/phi-4","canonical_slug":"microsoft/phi-4","hugging_face_id":"microsoft/phi-4","name":"Microsoft: Phi 4","created":1736489872,"description":"[Microsoft Research](/microsoft) Phi-4 is designed to perform well in complex reasoning tasks and can operate efficiently in situations with limited memory or where quick responses are needed. At 14 billion...","context_length":16384,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.000000065","completion":"0.00000014"},"top_provider":{"context_length":16384,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/microsoft/phi-4/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.065,"completion":0.14}},{"id":"sao10k/l3.1-70b-hanami-x1","canonical_slug":"sao10k/l3.1-70b-hanami-x1","hugging_face_id":"Sao10K/L3.1-70B-Hanami-x1","name":"Sao10K: Llama 3.1 70B Hanami x1","created":1736302854,"description":"This is [Sao10K](/sao10k)'s experiment over [Euryale v2.2](/sao10k/l3.1-euryale-70b).","context_length":16000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":null},"pricing":{"prompt":"0.000003","completion":"0.000003"},"top_provider":{"context_length":16000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/sao10k/l3.1-70b-hanami-x1/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":3.0,"completion":3.0}},{"id":"deepseek/deepseek-chat","canonical_slug":"deepseek/deepseek-chat-v3","hugging_face_id":"deepseek-ai/DeepSeek-V3","name":"DeepSeek: DeepSeek V3","created":1735241320,"description":"DeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions. Pre-trained on nearly 15 trillion tokens, the reported evaluations reveal that the model outperforms other open-source models and rivals leading closed-source models.\r\n\r\nFor model details, please visit [the DeepSeek-V3 repo](https://github.com/deepseek-ai/DeepSeek-V3) for more information, or see the [launch announcement](https://api-docs.deepseek.com/news/news1226).","context_length":163840,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":null},"pricing":{"prompt":"0.00000032","completion":"0.00000089"},"top_provider":{"context_length":163840,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-07-31","expiration_date":null,"links":{"details":"/api/v1/models/deepseek/deepseek-chat-v3/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.3,"completion":1.2}},{"id":"sao10k/l3.3-euryale-70b","canonical_slug":"sao10k/l3.3-euryale-70b-v2.3","hugging_face_id":"Sao10K/L3.3-70B-Euryale-v2.3","name":"Sao10K: Llama 3.3 Euryale 70B","created":1734535928,"description":"Euryale L3.3 70B is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k). It is the successor of [Euryale L3 70B v2.2](/models/sao10k/l3-euryale-70b).","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0.00000065","completion":"0.00000075"},"top_provider":{"context_length":131072,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logprobs","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/sao10k/l3.3-euryale-70b-v2.3/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.65,"completion":0.75}},{"id":"openai/o1","canonical_slug":"openai/o1-2024-12-17","hugging_face_id":"","name":"OpenAI: o1","created":1734459999,"description":"The latest and strongest model family from OpenAI, o1 is designed to spend more time thinking before responding. The o1 model series is trained with large-scale reinforcement learning to reason...","context_length":200000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.000015","completion":"0.00006","input_cache_read":"0.0000075"},"top_provider":{"context_length":200000,"max_completion_tokens":100000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2023-10-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/o1-2024-12-17/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":15.0,"completion":60.0}},{"id":"cohere/command-r7b-12-2024","canonical_slug":"cohere/command-r7b-12-2024","hugging_face_id":"","name":"Cohere: Command R7B (12-2024)","created":1734158152,"description":"Command R7B (12-2024) is a small, fast update of the Command R+ model, delivered in December 2024. It excels at RAG, tool use, agents, and similar tasks requiring complex reasoning...","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Cohere","instruct_type":null},"pricing":{"prompt":"0.0000000375","completion":"0.00000015"},"top_provider":{"context_length":128000,"max_completion_tokens":4000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-08-31","expiration_date":null,"links":{"details":"/api/v1/models/cohere/command-r7b-12-2024/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0375,"completion":0.15}},{"id":"google/gemini-2.0-flash-exp:free","canonical_slug":"google/gemini-2.0-flash-exp","hugging_face_id":"","name":"Google: Gemini 2.0 Flash Experimental (free)","created":1733937523,"description":"Gemini Flash 2.0 offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5). It introduces notable enhancements in multimodal understanding, coding capabilities, complex instruction following, and function calling. These advancements come together to deliver more seamless and robust agentic experiences.","context_length":1048576,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":1048576,"max_completion_tokens":8192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","seed","stop","temperature","tool_choice","tools","top_p"],"default_parameters":{},"expiration_date":"2026-03-03","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"meta-llama/llama-3.3-70b-instruct:free","canonical_slug":"meta-llama/llama-3.3-70b-instruct","hugging_face_id":"meta-llama/Llama-3.3-70B-Instruct","name":"Meta: Llama 3.3 70B Instruct (free)","created":1733506137,"description":"The Meta Llama 3.3 multilingual large language model (LLM) is a pretrained and instruction tuned generative model in 70B (text in/text out). The Llama 3.3 instruction tuned text only model...","context_length":65536,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":65536,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/meta-llama/llama-3.3-70b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"meta-llama/llama-3.3-70b-instruct","canonical_slug":"meta-llama/llama-3.3-70b-instruct","hugging_face_id":"meta-llama/Llama-3.3-70B-Instruct","name":"Meta: Llama 3.3 70B Instruct","created":1733506137,"description":"The Meta Llama 3.3 multilingual large language model (LLM) is a pretrained and instruction tuned generative model in 70B (text in/text out). The Llama 3.3 instruction tuned text only model...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0.0000001","completion":"0.00000032"},"top_provider":{"context_length":131072,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/meta-llama/llama-3.3-70b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.32}},{"id":"amazon/nova-lite-v1","canonical_slug":"amazon/nova-lite-v1","hugging_face_id":"","name":"Amazon: Nova Lite 1.0","created":1733437363,"description":"Amazon Nova Lite 1.0 is a very low-cost multimodal model from Amazon that focused on fast processing of image, video, and text inputs to generate text output. Amazon Nova Lite...","context_length":300000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Nova","instruct_type":null},"pricing":{"prompt":"0.00000006","completion":"0.00000024"},"top_provider":{"context_length":300000,"max_completion_tokens":5120,"is_moderated":true},"per_request_limits":null,"supported_parameters":["max_tokens","stop","temperature","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-10-31","expiration_date":null,"links":{"details":"/api/v1/models/amazon/nova-lite-v1/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.06,"completion":0.24}},{"id":"amazon/nova-micro-v1","canonical_slug":"amazon/nova-micro-v1","hugging_face_id":"","name":"Amazon: Nova Micro 1.0","created":1733437237,"description":"Amazon Nova Micro 1.0 is a text-only model that delivers the lowest latency responses in the Amazon Nova family of models at a very low cost. With a context length...","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Nova","instruct_type":null},"pricing":{"prompt":"0.000000035","completion":"0.00000014"},"top_provider":{"context_length":128000,"max_completion_tokens":5120,"is_moderated":true},"per_request_limits":null,"supported_parameters":["max_tokens","stop","temperature","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-10-31","expiration_date":null,"links":{"details":"/api/v1/models/amazon/nova-micro-v1/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.035,"completion":0.14}},{"id":"amazon/nova-pro-v1","canonical_slug":"amazon/nova-pro-v1","hugging_face_id":"","name":"Amazon: Nova Pro 1.0","created":1733436303,"description":"Amazon Nova Pro 1.0 is a capable multimodal model from Amazon focused on providing a combination of accuracy, speed, and cost for a wide range of tasks. As of December...","context_length":300000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Nova","instruct_type":null},"pricing":{"prompt":"0.0000008","completion":"0.0000032"},"top_provider":{"context_length":300000,"max_completion_tokens":5120,"is_moderated":true},"per_request_limits":null,"supported_parameters":["max_tokens","stop","temperature","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-10-31","expiration_date":null,"links":{"details":"/api/v1/models/amazon/nova-pro-v1/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.8,"completion":3.2}},{"id":"openai/gpt-4o-2024-11-20","canonical_slug":"openai/gpt-4o-2024-11-20","hugging_face_id":"","name":"OpenAI: GPT-4o (2024-11-20)","created":1732127594,"description":"The 2024-11-20 version of GPT-4o offers a leveled-up creative writing ability with more natural, engaging, and tailored writing to improve relevance & readability. It’s also better at working with uploaded...","context_length":128000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.0000025","completion":"0.00001","input_cache_read":"0.00000125"},"top_provider":{"context_length":128000,"max_completion_tokens":16384,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p","web_search_options"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-10-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-4o-2024-11-20/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.5,"completion":10.0}},{"id":"mistralai/mistral-large-2411","canonical_slug":"mistralai/mistral-large-2411","hugging_face_id":"","name":"Mistral Large 2411","created":1731978685,"description":"Mistral Large 2 2411 is an update of [Mistral Large 2](/mistralai/mistral-large) released together with [Pixtral Large 2411](/mistralai/pixtral-large-2411) It provides a significant upgrade on the previous [Mistral Large 24.07](/mistralai/mistral-large-2407), with notable...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.000002","completion":"0.000006","input_cache_read":"0.0000002"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3},"supported_voices":null,"knowledge_cutoff":"2024-07-31","expiration_date":null,"links":{"details":"/api/v1/models/mistralai/mistral-large-2411/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":6.0}},{"id":"mistralai/mistral-large-2407","canonical_slug":"mistralai/mistral-large-2407","hugging_face_id":"","name":"Mistral Large 2407","created":1731978415,"description":"This is Mistral AI's flagship model, Mistral Large 2 (version mistral-large-2407). It's a proprietary weights-available model and excels at reasoning, code, JSON, chat, and more. Read the launch announcement [here](https://mistral.ai/news/mistral-large-2407/)....","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.000002","completion":"0.000006","input_cache_read":"0.0000002"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3},"supported_voices":null,"knowledge_cutoff":"2024-03-31","expiration_date":null,"links":{"details":"/api/v1/models/mistralai/mistral-large-2407/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":6.0}},{"id":"mistralai/pixtral-large-2411","canonical_slug":"mistralai/pixtral-large-2411","hugging_face_id":"","name":"Mistral: Pixtral Large 2411","created":1731977388,"description":"Pixtral Large is a 124B parameter, open-weight, multimodal model built on top of [Mistral Large 2](/mistralai/mistral-large-2411). The model is able to understand documents, charts and natural images. The model is...","context_length":131072,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.000002","completion":"0.000006","input_cache_read":"0.0000002"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3},"supported_voices":null,"knowledge_cutoff":"2024-07-31","expiration_date":null,"links":{"details":"/api/v1/models/mistralai/pixtral-large-2411/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":6.0}},{"id":"qwen/qwen-2.5-coder-32b-instruct","canonical_slug":"qwen/qwen-2.5-coder-32b-instruct","hugging_face_id":"Qwen/Qwen2.5-Coder-32B-Instruct","name":"Qwen2.5 Coder 32B Instruct","created":1731368400,"description":"Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models (formerly known as CodeQwen). Qwen2.5-Coder brings the following improvements upon CodeQwen1.5: - Significantly improvements in **code generation**, **code reasoning**...","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":"chatml"},"pricing":{"prompt":"0.00000066","completion":"0.000001"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen-2.5-coder-32b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.66,"completion":1.0}},{"id":"raifle/sorcererlm-8x22b","canonical_slug":"raifle/sorcererlm-8x22b","hugging_face_id":"rAIfle/SorcererLM-8x22b-bf16","name":"SorcererLM 8x22B","created":1731105083,"description":"SorcererLM is an advanced RP and storytelling model, built as a Low-rank 16-bit LoRA fine-tuned on [WizardLM-2 8x22B](/microsoft/wizardlm-2-8x22b).\n\n- Advanced reasoning and emotional intelligence for engaging and immersive interactions\n- Vivid writing capabilities enriched with spatial and contextual awareness\n- Enhanced narrative depth, promoting creative and dynamic storytelling","context_length":16000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":"vicuna"},"pricing":{"prompt":"0.0000045","completion":"0.0000045"},"top_provider":{"context_length":16000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":4.5,"completion":4.5}},{"id":"thedrummer/unslopnemo-12b","canonical_slug":"thedrummer/unslopnemo-12b","hugging_face_id":"TheDrummer/UnslopNemo-12B-v4.1","name":"TheDrummer: UnslopNemo 12B","created":1731103448,"description":"UnslopNemo v4.1 is the latest addition from the creator of Rocinante, designed for adventure writing and role-play scenarios.","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":"mistral"},"pricing":{"prompt":"0.0000004","completion":"0.0000004"},"top_provider":{"context_length":32768,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logprobs","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-04-30","expiration_date":null,"links":{"details":"/api/v1/models/thedrummer/unslopnemo-12b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.4,"completion":0.4}},{"id":"anthropic/claude-3.5-haiku-20241022","canonical_slug":"anthropic/claude-3-5-haiku-20241022","hugging_face_id":null,"name":"Anthropic: Claude 3.5 Haiku (2024-10-22)","created":1730678400,"description":"Claude 3.5 Haiku features enhancements across all skill sets including coding, tool use, and reasoning. As the fastest model in the Anthropic lineup, it offers rapid response times suitable for applications that require high interactivity and low latency, such as user-facing chatbots and on-the-fly code completions. It also excels in specialized tasks like data extraction and real-time content moderation, making it a versatile tool for a broad range of industries.\n\nIt does not support image inputs.\n\nSee the launch announcement and benchmark results [here](https://www.anthropic.com/news/3-5-models-and-computer-use)","context_length":200000,"architecture":{"modality":"text+image->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"Claude","instruct_type":null},"pricing":{"prompt":"0.0000008","completion":"0.000004","request":"0","image":"0","web_search":"0","internal_reasoning":"0","input_cache_read":"0.00000008","input_cache_write":"0.000001"},"top_provider":{"context_length":200000,"max_completion_tokens":8192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.8,"completion":4.0}},{"id":"anthropic/claude-3.5-haiku","canonical_slug":"anthropic/claude-3-5-haiku","hugging_face_id":null,"name":"Anthropic: Claude 3.5 Haiku","created":1730678400,"description":"Claude 3.5 Haiku features offers enhanced capabilities in speed, coding accuracy, and tool use. Engineered to excel in real-time applications, it delivers quick response times that are essential for dynamic...","context_length":200000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Claude","instruct_type":null},"pricing":{"prompt":"0.0000008","completion":"0.000004","web_search":"0.01","input_cache_read":"0.00000008","input_cache_write":"0.000001"},"top_provider":{"context_length":200000,"max_completion_tokens":8192,"is_moderated":true},"per_request_limits":null,"supported_parameters":["max_tokens","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-07-31","expiration_date":null,"links":{"details":"/api/v1/models/anthropic/claude-3-5-haiku/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.8,"completion":4.0}},{"id":"anthropic/claude-3.5-sonnet","canonical_slug":"anthropic/claude-3.5-sonnet","hugging_face_id":null,"name":"Anthropic: Claude 3.5 Sonnet","created":1729555200,"description":"New Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices. Sonnet is particularly good at:\n\n- Coding: Scores ~49% on SWE-Bench Verified, higher than the last best score, and without any fancy prompt scaffolding\n- Data science: Augments human data science expertise; navigates unstructured data while using multiple tools for insights\n- Visual processing: excelling at interpreting charts, graphs, and images, accurately transcribing text to derive insights beyond just the text alone\n- Agentic tasks: exceptional tool use, making it great at agentic tasks (i.e. complex, multi-step problem solving tasks that require engaging with other systems)\n\n#multimodal","context_length":200000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"Claude","instruct_type":null},"pricing":{"prompt":"0.000006","completion":"0.00003","input_cache_read":"0.0000006","input_cache_write":"0.0000075"},"top_provider":{"context_length":200000,"max_completion_tokens":8192,"is_moderated":true},"per_request_limits":null,"supported_parameters":["max_tokens","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"knowledge_cutoff":"2024-04-30","expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":6.0,"completion":30.0}},{"id":"anthracite-org/magnum-v4-72b","canonical_slug":"anthracite-org/magnum-v4-72b","hugging_face_id":"anthracite-org/magnum-v4-72b","name":"Magnum v4 72B","created":1729555200,"description":"This is a series of models designed to replicate the prose quality of the Claude 3 models, specifically Sonnet(https://openrouter.ai/anthropic/claude-3.5-sonnet) and Opus(https://openrouter.ai/anthropic/claude-3-opus).\n\nThe model is fine-tuned on top of [Qwen2.5 72B](https://openrouter.ai/qwen/qwen-2.5-72b-instruct).","context_length":16384,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":"chatml"},"pricing":{"prompt":"0.000003","completion":"0.000005"},"top_provider":{"context_length":16384,"max_completion_tokens":2048,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","top_a","top_k","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/anthracite-org/magnum-v4-72b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":3.0,"completion":5.0}},{"id":"mistralai/ministral-8b","canonical_slug":"mistralai/ministral-8b","hugging_face_id":null,"name":"Mistral: Ministral 8B","created":1729123200,"description":"Ministral 8B is an 8B parameter model featuring a unique interleaved sliding-window attention pattern for faster, memory-efficient inference. Designed for edge use cases, it supports up to 128k context length and excels in knowledge and reasoning tasks. It outperforms peers in the sub-10B category, making it perfect for low-latency, privacy-first applications.","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.0000001","completion":"0.0000001"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.1}},{"id":"mistralai/ministral-3b","canonical_slug":"mistralai/ministral-3b","hugging_face_id":null,"name":"Mistral: Ministral 3B","created":1729123200,"description":"Ministral 3B is a 3B parameter model optimized for on-device and edge computing. It excels in knowledge, commonsense reasoning, and function-calling, outperforming larger models like Mistral 7B on most benchmarks. Supporting up to 128k context length, it’s ideal for orchestrating agentic workflows and specialist tasks with efficient inference.","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.00000004","completion":"0.00000004"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.04,"completion":0.04}},{"id":"qwen/qwen-2.5-7b-instruct","canonical_slug":"qwen/qwen-2.5-7b-instruct","hugging_face_id":"Qwen/Qwen2.5-7B-Instruct","name":"Qwen: Qwen2.5 7B Instruct","created":1729036800,"description":"Qwen2.5 7B is the latest series of Qwen large language models. Qwen2.5 brings the following improvements upon Qwen2: - Significantly more knowledge and has greatly improved capabilities in coding and...","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":"chatml"},"pricing":{"prompt":"0.00000004","completion":"0.0000001"},"top_provider":{"context_length":32768,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen-2.5-7b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.04,"completion":0.1}},{"id":"nvidia/llama-3.1-nemotron-70b-instruct","canonical_slug":"nvidia/llama-3.1-nemotron-70b-instruct","hugging_face_id":"nvidia/Llama-3.1-Nemotron-70B-Instruct-HF","name":"NVIDIA: Llama 3.1 Nemotron 70B Instruct","created":1728950400,"description":"NVIDIA's Llama 3.1 Nemotron 70B is a language model designed for generating precise and useful responses. Leveraging [Llama 3.1 70B](/models/meta-llama/llama-3.1-70b-instruct) architecture and Reinforcement Learning from Human Feedback (RLHF), it excels...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0.0000012","completion":"0.0000012"},"top_provider":{"context_length":131072,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":"2026-05-07","links":{"details":"/api/v1/models/nvidia/llama-3.1-nemotron-70b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.2,"completion":1.2}},{"id":"inflection/inflection-3-productivity","canonical_slug":"inflection/inflection-3-productivity","hugging_face_id":null,"name":"Inflection: Inflection 3 Productivity","created":1728604800,"description":"Inflection 3 Productivity is optimized for following instructions. It is better for tasks requiring JSON output or precise adherence to provided guidelines. It has access to recent news. For emotional...","context_length":8000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000025","completion":"0.00001"},"top_provider":{"context_length":8000,"max_completion_tokens":1024,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","stop","temperature","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-10-31","expiration_date":null,"links":{"details":"/api/v1/models/inflection/inflection-3-productivity/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.5,"completion":10.0}},{"id":"inflection/inflection-3-pi","canonical_slug":"inflection/inflection-3-pi","hugging_face_id":null,"name":"Inflection: Inflection 3 Pi","created":1728604800,"description":"Inflection 3 Pi powers Inflection's [Pi](https://pi.ai) chatbot, including backstory, emotional intelligence, productivity, and safety. It has access to recent news, and excels in scenarios like customer support and roleplay. Pi...","context_length":8000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000025","completion":"0.00001"},"top_provider":{"context_length":8000,"max_completion_tokens":1024,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","stop","temperature","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-10-31","expiration_date":null,"links":{"details":"/api/v1/models/inflection/inflection-3-pi/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.5,"completion":10.0}},{"id":"thedrummer/rocinante-12b","canonical_slug":"thedrummer/rocinante-12b","hugging_face_id":"TheDrummer/Rocinante-12B-v1.1","name":"TheDrummer: Rocinante 12B","created":1727654400,"description":"Rocinante 12B is designed for engaging storytelling and rich prose. Early testers have reported: - Expanded vocabulary with unique and expressive word choices - Enhanced creativity for vivid narratives -...","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":"chatml"},"pricing":{"prompt":"0.00000017","completion":"0.00000043"},"top_provider":{"context_length":32768,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-04-30","expiration_date":null,"links":{"details":"/api/v1/models/thedrummer/rocinante-12b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.17,"completion":0.43}},{"id":"meta-llama/llama-3.2-1b-instruct","canonical_slug":"meta-llama/llama-3.2-1b-instruct","hugging_face_id":"meta-llama/Llama-3.2-1B-Instruct","name":"Meta: Llama 3.2 1B Instruct","created":1727222400,"description":"Llama 3.2 1B is a 1-billion-parameter language model focused on efficiently performing natural language tasks, such as summarization, dialogue, and multilingual text analysis. Its smaller size allows it to operate...","context_length":60000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0.000000027","completion":"0.0000002"},"top_provider":{"context_length":60000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/meta-llama/llama-3.2-1b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.027,"completion":0.2}},{"id":"meta-llama/llama-3.2-90b-vision-instruct","canonical_slug":"meta-llama/llama-3.2-90b-vision-instruct","hugging_face_id":"meta-llama/Llama-3.2-90B-Vision-Instruct","name":"Meta: Llama 3.2 90B Vision Instruct","created":1727222400,"description":"The Llama 90B Vision model is a top-tier, 90-billion-parameter multimodal model designed for the most challenging visual reasoning and language tasks. It offers unparalleled accuracy in image captioning, visual question answering, and advanced image-text comprehension. Pre-trained on vast multimodal datasets and fine-tuned with human feedback, the Llama 90B Vision is engineered to handle the most demanding image-based AI tasks.\n\nThis model is perfect for industries requiring cutting-edge multimodal AI capabilities, particularly those dealing with complex, real-time visual and textual analysis.\n\nClick here for the [original model card](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD_VISION.md).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://www.llama.com/llama3/use-policy/).","context_length":32768,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0.00000035","completion":"0.0000004","request":"0","image":"0.0005058","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":32768,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.35,"completion":0.4}},{"id":"meta-llama/llama-3.2-3b-instruct:free","canonical_slug":"meta-llama/llama-3.2-3b-instruct","hugging_face_id":"meta-llama/Llama-3.2-3B-Instruct","name":"Meta: Llama 3.2 3B Instruct (free)","created":1727222400,"description":"Llama 3.2 3B is a 3-billion-parameter multilingual large language model, optimized for advanced natural language processing tasks like dialogue generation, reasoning, and summarization. Designed with the latest transformer architecture, it...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","stop","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/meta-llama/llama-3.2-3b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"meta-llama/llama-3.2-3b-instruct","canonical_slug":"meta-llama/llama-3.2-3b-instruct","hugging_face_id":"meta-llama/Llama-3.2-3B-Instruct","name":"Meta: Llama 3.2 3B Instruct","created":1727222400,"description":"Llama 3.2 3B is a 3-billion-parameter multilingual large language model, optimized for advanced natural language processing tasks like dialogue generation, reasoning, and summarization. Designed with the latest transformer architecture, it...","context_length":80000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0.000000051","completion":"0.00000034"},"top_provider":{"context_length":80000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/meta-llama/llama-3.2-3b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.051,"completion":0.34}},{"id":"meta-llama/llama-3.2-11b-vision-instruct","canonical_slug":"meta-llama/llama-3.2-11b-vision-instruct","hugging_face_id":"meta-llama/Llama-3.2-11B-Vision-Instruct","name":"Meta: Llama 3.2 11B Vision Instruct","created":1727222400,"description":"Llama 3.2 11B Vision is a multimodal model with 11 billion parameters, designed to handle tasks combining visual and textual data. It excels in tasks such as image captioning and...","context_length":131072,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0.000000245","completion":"0.000000245"},"top_provider":{"context_length":131072,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/meta-llama/llama-3.2-11b-vision-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.245,"completion":0.245}},{"id":"qwen/qwen-2.5-72b-instruct","canonical_slug":"qwen/qwen-2.5-72b-instruct","hugging_face_id":"Qwen/Qwen2.5-72B-Instruct","name":"Qwen2.5 72B Instruct","created":1726704000,"description":"Qwen2.5 72B is the latest series of Qwen large language models. Qwen2.5 brings the following improvements upon Qwen2: - Significantly more knowledge and has greatly improved capabilities in coding and...","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":"chatml"},"pricing":{"prompt":"0.00000036","completion":"0.0000004"},"top_provider":{"context_length":32768,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen-2.5-72b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.36,"completion":0.4}},{"id":"neversleep/llama-3.1-lumimaid-8b","canonical_slug":"neversleep/llama-3.1-lumimaid-8b","hugging_face_id":"NeverSleep/Lumimaid-v0.2-8B","name":"NeverSleep: Lumimaid v0.2 8B","created":1726358400,"description":"Lumimaid v0.2 8B is a finetune of [Llama 3.1 8B](/models/meta-llama/llama-3.1-8b-instruct) with a \"HUGE step up dataset wise\" compared to Lumimaid v0.1. Sloppy chats output were purged.\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0.00000009","completion":"0.0000006"},"top_provider":{"context_length":32768,"max_completion_tokens":4096,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logprobs","max_tokens","presence_penalty","response_format","stop","structured_outputs","temperature","top_logprobs","top_p"],"default_parameters":{},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.09,"completion":0.6}},{"id":"mistralai/pixtral-12b","canonical_slug":"mistralai/pixtral-12b","hugging_face_id":"mistralai/Pixtral-12B-2409","name":"Mistral: Pixtral 12B","created":1725926400,"description":"The first multi-modal, text+image-to-text model from Mistral AI. Its weights were launched via torrent: https://x.com/mistralai/status/1833758285167722836.","context_length":32768,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.00000010000000000000001","completion":"0.00000010000000000000001"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{"temperature":0.3},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.1}},{"id":"cohere/command-r-plus-08-2024","canonical_slug":"cohere/command-r-plus-08-2024","hugging_face_id":null,"name":"Cohere: Command R+ (08-2024)","created":1724976000,"description":"command-r-plus-08-2024 is an update of the [Command R+](/models/cohere/command-r-plus) with roughly 50% higher throughput and 25% lower latencies as compared to the previous Command R+ version, while keeping the hardware footprint...","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Cohere","instruct_type":null},"pricing":{"prompt":"0.0000025","completion":"0.00001"},"top_provider":{"context_length":128000,"max_completion_tokens":4000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-03-31","expiration_date":null,"links":{"details":"/api/v1/models/cohere/command-r-plus-08-2024/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.5,"completion":10.0}},{"id":"cohere/command-r-08-2024","canonical_slug":"cohere/command-r-08-2024","hugging_face_id":null,"name":"Cohere: Command R (08-2024)","created":1724976000,"description":"command-r-08-2024 is an update of the [Command R](/models/cohere/command-r) with improved performance for multilingual retrieval-augmented generation (RAG) and tool use. More broadly, it is better at math, code and reasoning and...","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Cohere","instruct_type":null},"pricing":{"prompt":"0.00000015","completion":"0.0000006"},"top_provider":{"context_length":128000,"max_completion_tokens":4000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-03-31","expiration_date":null,"links":{"details":"/api/v1/models/cohere/command-r-08-2024/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.15,"completion":0.6}},{"id":"qwen/qwen-2.5-vl-7b-instruct","canonical_slug":"qwen/qwen-2-vl-7b-instruct","hugging_face_id":"Qwen/Qwen2.5-VL-7B-Instruct","name":"Qwen: Qwen2.5-VL 7B Instruct","created":1724803200,"description":"Qwen2.5 VL 7B is a multimodal LLM from the Qwen Team with the following key enhancements:\n\n- SoTA understanding of images of various resolution & ratio: Qwen2.5-VL achieves state-of-the-art performance on visual understanding benchmarks, including MathVista, DocVQA, RealWorldQA, MTVQA, etc.\n\n- Understanding videos of 20min+: Qwen2.5-VL can understand videos over 20 minutes for high-quality video-based question answering, dialog, content creation, etc.\n\n- Agent that can operate your mobiles, robots, etc.: with the abilities of complex reasoning and decision making, Qwen2.5-VL can be integrated with devices like mobile phones, robots, etc., for automatic operation based on visual environment and text instructions.\n\n- Multilingual Support: to serve global users, besides English and Chinese, Qwen2.5-VL now supports the understanding of texts in different languages inside images, including most European languages, Japanese, Korean, Arabic, Vietnamese, etc.\n\nFor more details, see this [blog post](https://qwenlm.github.io/blog/qwen2-vl/) and [GitHub repo](https://github.com/QwenLM/Qwen2-VL).\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).","context_length":32768,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":null},"pricing":{"prompt":"0.00000020000000000000002","completion":"0.00000020000000000000002"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":0.2}},{"id":"sao10k/l3.1-euryale-70b","canonical_slug":"sao10k/l3.1-euryale-70b","hugging_face_id":"Sao10K/L3.1-70B-Euryale-v2.2","name":"Sao10K: Llama 3.1 Euryale 70B v2.2","created":1724803200,"description":"Euryale L3.1 70B v2.2 is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k). It is the successor of [Euryale L3 70B v2.1](/models/sao10k/l3-euryale-70b).","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0.00000085","completion":"0.00000085"},"top_provider":{"context_length":131072,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/sao10k/l3.1-euryale-70b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.85,"completion":0.85}},{"id":"microsoft/phi-3.5-mini-128k-instruct","canonical_slug":"microsoft/phi-3.5-mini-128k-instruct","hugging_face_id":"microsoft/Phi-3.5-mini-instruct","name":"Microsoft: Phi-3.5 Mini 128K Instruct","created":1724198400,"description":"Phi-3.5 models are lightweight, state-of-the-art open models. These models were trained with Phi-3 datasets that include both synthetic data and the filtered, publicly available websites data, with a focus on high quality and reasoning-dense properties. Phi-3.5 Mini uses 3.8B parameters, and is a dense decoder-only transformer model using the same tokenizer as [Phi-3 Mini](/models/microsoft/phi-3-mini-128k-instruct).\n\nThe models underwent a rigorous enhancement process, incorporating both supervised fine-tuning, proximal policy optimization, and direct preference optimization to ensure precise instruction adherence and robust safety measures. When assessed against benchmarks that test common sense, language understanding, math, code, long context and logical reasoning, Phi-3.5 models showcased robust and state-of-the-art performance among models with less than 13 billion parameters.","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":"phi3"},"pricing":{"prompt":"0.0000001","completion":"0.0000001","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":128000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","temperature","tool_choice","tools","top_p"],"default_parameters":{},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.1}},{"id":"nousresearch/hermes-3-llama-3.1-70b","canonical_slug":"nousresearch/hermes-3-llama-3.1-70b","hugging_face_id":"NousResearch/Hermes-3-Llama-3.1-70B","name":"Nous: Hermes 3 70B Instruct","created":1723939200,"description":"Hermes 3 is a generalist language model with many improvements over [Hermes 2](/models/nousresearch/nous-hermes-2-mistral-7b-dpo), including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"chatml"},"pricing":{"prompt":"0.0000003","completion":"0.0000003"},"top_provider":{"context_length":131072,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/nousresearch/hermes-3-llama-3.1-70b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.3,"completion":0.3}},{"id":"nousresearch/hermes-3-llama-3.1-405b:free","canonical_slug":"nousresearch/hermes-3-llama-3.1-405b","hugging_face_id":"NousResearch/Hermes-3-Llama-3.1-405B","name":"Nous: Hermes 3 405B Instruct (free)","created":1723766400,"description":"Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"chatml"},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","stop","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/nousresearch/hermes-3-llama-3.1-405b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"nousresearch/hermes-3-llama-3.1-405b","canonical_slug":"nousresearch/hermes-3-llama-3.1-405b","hugging_face_id":"NousResearch/Hermes-3-Llama-3.1-405B","name":"Nous: Hermes 3 405B Instruct","created":1723766400,"description":"Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"chatml"},"pricing":{"prompt":"0.000001","completion":"0.000001"},"top_provider":{"context_length":131072,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/nousresearch/hermes-3-llama-3.1-405b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.0,"completion":1.0}},{"id":"openai/chatgpt-4o-latest","canonical_slug":"openai/chatgpt-4o-latest","hugging_face_id":null,"name":"OpenAI: ChatGPT-4o","created":1723593600,"description":"OpenAI ChatGPT 4o is continually updated by OpenAI to point to the current version of GPT-4o used by ChatGPT. It therefore differs slightly from the API version of [GPT-4o](/models/openai/gpt-4o) in that it has additional RLHF. It is intended for research and evaluation.\r\n\r\nOpenAI notes that this model is not suited for production use-cases as it may be removed or redirected to another model in the future.","context_length":128000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.000005","completion":"0.000015"},"top_provider":{"context_length":128000,"max_completion_tokens":16384,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","top_logprobs","top_p"],"default_parameters":{},"expiration_date":"2026-02-17","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":5.0,"completion":15.0}},{"id":"sao10k/l3-lunaris-8b","canonical_slug":"sao10k/l3-lunaris-8b","hugging_face_id":"Sao10K/L3-8B-Lunaris-v1","name":"Sao10K: Llama 3 8B Lunaris","created":1723507200,"description":"Lunaris 8B is a versatile generalist and roleplaying model based on Llama 3. It's a strategic merge of multiple models, designed to balance creativity with improved logic and general knowledge....","context_length":8192,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0.00000004","completion":"0.00000005"},"top_provider":{"context_length":8192,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/sao10k/l3-lunaris-8b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.04,"completion":0.05}},{"id":"openai/gpt-4o-2024-08-06","canonical_slug":"openai/gpt-4o-2024-08-06","hugging_face_id":null,"name":"OpenAI: GPT-4o (2024-08-06)","created":1722902400,"description":"The 2024-08-06 version of GPT-4o offers improved performance in structured outputs, with the ability to supply a JSON schema in the respone_format. Read more [here](https://openai.com/index/introducing-structured-outputs-in-the-api/). GPT-4o (\"o\" for \"omni\") is...","context_length":128000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.0000025","completion":"0.00001","input_cache_read":"0.00000125"},"top_provider":{"context_length":128000,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_completion_tokens","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p","web_search_options"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-10-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-4o-2024-08-06/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.5,"completion":10.0}},{"id":"meta-llama/llama-3.1-405b","canonical_slug":"meta-llama/llama-3.1-405b","hugging_face_id":"meta-llama/llama-3.1-405B","name":"Meta: Llama 3.1 405B (base)","created":1722556800,"description":"Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This is the base 405B pre-trained version.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"none"},"pricing":{"prompt":"0.000004","completion":"0.000004"},"top_provider":{"context_length":32768,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":4.0,"completion":4.0}},{"id":"meta-llama/llama-3.1-8b-instruct","canonical_slug":"meta-llama/llama-3.1-8b-instruct","hugging_face_id":"meta-llama/Meta-Llama-3.1-8B-Instruct","name":"Meta: Llama 3.1 8B Instruct","created":1721692800,"description":"Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 8B instruct-tuned version is fast and efficient. It has demonstrated strong performance compared to...","context_length":16384,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0.00000002","completion":"0.00000005"},"top_provider":{"context_length":16384,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/meta-llama/llama-3.1-8b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.02,"completion":0.05}},{"id":"meta-llama/llama-3.1-405b-instruct","canonical_slug":"meta-llama/llama-3.1-405b-instruct","hugging_face_id":"meta-llama/Meta-Llama-3.1-405B-Instruct","name":"Meta: Llama 3.1 405B Instruct","created":1721692800,"description":"The highly anticipated 400B class of Llama3 is here! Clocking in at 128k context with impressive eval scores, the Meta AI team continues to push the frontier of open-source LLMs.\n\nMeta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 405B instruct-tuned version is optimized for high quality dialogue usecases.\n\nIt has demonstrated strong performance compared to leading closed-source models including GPT-4o and Claude 3.5 Sonnet in evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3-1/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).","context_length":131000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0.000004","completion":"0.000004"},"top_provider":{"context_length":131000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":4.0,"completion":4.0}},{"id":"meta-llama/llama-3.1-70b-instruct","canonical_slug":"meta-llama/llama-3.1-70b-instruct","hugging_face_id":"meta-llama/Meta-Llama-3.1-70B-Instruct","name":"Meta: Llama 3.1 70B Instruct","created":1721692800,"description":"Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 70B instruct-tuned version is optimized for high quality dialogue usecases. It has demonstrated strong...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0.0000004","completion":"0.0000004"},"top_provider":{"context_length":131072,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/meta-llama/llama-3.1-70b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.4,"completion":0.4}},{"id":"mistralai/mistral-nemo","canonical_slug":"mistralai/mistral-nemo","hugging_face_id":"mistralai/Mistral-Nemo-Instruct-2407","name":"Mistral: Mistral Nemo","created":1721347200,"description":"A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA. The model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese,...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":"mistral"},"pricing":{"prompt":"0.00000002","completion":"0.00000003"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":0.3},"supported_voices":null,"knowledge_cutoff":"2024-04-30","expiration_date":null,"links":{"details":"/api/v1/models/mistralai/mistral-nemo/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.02,"completion":0.03}},{"id":"openai/gpt-4o-mini","canonical_slug":"openai/gpt-4o-mini","hugging_face_id":null,"name":"OpenAI: GPT-4o-mini","created":1721260800,"description":"GPT-4o mini is OpenAI's newest model after [GPT-4 Omni](/models/openai/gpt-4o), supporting both text and image inputs with text outputs. As their most advanced small model, it is many multiples more affordable...","context_length":128000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000015","completion":"0.0000006","input_cache_read":"0.000000075"},"top_provider":{"context_length":128000,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_completion_tokens","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p","web_search_options"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-10-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-4o-mini/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.15,"completion":0.6}},{"id":"openai/gpt-4o-mini-2024-07-18","canonical_slug":"openai/gpt-4o-mini-2024-07-18","hugging_face_id":null,"name":"OpenAI: GPT-4o-mini (2024-07-18)","created":1721260800,"description":"GPT-4o mini is OpenAI's newest model after [GPT-4 Omni](/models/openai/gpt-4o), supporting both text and image inputs with text outputs. As their most advanced small model, it is many multiples more affordable...","context_length":128000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000015","completion":"0.0000006","input_cache_read":"0.000000075"},"top_provider":{"context_length":128000,"max_completion_tokens":16384,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p","web_search_options"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-10-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-4o-mini-2024-07-18/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.15,"completion":0.6}},{"id":"google/gemma-2-27b-it","canonical_slug":"google/gemma-2-27b-it","hugging_face_id":"google/gemma-2-27b-it","name":"Google: Gemma 2 27B","created":1720828800,"description":"Gemma 2 27B by Google is an open model built from the same research and technology used to create the [Gemini models](/models?q=gemini). Gemma models are well-suited for a variety of...","context_length":8192,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":"gemma"},"pricing":{"prompt":"0.00000065","completion":"0.00000065"},"top_provider":{"context_length":8192,"max_completion_tokens":2048,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/google/gemma-2-27b-it/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.65,"completion":0.65}},{"id":"google/gemma-2-9b-it","canonical_slug":"google/gemma-2-9b-it","hugging_face_id":"google/gemma-2-9b-it","name":"Google: Gemma 2 9B","created":1719532800,"description":"Gemma 2 9B by Google is an advanced, open-source language model that sets a new standard for efficiency and performance in its size class. Designed for a wide variety of...","context_length":8192,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":"gemma"},"pricing":{"prompt":"0.00000003","completion":"0.00000009"},"top_provider":{"context_length":8192,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","temperature","top_k","top_p"],"default_parameters":{},"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/google/gemma-2-9b-it/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.03,"completion":0.09}},{"id":"sao10k/l3-euryale-70b","canonical_slug":"sao10k/l3-euryale-70b","hugging_face_id":"Sao10K/L3-70B-Euryale-v2.1","name":"Sao10k: Llama 3 Euryale 70B v2.1","created":1718668800,"description":"Euryale 70B v2.1 is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k). - Better prompt adherence. - Better anatomy / spatial awareness. - Adapts much better to unique and custom...","context_length":8192,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0.00000148","completion":"0.00000148"},"top_provider":{"context_length":8192,"max_completion_tokens":8192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/sao10k/l3-euryale-70b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.48,"completion":1.48}},{"id":"mistralai/mistral-7b-instruct:free","canonical_slug":"mistralai/mistral-7b-instruct","hugging_face_id":"mistralai/Mistral-7B-Instruct-v0.3","name":"Mistral: Mistral 7B Instruct (free)","created":1716768000,"description":"A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.\n\n*Mistral 7B Instruct has multiple version variants, and this is intended to be the latest version.*","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":"mistral"},"pricing":{"prompt":"0","completion":"0","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":32768,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.3},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"mistralai/mistral-7b-instruct","canonical_slug":"mistralai/mistral-7b-instruct","hugging_face_id":"mistralai/Mistral-7B-Instruct-v0.3","name":"Mistral: Mistral 7B Instruct","created":1716768000,"description":"A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.\n\n*Mistral 7B Instruct has multiple version variants, and this is intended to be the latest version.*","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":"mistral"},"pricing":{"prompt":"0.0000002","completion":"0.0000002"},"top_provider":{"context_length":32768,"max_completion_tokens":4096,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","stop","temperature","top_k","top_p"],"default_parameters":{"temperature":0.3},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":0.2}},{"id":"nousresearch/hermes-2-pro-llama-3-8b","canonical_slug":"nousresearch/hermes-2-pro-llama-3-8b","hugging_face_id":"NousResearch/Hermes-2-Pro-Llama-3-8B","name":"NousResearch: Hermes 2 Pro - Llama-3 8B","created":1716768000,"description":"Hermes 2 Pro is an upgraded, retrained version of Nous Hermes 2, consisting of an updated and cleaned version of the OpenHermes 2.5 Dataset, as well as a newly introduced...","context_length":8192,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"chatml"},"pricing":{"prompt":"0.00000014","completion":"0.00000014"},"top_provider":{"context_length":8192,"max_completion_tokens":8192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/nousresearch/hermes-2-pro-llama-3-8b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.14,"completion":0.14}},{"id":"mistralai/mistral-7b-instruct-v0.3","canonical_slug":"mistralai/mistral-7b-instruct-v0.3","hugging_face_id":"mistralai/Mistral-7B-Instruct-v0.3","name":"Mistral: Mistral 7B Instruct v0.3","created":1716768000,"description":"A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.\n\nAn improved version of [Mistral 7B Instruct v0.2](/models/mistralai/mistral-7b-instruct-v0.2), with the following changes:\n\n- Extended vocabulary to 32768\n- Supports v3 Tokenizer\n- Supports function calling\n\nNOTE: Support for function calling depends on the provider.","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":"mistral"},"pricing":{"prompt":"0.0000002","completion":"0.0000002"},"top_provider":{"context_length":32768,"max_completion_tokens":4096,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","stop","temperature","top_k","top_p"],"default_parameters":{"temperature":0.3},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":0.2}},{"id":"microsoft/phi-3-mini-128k-instruct","canonical_slug":"microsoft/phi-3-mini-128k-instruct","hugging_face_id":"microsoft/Phi-3-mini-128k-instruct","name":"Microsoft: Phi-3 Mini 128K Instruct","created":1716681600,"description":"Phi-3 Mini is a powerful 3.8B parameter model designed for advanced language understanding, reasoning, and instruction following. Optimized through supervised fine-tuning and preference adjustments, it excels in tasks involving common sense, mathematics, logical reasoning, and code processing.\n\nAt time of release, Phi-3 Medium demonstrated state-of-the-art performance among lightweight models. This model is static, trained on an offline dataset with an October 2023 cutoff date.","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":"phi3"},"pricing":{"prompt":"0.0000001","completion":"0.0000001","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":128000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","temperature","tool_choice","tools","top_p"],"default_parameters":{},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.1}},{"id":"microsoft/phi-3-medium-128k-instruct","canonical_slug":"microsoft/phi-3-medium-128k-instruct","hugging_face_id":"microsoft/Phi-3-medium-128k-instruct","name":"Microsoft: Phi-3 Medium 128K Instruct","created":1716508800,"description":"Phi-3 128K Medium is a powerful 14-billion parameter model designed for advanced language understanding, reasoning, and instruction following. Optimized through supervised fine-tuning and preference adjustments, it excels in tasks involving common sense, mathematics, logical reasoning, and code processing.\n\nAt time of release, Phi-3 Medium demonstrated state-of-the-art performance among lightweight models. In the MMLU-Pro eval, the model even comes close to a Llama3 70B level of performance.\n\nFor 4k context length, try [Phi-3 Medium 4K](/models/microsoft/phi-3-medium-4k-instruct).","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":"phi3"},"pricing":{"prompt":"0.000001","completion":"0.000001","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":128000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","temperature","tool_choice","tools","top_p"],"default_parameters":{},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.0,"completion":1.0}},{"id":"openai/gpt-4o","canonical_slug":"openai/gpt-4o","hugging_face_id":null,"name":"OpenAI: GPT-4o","created":1715558400,"description":"GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as...","context_length":128000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.0000025","completion":"0.00001"},"top_provider":{"context_length":128000,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_completion_tokens","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p","web_search_options"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-10-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-4o/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.5,"completion":10.0}},{"id":"openai/gpt-4o:extended","canonical_slug":"openai/gpt-4o","hugging_face_id":null,"name":"OpenAI: GPT-4o (extended)","created":1715558400,"description":"GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as...","context_length":128000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.000006","completion":"0.000018"},"top_provider":{"context_length":128000,"max_completion_tokens":64000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p","web_search_options"],"default_parameters":{},"knowledge_cutoff":"2023-10-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-4o/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":6.0,"completion":18.0}},{"id":"openai/gpt-4o-2024-05-13","canonical_slug":"openai/gpt-4o-2024-05-13","hugging_face_id":null,"name":"OpenAI: GPT-4o (2024-05-13)","created":1715558400,"description":"GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as...","context_length":128000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.000005","completion":"0.000015"},"top_provider":{"context_length":128000,"max_completion_tokens":4096,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_completion_tokens","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p","web_search_options"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-10-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-4o-2024-05-13/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":5.0,"completion":15.0}},{"id":"meta-llama/llama-guard-2-8b","canonical_slug":"meta-llama/llama-guard-2-8b","hugging_face_id":"meta-llama/Meta-Llama-Guard-2-8B","name":"Meta: LlamaGuard 2 8B","created":1715558400,"description":"This safeguard model has 8B parameters and is based on the Llama 3 family. Just like is predecessor, [LlamaGuard 1](https://huggingface.co/meta-llama/LlamaGuard-7b), it can do both prompt and response classification.\n\nLlamaGuard 2 acts as a normal LLM would, generating text that indicates whether the given input/output is safe/unsafe. If deemed unsafe, it will also share the content categories violated.\n\nFor best results, please use raw prompt input or the `/completions` endpoint, instead of the chat API.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).","context_length":8192,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"none"},"pricing":{"prompt":"0.0000002","completion":"0.0000002"},"top_provider":{"context_length":8192,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","stop","temperature","top_k","top_p"],"default_parameters":{},"expiration_date":"2026-02-25","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":0.2}},{"id":"meta-llama/llama-3-8b-instruct","canonical_slug":"meta-llama/llama-3-8b-instruct","hugging_face_id":"meta-llama/Meta-Llama-3-8B-Instruct","name":"Meta: Llama 3 8B Instruct","created":1713398400,"description":"Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors. This 8B instruct-tuned version was optimized for high quality dialogue usecases. It has demonstrated strong...","context_length":8192,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0.00000004","completion":"0.00000004"},"top_provider":{"context_length":8192,"max_completion_tokens":8192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/meta-llama/llama-3-8b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.04,"completion":0.04}},{"id":"meta-llama/llama-3-70b-instruct","canonical_slug":"meta-llama/llama-3-70b-instruct","hugging_face_id":"meta-llama/Meta-Llama-3-70B-Instruct","name":"Meta: Llama 3 70B Instruct","created":1713398400,"description":"Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors. This 70B instruct-tuned version was optimized for high quality dialogue usecases. It has demonstrated strong...","context_length":8192,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0.00000051","completion":"0.00000074"},"top_provider":{"context_length":8192,"max_completion_tokens":8000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/meta-llama/llama-3-70b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.51,"completion":0.74}},{"id":"mistralai/mixtral-8x22b-instruct","canonical_slug":"mistralai/mixtral-8x22b-instruct","hugging_face_id":"mistralai/Mixtral-8x22B-Instruct-v0.1","name":"Mistral: Mixtral 8x22B Instruct","created":1713312000,"description":"Mistral's official instruct fine-tuned version of [Mixtral 8x22B](/models/mistralai/mixtral-8x22b). It uses 39B active parameters out of 141B, offering unparalleled cost efficiency for its size. Its strengths include: - strong math, coding,...","context_length":65536,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":"mistral"},"pricing":{"prompt":"0.000002","completion":"0.000006","input_cache_read":"0.0000002"},"top_provider":{"context_length":65536,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3},"supported_voices":null,"knowledge_cutoff":"2024-01-31","expiration_date":null,"links":{"details":"/api/v1/models/mistralai/mixtral-8x22b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":6.0}},{"id":"microsoft/wizardlm-2-8x22b","canonical_slug":"microsoft/wizardlm-2-8x22b","hugging_face_id":"microsoft/WizardLM-2-8x22B","name":"WizardLM-2 8x22B","created":1713225600,"description":"WizardLM-2 8x22B is Microsoft AI's most advanced Wizard model. It demonstrates highly competitive performance compared to leading proprietary models, and it consistently outperforms all existing state-of-the-art opensource models. It is...","context_length":65535,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":"vicuna"},"pricing":{"prompt":"0.00000062","completion":"0.00000062"},"top_provider":{"context_length":65535,"max_completion_tokens":8000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2024-04-30","expiration_date":null,"links":{"details":"/api/v1/models/microsoft/wizardlm-2-8x22b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.62,"completion":0.62}},{"id":"openai/gpt-4-turbo","canonical_slug":"openai/gpt-4-turbo","hugging_face_id":null,"name":"OpenAI: GPT-4 Turbo","created":1712620800,"description":"The latest GPT-4 Turbo model with vision capabilities. Vision requests can now use JSON mode and function calling.\n\nTraining data: up to December 2023.","context_length":128000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00001","completion":"0.00003"},"top_provider":{"context_length":128000,"max_completion_tokens":4096,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-4-turbo/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":10.0,"completion":30.0}},{"id":"anthropic/claude-3-haiku","canonical_slug":"anthropic/claude-3-haiku","hugging_face_id":null,"name":"Anthropic: Claude 3 Haiku","created":1710288000,"description":"Claude 3 Haiku is Anthropic's fastest and most compact model for\nnear-instant responsiveness. Quick and accurate targeted performance.\n\nSee the launch announcement and benchmark results [here](https://www.anthropic.com/news/claude-3-haiku)\n\n#multimodal","context_length":200000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Claude","instruct_type":null},"pricing":{"prompt":"0.00000025","completion":"0.00000125","input_cache_read":"0.00000003","input_cache_write":"0.0000003"},"top_provider":{"context_length":200000,"max_completion_tokens":4096,"is_moderated":true},"per_request_limits":null,"supported_parameters":["max_tokens","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-08-31","expiration_date":null,"links":{"details":"/api/v1/models/anthropic/claude-3-haiku/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.25,"completion":1.25}},{"id":"anthropic/claude-3-opus","canonical_slug":"anthropic/claude-3-opus","hugging_face_id":null,"name":"Anthropic: Claude 3 Opus","created":1709596800,"description":"Claude 3 Opus is Anthropic's most powerful model for highly complex tasks. It boasts top-level performance, intelligence, fluency, and understanding.\n\nSee the launch announcement and benchmark results [here](https://www.anthropic.com/news/claude-3-family)\n\n#multimodal","context_length":200000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Claude","instruct_type":null},"pricing":{"prompt":"0.000015","completion":"0.000075","request":"0","image":"0.024","web_search":"0","internal_reasoning":"0","input_cache_read":"0.0000015","input_cache_write":"0.00001875"},"top_provider":{"context_length":200000,"max_completion_tokens":4096,"is_moderated":true},"per_request_limits":null,"supported_parameters":["max_tokens","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":15.0,"completion":75.0}},{"id":"mistralai/mistral-large","canonical_slug":"mistralai/mistral-large","hugging_face_id":null,"name":"Mistral Large","created":1708905600,"description":"This is Mistral AI's flagship model, Mistral Large 2 (version `mistral-large-2407`). It's a proprietary weights-available model and excels at reasoning, code, JSON, chat, and more. Read the launch announcement [here](https://mistral.ai/news/mistral-large-2407/)....","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.000002","completion":"0.000006","input_cache_read":"0.0000002"},"top_provider":{"context_length":128000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3},"supported_voices":null,"knowledge_cutoff":"2024-11-30","expiration_date":null,"links":{"details":"/api/v1/models/mistralai/mistral-large/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":6.0}},{"id":"openai/gpt-4-turbo-preview","canonical_slug":"openai/gpt-4-turbo-preview","hugging_face_id":null,"name":"OpenAI: GPT-4 Turbo Preview","created":1706140800,"description":"The preview GPT-4 model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. Training data: up to Dec 2023. **Note:** heavily rate limited by OpenAI while...","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00001","completion":"0.00003"},"top_provider":{"context_length":128000,"max_completion_tokens":4096,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-4-turbo-preview/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":10.0,"completion":30.0}},{"id":"openai/gpt-3.5-turbo-0613","canonical_slug":"openai/gpt-3.5-turbo-0613","hugging_face_id":null,"name":"OpenAI: GPT-3.5 Turbo (older v0613)","created":1706140800,"description":"GPT-3.5 Turbo is OpenAI's fastest model. It can understand and generate natural language or code, and is optimized for chat and traditional completion tasks.\n\nTraining data up to Sep 2021.","context_length":4095,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.000001","completion":"0.000002"},"top_provider":{"context_length":4095,"max_completion_tokens":4096,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_completion_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2021-09-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-3.5-turbo-0613/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.0,"completion":2.0}},{"id":"mistralai/mistral-small","canonical_slug":"mistralai/mistral-small","hugging_face_id":null,"name":"Mistral Small","created":1704844800,"description":"With 22 billion parameters, Mistral Small v24.09 offers a convenient mid-point between (Mistral NeMo 12B)[/mistralai/mistral-nemo] and (Mistral Large 2)[/mistralai/mistral-large], providing a cost-effective solution that can be deployed across various platforms and environments. It has better reasoning, exhibits more capabilities, can produce and reason about code, and is multiligual, supporting English, French, German, Italian, and Spanish.","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.0000002","completion":"0.0000006","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":0.6}},{"id":"mistralai/mistral-tiny","canonical_slug":"mistralai/mistral-tiny","hugging_face_id":null,"name":"Mistral Tiny","created":1704844800,"description":"Note: This model is being deprecated. Recommended replacement is the newer [Ministral 8B](/mistral/ministral-8b)\n\nThis model is currently powered by Mistral-7B-v0.2, and incorporates a \"better\" fine-tuning than [Mistral 7B](/models/mistralai/mistral-7b-instruct-v0.1), inspired by community work. It's best used for large batch processing tasks where cost is a significant factor but reasoning capabilities are not crucial.","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.00000025","completion":"0.00000025"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.25,"completion":0.25}},{"id":"mistralai/mistral-7b-instruct-v0.2","canonical_slug":"mistralai/mistral-7b-instruct-v0.2","hugging_face_id":"mistralai/Mistral-7B-Instruct-v0.2","name":"Mistral: Mistral 7B Instruct v0.2","created":1703721600,"description":"A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.\n\nAn improved version of [Mistral 7B Instruct](/modelsmistralai/mistral-7b-instruct-v0.1), with the following changes:\n\n- 32k context window (vs 8k context in v0.1)\n- Rope-theta = 1e6\n- No Sliding-Window Attention","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":"mistral"},"pricing":{"prompt":"0.0000002","completion":"0.0000002"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","stop","temperature","top_k","top_p"],"default_parameters":{"temperature":0.3},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":0.2}},{"id":"mistralai/mixtral-8x7b-instruct","canonical_slug":"mistralai/mixtral-8x7b-instruct","hugging_face_id":"mistralai/Mixtral-8x7B-Instruct-v0.1","name":"Mistral: Mixtral 8x7B Instruct","created":1702166400,"description":"Mixtral 8x7B Instruct is a pretrained generative Sparse Mixture of Experts, by Mistral AI, for chat and instruction use. Incorporates 8 experts (feed-forward networks) for a total of 47 billion...","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":"mistral"},"pricing":{"prompt":"0.00000054","completion":"0.00000054"},"top_provider":{"context_length":32768,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.3},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":"2026-05-07","links":{"details":"/api/v1/models/mistralai/mixtral-8x7b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.54,"completion":0.54}},{"id":"neversleep/noromaid-20b","canonical_slug":"neversleep/noromaid-20b","hugging_face_id":"NeverSleep/Noromaid-20b-v0.1.1","name":"Noromaid 20B","created":1700956800,"description":"A collab between IkariDev and Undi. This merge is suitable for RP, ERP, and general knowledge.\n\n#merge #uncensored","context_length":4096,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama2","instruct_type":"alpaca"},"pricing":{"prompt":"0.000001","completion":"0.00000175"},"top_provider":{"context_length":4096,"max_completion_tokens":2048,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","stop","structured_outputs","temperature","top_p"],"default_parameters":{},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.0,"completion":1.75}},{"id":"alpindale/goliath-120b","canonical_slug":"alpindale/goliath-120b","hugging_face_id":"alpindale/goliath-120b","name":"Goliath 120B","created":1699574400,"description":"A large LLM created by combining two fine-tuned Llama 70B models into one 120B model. Combines Xwin and Euryale. Credits to - [@chargoddard](https://huggingface.co/chargoddard) for developing the framework used to merge...","context_length":6144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama2","instruct_type":"airoboros"},"pricing":{"prompt":"0.00000375","completion":"0.0000075"},"top_provider":{"context_length":6144,"max_completion_tokens":1024,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","top_a","top_k","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-12-31","expiration_date":null,"links":{"details":"/api/v1/models/alpindale/goliath-120b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":3.75,"completion":7.5}},{"id":"openrouter/auto","canonical_slug":"openrouter/auto","hugging_face_id":null,"name":"Auto Router","created":1699401600,"description":"Your prompt will be processed by a meta-model and routed to one of dozens of models (see below), optimizing for the best possible output. To see which model was used,...","context_length":2000000,"architecture":{"modality":"text+image+file+audio+video->text+image","input_modalities":["text","image","audio","file","video"],"output_modalities":["text","image"],"tokenizer":"Router","instruct_type":null},"pricing":{"prompt":"-1","completion":"-1"},"top_provider":{"context_length":null,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_completion_tokens","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p","web_search_options"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openrouter/auto/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":-1000000.0,"completion":-1000000.0}},{"id":"openai/gpt-4-1106-preview","canonical_slug":"openai/gpt-4-1106-preview","hugging_face_id":null,"name":"OpenAI: GPT-4 Turbo (older v1106)","created":1699228800,"description":"The latest GPT-4 Turbo model with vision capabilities. Vision requests can now use JSON mode and function calling.\n\nTraining data: up to April 2023.","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00001","completion":"0.00003"},"top_provider":{"context_length":128000,"max_completion_tokens":4096,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-04-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-4-1106-preview/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":10.0,"completion":30.0}},{"id":"openai/gpt-3.5-turbo-instruct","canonical_slug":"openai/gpt-3.5-turbo-instruct","hugging_face_id":null,"name":"OpenAI: GPT-3.5 Turbo Instruct","created":1695859200,"description":"This model is a variant of GPT-3.5 Turbo tuned for instructional prompts and omitting chat-related optimizations. Training data: up to Sep 2021.","context_length":4095,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":"chatml"},"pricing":{"prompt":"0.0000015","completion":"0.000002"},"top_provider":{"context_length":4095,"max_completion_tokens":4096,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2021-09-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-3.5-turbo-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.5,"completion":2.0}},{"id":"mistralai/mistral-7b-instruct-v0.1","canonical_slug":"mistralai/mistral-7b-instruct-v0.1","hugging_face_id":"mistralai/Mistral-7B-Instruct-v0.1","name":"Mistral: Mistral 7B Instruct v0.1","created":1695859200,"description":"A 7.3B parameter model that outperforms Llama 2 13B on all benchmarks, with optimizations for speed and context length.","context_length":2824,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":"mistral"},"pricing":{"prompt":"0.00000011","completion":"0.00000019"},"top_provider":{"context_length":2824,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","temperature","top_k","top_p"],"default_parameters":{"temperature":0.3},"supported_voices":null,"knowledge_cutoff":"2023-09-30","expiration_date":"2026-05-30","links":{"details":"/api/v1/models/mistralai/mistral-7b-instruct-v0.1/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.11,"completion":0.19}},{"id":"openai/gpt-3.5-turbo-16k","canonical_slug":"openai/gpt-3.5-turbo-16k","hugging_face_id":null,"name":"OpenAI: GPT-3.5 Turbo 16k","created":1693180800,"description":"This model offers four times the context length of gpt-3.5-turbo, allowing it to support approximately 20 pages of text in a single request at a higher cost. Training data: up...","context_length":16385,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.000003","completion":"0.000004"},"top_provider":{"context_length":16385,"max_completion_tokens":4096,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_completion_tokens","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2021-09-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-3.5-turbo-16k/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":3.0,"completion":4.0}},{"id":"mancer/weaver","canonical_slug":"mancer/weaver","hugging_face_id":null,"name":"Mancer: Weaver (alpha)","created":1690934400,"description":"An attempt to recreate Claude-style verbosity, but don't expect the same level of coherence or memory. Meant for use in roleplay/narrative situations.","context_length":8000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama2","instruct_type":"alpaca"},"pricing":{"prompt":"0.00000075","completion":"0.000001"},"top_provider":{"context_length":8000,"max_completion_tokens":2000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","top_a","top_k","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-06-30","expiration_date":null,"links":{"details":"/api/v1/models/mancer/weaver/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.75,"completion":1.0}},{"id":"undi95/remm-slerp-l2-13b","canonical_slug":"undi95/remm-slerp-l2-13b","hugging_face_id":"Undi95/ReMM-SLERP-L2-13B","name":"ReMM SLERP 13B","created":1689984000,"description":"A recreation trial of the original MythoMax-L2-B13 but with updated models. #merge","context_length":6144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama2","instruct_type":"alpaca"},"pricing":{"prompt":"0.00000045","completion":"0.00000065"},"top_provider":{"context_length":6144,"max_completion_tokens":4096,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_a","top_k","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-06-30","expiration_date":null,"links":{"details":"/api/v1/models/undi95/remm-slerp-l2-13b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.45,"completion":0.65}},{"id":"gryphe/mythomax-l2-13b","canonical_slug":"gryphe/mythomax-l2-13b","hugging_face_id":"Gryphe/MythoMax-L2-13b","name":"MythoMax 13B","created":1688256000,"description":"One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay. #merge","context_length":4096,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama2","instruct_type":"alpaca"},"pricing":{"prompt":"0.00000006","completion":"0.00000006"},"top_provider":{"context_length":4096,"max_completion_tokens":4096,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_a","top_k","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2023-06-30","expiration_date":null,"links":{"details":"/api/v1/models/gryphe/mythomax-l2-13b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.06,"completion":0.06}},{"id":"openai/gpt-4-0314","canonical_slug":"openai/gpt-4-0314","hugging_face_id":null,"name":"OpenAI: GPT-4 (older v0314)","created":1685232000,"description":"GPT-4-0314 is the first version of GPT-4 released, with a context length of 8,192 tokens, and was supported until June 14. Training data: up to Sep 2021.","context_length":8191,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00003","completion":"0.00006"},"top_provider":{"context_length":8191,"max_completion_tokens":4096,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2021-09-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-4-0314/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":30.0,"completion":60.0}},{"id":"openai/gpt-3.5-turbo","canonical_slug":"openai/gpt-3.5-turbo","hugging_face_id":null,"name":"OpenAI: GPT-3.5 Turbo","created":1685232000,"description":"GPT-3.5 Turbo is OpenAI's fastest model. It can understand and generate natural language or code, and is optimized for chat and traditional completion tasks.\n\nTraining data up to Sep 2021.","context_length":16385,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.0000005","completion":"0.0000015"},"top_provider":{"context_length":16385,"max_completion_tokens":4096,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2021-09-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-3.5-turbo/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.5,"completion":1.5}},{"id":"openai/gpt-4","canonical_slug":"openai/gpt-4","hugging_face_id":null,"name":"OpenAI: GPT-4","created":1685232000,"description":"OpenAI's flagship model, GPT-4 is a large-scale multimodal language model capable of solving difficult problems with greater accuracy than previous models due to its broader general knowledge and advanced reasoning...","context_length":8191,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00003","completion":"0.00006"},"top_provider":{"context_length":8191,"max_completion_tokens":4096,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_completion_tokens","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2021-09-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-4/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":30.0,"completion":60.0}},{"id":"deepseek/deepseek-v3.2-speciale","canonical_slug":"deepseek/deepseek-v3.2-speciale-20251201","hugging_face_id":"deepseek-ai/DeepSeek-V3.2-Speciale","name":"DeepSeek: DeepSeek V3.2 Speciale","created":1764594837,"description":"DeepSeek-V3.2-Speciale is a high-compute variant of DeepSeek-V3.2 optimized for maximum reasoning and agentic performance. It builds on DeepSeek Sparse Attention (DSA) for efficient long-context processing, then scales post-training reinforcement learning...","context_length":163840,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":null},"pricing":{"prompt":"0.000000287","completion":"0.000000431","input_cache_read":"0.000000058"},"top_provider":{"context_length":163840,"max_completion_tokens":163840,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/deepseek/deepseek-v3.2-speciale-20251201/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.287,"completion":0.431}},{"id":"deepseek/deepseek-v3.2","canonical_slug":"deepseek/deepseek-v3.2-20251201","hugging_face_id":"deepseek-ai/DeepSeek-V3.2","name":"DeepSeek: DeepSeek V3.2","created":1764594642,"description":"DeepSeek-V3.2 is a large language model designed to harmonize high computational efficiency with strong reasoning and agentic tool-use performance. It introduces DeepSeek Sparse Attention (DSA), a fine-grained sparse attention mechanism that reduces training and inference cost while preserving quality in long-context scenarios. A scalable reinforcement learning post-training framework further improves reasoning, with reported performance in the GPT-5 class, and the model has demonstrated gold-medal results on the 2025 IMO and IOI. V3.2 also uses a large-scale agentic task synthesis pipeline to better integrate reasoning into tool-use settings, boosting compliance and generalization in interactive environments.\r\n\r\nUsers can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config)","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":null},"pricing":{"prompt":"0.000000252","completion":"0.000000378","input_cache_read":"0.0000000252"},"top_provider":{"context_length":131072,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/deepseek/deepseek-v3.2-20251201/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.25,"completion":0.38}},{"id":"arcee-ai/trinity-mini:free","canonical_slug":"arcee-ai/trinity-mini-20251201","hugging_face_id":"arcee-ai/Trinity-Mini","name":"Arcee AI: Trinity Mini (free)","created":1764601720,"description":"Trinity Mini is a 26B-parameter (3B active) sparse mixture-of-experts language model featuring 128 experts with 8 active per token. Engineered for efficient reasoning over long contexts (131k) with robust function calling and multi-step agent workflows.","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.15,"top_p":0.75,"frequency_penalty":null},"knowledge_cutoff":null,"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"arcee-ai/trinity-mini","canonical_slug":"arcee-ai/trinity-mini-20251201","hugging_face_id":"arcee-ai/Trinity-Mini","name":"Arcee AI: Trinity Mini","created":1764601720,"description":"Trinity Mini is a 26B-parameter (3B active) sparse mixture-of-experts language model featuring 128 experts with 8 active per token. Engineered for efficient reasoning over long contexts (131k) with robust function...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.000000045","completion":"0.00000015"},"top_provider":{"context_length":131072,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.15,"top_p":0.75,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/arcee-ai/trinity-mini-20251201/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.045,"completion":0.15}},{"id":"mistralai/mistral-large-2512","canonical_slug":"mistralai/mistral-large-2512","hugging_face_id":"","name":"Mistral: Mistral Large 3 2512","created":1764624472,"description":"Mistral Large 3 2512 is Mistral’s most capable model to date, featuring a sparse mixture-of-experts architecture with 41B active parameters (675B total), and released under the Apache 2.0 license.","context_length":262144,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.0000005","completion":"0.0000015","input_cache_read":"0.00000005"},"top_provider":{"context_length":262144,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.0645,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/mistralai/mistral-large-2512/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.5,"completion":1.5}},{"id":"amazon/nova-2-lite-v1:free","canonical_slug":"amazon/nova-2-lite-v1","hugging_face_id":"","name":"Amazon: Nova 2 Lite (free)","created":1764696672,"description":"Nova 2 Lite is a fast, cost-effective reasoning model for everyday workloads that can process text, images, and videos to generate text. \n\nNova 2 Lite demonstrates standout capabilities in processing documents, extracting information from videos, generating code, providing accurate grounded answers, and automating multi-step agentic workflows.","context_length":1000000,"architecture":{"modality":"text+image->text","input_modalities":["text","image","video","file"],"output_modalities":["text"],"tokenizer":"Nova","instruct_type":null},"pricing":{"prompt":"0","completion":"0","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":1000000,"max_completion_tokens":65535,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","seed","stop","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"amazon/nova-2-lite-v1","canonical_slug":"amazon/nova-2-lite-v1","hugging_face_id":"","name":"Amazon: Nova 2 Lite","created":1764696672,"description":"Nova 2 Lite is a fast, cost-effective reasoning model for everyday workloads that can process text, images, and videos to generate text. Nova 2 Lite demonstrates standout capabilities in processing...","context_length":1000000,"architecture":{"modality":"text+image+file+video->text","input_modalities":["text","image","video","file"],"output_modalities":["text"],"tokenizer":"Nova","instruct_type":null},"pricing":{"prompt":"0.0000003","completion":"0.0000025"},"top_provider":{"context_length":1000000,"max_completion_tokens":65535,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/amazon/nova-2-lite-v1/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.3,"completion":2.5}},{"id":"allenai/olmo-3-32b-think:free","canonical_slug":"allenai/olmo-3-32b-think-20251121","hugging_face_id":"allenai/Olmo-3-32B-Think","name":"AllenAI: Olmo 3 32B Think (free)","created":1763758276,"description":"Olmo 3 32B Think is a large-scale, 32-billion-parameter model purpose-built for deep reasoning, complex logic chains and advanced instruction-following scenarios. Its capacity enables strong performance on demanding evaluation tasks and highly nuanced conversational reasoning. Developed by Ai2 under the Apache 2.0 license, Olmo 3 32B Think embodies the Olmo initiative’s commitment to openness, offering full transparency across weights, code and training methodology.","context_length":65536,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":65536,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{"temperature":0.6,"top_p":0.95,"frequency_penalty":null},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"x-ai/grok-4.1-fast","canonical_slug":"x-ai/grok-4.1-fast","hugging_face_id":"","name":"xAI: Grok 4.1 Fast","created":1763587502,"description":"Grok 4.1 Fast is xAI's best agentic tool calling model that shines in real-world use cases like customer support and deep research. 2M context window.\r\n\r\nReasoning can be enabled/disabled using the `reasoning` `enabled` parameter in the API. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#controlling-reasoning-tokens)","context_length":2000000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"Grok","instruct_type":null},"pricing":{"prompt":"0.0000002","completion":"0.0000005","web_search":"0.005","input_cache_read":"0.00000005"},"top_provider":{"context_length":2000000,"max_completion_tokens":30000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","logprobs","max_tokens","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{"temperature":0.7,"top_p":0.95,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":"2026-05-15","links":{"details":"/api/v1/models/x-ai/grok-4.1-fast/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":0.5}},{"id":"mistralai/ministral-14b-2512","canonical_slug":"mistralai/ministral-14b-2512","hugging_face_id":"mistralai/Ministral-3-14B-Instruct-2512","name":"Mistral: Ministral 3 14B 2512","created":1764681735,"description":"The largest model in the Ministral 3 family, Ministral 3 14B offers frontier capabilities and performance comparable to its larger Mistral Small 3.2 24B counterpart. A powerful and efficient language...","context_length":262144,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.0000002","completion":"0.0000002","input_cache_read":"0.00000002"},"top_provider":{"context_length":262144,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logprobs","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{"temperature":0.3,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/mistralai/ministral-14b-2512/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":0.2}},{"id":"mistralai/ministral-8b-2512","canonical_slug":"mistralai/ministral-8b-2512","hugging_face_id":"mistralai/Ministral-3-8B-Instruct-2512","name":"Mistral: Ministral 3 8B 2512","created":1764681654,"description":"A balanced model in the Ministral 3 family, Ministral 3 8B is a powerful, efficient tiny language model with vision capabilities.","context_length":262144,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.00000015","completion":"0.00000015","input_cache_read":"0.000000015"},"top_provider":{"context_length":262144,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logprobs","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{"temperature":0.3,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/mistralai/ministral-8b-2512/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.15,"completion":0.15}},{"id":"mistralai/ministral-3b-2512","canonical_slug":"mistralai/ministral-3b-2512","hugging_face_id":"mistralai/Ministral-3-3B-Instruct-2512","name":"Mistral: Ministral 3 3B 2512","created":1764681560,"description":"The smallest model in the Ministral 3 family, Ministral 3 3B is a powerful, efficient tiny language model with vision capabilities.","context_length":131072,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.0000001","completion":"0.0000001","input_cache_read":"0.00000001"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logprobs","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{"temperature":0.3,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/mistralai/ministral-3b-2512/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.1}},{"id":"openrouter/bodybuilder","canonical_slug":"openrouter/bodybuilder","hugging_face_id":"","name":"Body Builder (beta)","created":1764903653,"description":"Transform your natural language requests into structured OpenRouter API request objects. Describe what you want to accomplish with AI models, and Body Builder will construct the appropriate API calls. Example:...","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Router","instruct_type":null},"pricing":{"prompt":"-1","completion":"-1"},"top_provider":{"context_length":null,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":[],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openrouter/bodybuilder/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":-1000000.0,"completion":-1000000.0}},{"id":"openai/gpt-oss-120b:free","canonical_slug":"openai/gpt-oss-120b","hugging_face_id":"openai/gpt-oss-120b","name":"OpenAI: gpt-oss-120b (free)","created":1754414231,"description":"gpt-oss-120b is an open-weight, 117B-parameter Mixture-of-Experts (MoE) language model from OpenAI designed for high-reasoning, agentic, and general-purpose production use cases. It activates 5.1B parameters per forward pass and is optimized...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":131072,"max_completion_tokens":131072,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","seed","stop","temperature","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":"2024-06-30","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-oss-120b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"openai/gpt-5.1-codex-max","canonical_slug":"openai/gpt-5.1-codex-max-20251204","hugging_face_id":"","name":"OpenAI: GPT-5.1-Codex-Max","created":1764878934,"description":"GPT-5.1-Codex-Max is OpenAI’s latest agentic coding model, designed for long-running, high-context software development tasks. It is based on an updated version of the 5.1 reasoning stack and trained on agentic...","context_length":400000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000125","completion":"0.00001","web_search":"0.01","input_cache_read":"0.000000125"},"top_provider":{"context_length":400000,"max_completion_tokens":128000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5.1-codex-max-20251204/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.25,"completion":10.0}},{"id":"essentialai/rnj-1-instruct","canonical_slug":"essentialai/rnj-1-instruct","hugging_face_id":"EssentialAI/rnj-1-instruct","name":"EssentialAI: Rnj 1 Instruct","created":1765094847,"description":"Rnj-1 is an 8B-parameter, dense, open-weight model family developed by Essential AI and trained from scratch with a focus on programming, math, and scientific reasoning. The model demonstrates strong performance...","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000015","completion":"0.00000015"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/essentialai/rnj-1-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.15,"completion":0.15}},{"id":"relace/relace-search","canonical_slug":"relace/relace-search-20251208","hugging_face_id":null,"name":"Relace: Relace Search","created":1765213560,"description":"The relace-search model uses 4-12 `view_file` and `grep` tools in parallel to explore a codebase and return relevant files to the user request. In contrast to RAG, relace-search performs agentic...","context_length":256000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.000001","completion":"0.000003"},"top_provider":{"context_length":256000,"max_completion_tokens":128000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","seed","stop","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/relace/relace-search-20251208/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.0,"completion":3.0}},{"id":"z-ai/glm-4.6v","canonical_slug":"z-ai/glm-4.6-20251208","hugging_face_id":"zai-org/GLM-4.6V","name":"Z.ai: GLM 4.6V","created":1765207462,"description":"GLM-4.6V is a large multimodal model designed for high-fidelity visual understanding and long-context reasoning across images, documents, and mixed media. It supports up to 128K tokens, processes complex page layouts...","context_length":131072,"architecture":{"modality":"text+image+video->text","input_modalities":["image","text","video"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000003","completion":"0.0000009","input_cache_read":"0.00000005"},"top_provider":{"context_length":131072,"max_completion_tokens":24000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.8,"top_p":0.6,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/z-ai/glm-4.6-20251208/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.3,"completion":0.9}},{"id":"mistralai/devstral-2512:free","canonical_slug":"mistralai/devstral-2512","hugging_face_id":"mistralai/Devstral-2-123B-Instruct-2512","name":"Mistral: Devstral 2 2512 (free)","created":1765285419,"description":"Devstral 2 is a state-of-the-art open-source model by Mistral AI specializing in agentic coding. It is a 123B-parameter dense transformer model supporting a 256K context window.\n\nDevstral 2 supports exploring codebases and orchestrating changes across multiple files while maintaining architecture-level context. It tracks framework dependencies, detects failures, and retries with corrections—solving challenges like bug fixing and modernizing legacy systems. The model can be fine-tuned to prioritize specific languages or optimize for large enterprise codebases. It is available under a modified MIT license.","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":262144,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3,"top_p":null,"frequency_penalty":null},"expiration_date":"2026-01-27","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"nex-agi/deepseek-v3.1-nex-n1:free","canonical_slug":"nex-agi/deepseek-v3.1-nex-n1","hugging_face_id":"nex-agi/DeepSeek-V3.1-Nex-N1","name":"Nex AGI: DeepSeek V3.1 Nex N1 (free)","created":1765204393,"description":"DeepSeek V3.1 Nex-N1 is the flagship release of the Nex-N1 series — a post-trained model designed to highlight agent autonomy, tool use, and real-world productivity. \n\nNex-N1 demonstrates competitive performance across all evaluation scenarios, showing particularly strong results in practical coding and HTML generation tasks.","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":null},"pricing":{"prompt":"0","completion":"0","request":"0","image":"0","web_search":"0","internal_reasoning":"0","input_cache_read":"0","input_cache_write":"0"},"top_provider":{"context_length":131072,"max_completion_tokens":163840,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","response_format","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"mistralai/devstral-2512","canonical_slug":"mistralai/devstral-2512","hugging_face_id":"mistralai/Devstral-2-123B-Instruct-2512","name":"Mistral: Devstral 2 2512","created":1765285419,"description":"Devstral 2 is a state-of-the-art open-source model by Mistral AI specializing in agentic coding. It is a 123B-parameter dense transformer model supporting a 256K context window. Devstral 2 supports exploring...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.0000004","completion":"0.000002","input_cache_read":"0.00000004"},"top_provider":{"context_length":262144,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.3,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/mistralai/devstral-2512/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.4,"completion":2.0}},{"id":"openai/gpt-5.2-chat","canonical_slug":"openai/gpt-5.2-chat-20251211","hugging_face_id":"","name":"OpenAI: GPT-5.2 Chat","created":1765389783,"description":"GPT-5.2 Chat (AKA Instant) is the fast, lightweight member of the 5.2 family, optimized for low-latency chat while retaining strong general intelligence. It uses adaptive reasoning to selectively “think” on harder queries, improving accuracy on math, coding, and multi-step tasks without slowing down typical conversations. The model is warmer and more conversational by default, with better instruction following and more stable short-form reasoning. GPT-5.2 Chat is designed for high-throughput, interactive workloads where responsiveness and consistency matter more than deep deliberation.","context_length":128000,"architecture":{"modality":"text+image+file->text","input_modalities":["file","image","text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000175","completion":"0.000014","input_cache_read":"0.000000175"},"top_provider":{"context_length":128000,"max_completion_tokens":32000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_completion_tokens","max_tokens","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5.2-chat-20251211/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.75,"completion":14.0}},{"id":"openai/gpt-5.2-pro","canonical_slug":"openai/gpt-5.2-pro-20251211","hugging_face_id":"","name":"OpenAI: GPT-5.2 Pro","created":1765389780,"description":"GPT-5.2 Pro is OpenAI’s most advanced model, offering major improvements in agentic coding and long context performance over GPT-5 Pro. It is optimized for complex tasks that require step-by-step reasoning,...","context_length":400000,"architecture":{"modality":"text+image+file->text","input_modalities":["image","text","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.000021","completion":"0.000168","web_search":"0.01"},"top_provider":{"context_length":400000,"max_completion_tokens":128000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5.2-pro-20251211/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":21.0,"completion":168.0}},{"id":"openai/gpt-5.2","canonical_slug":"openai/gpt-5.2-20251211","hugging_face_id":"","name":"OpenAI: GPT-5.2","created":1765389775,"description":"GPT-5.2 is the latest frontier-grade model in the GPT-5 series, offering stronger agentic and long context perfomance compared to GPT-5.1. It uses adaptive reasoning to allocate computation dynamically, responding quickly to simple queries while spending more depth on complex tasks.\r\n\r\nBuilt for broad task coverage, GPT-5.2 delivers consistent gains across math, coding, sciende, and tool calling workloads, with more coherent long-form answers and improved tool-use reliability.","context_length":400000,"architecture":{"modality":"text+image+file->text","input_modalities":["file","image","text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000175","completion":"0.000014","input_cache_read":"0.000000175"},"top_provider":{"context_length":400000,"max_completion_tokens":128000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5.2-20251211/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.75,"completion":14.0}},{"id":"meta-llama/llama-guard-4-12b:free","canonical_slug":"meta-llama/llama-guard-4-12b","hugging_face_id":"meta-llama/Llama-Guard-4-12B","name":"Meta: Llama Guard 4 12B (free)","created":1745975193,"description":"Llama Guard 4 is a Llama 4 Scout-derived multimodal pretrained model, fine-tuned for content safety classification. Similar to previous versions, it can be used to classify content in both LLM...","context_length":163840,"architecture":{"modality":"text+image->text","input_modalities":["image","text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":163840,"max_completion_tokens":65000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","seed","temperature","top_p"],"default_parameters":{},"knowledge_cutoff":"2024-08-31","expiration_date":null,"links":{"details":"/api/v1/models/meta-llama/llama-guard-4-12b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"nvidia/nemotron-3-nano-30b-a3b:free","canonical_slug":"nvidia/nemotron-3-nano-30b-a3b","hugging_face_id":"nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16","name":"NVIDIA: Nemotron 3 Nano 30B A3B (free)","created":1765731275,"description":"NVIDIA Nemotron 3 Nano 30B A3B is a small language MoE model with highest compute efficiency and accuracy for developers to build specialized agentic AI systems. The model is fully...","context_length":256000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":256000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","seed","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/nvidia/nemotron-3-nano-30b-a3b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"xiaomi/mimo-v2-flash:free","canonical_slug":"xiaomi/mimo-v2-flash-20251210","hugging_face_id":"XiaomiMiMo/MiMo-V2-Flash","name":"Xiaomi: MiMo-V2-Flash (free)","created":1765731308,"description":"MiMo-V2-Flash is an open-source foundation language model developed by Xiaomi. It is a Mixture-of-Experts model with 309B total parameters and 15B active parameters, adopting hybrid attention architecture. MiMo-V2-Flash supports a hybrid-thinking toggle and a 256K context window, and excels at reasoning, coding, and agent scenarios. On SWE-bench Verified and SWE-bench Multilingual, MiMo-V2-Flash ranks as the top #1 open-source model globally, delivering performance comparable to Claude Sonnet 4.5 while costing only about 3.5% as much.\n\nUsers can control the reasoning behaviour with the `reasoning` `enabled` boolean. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#enable-reasoning-with-default-config).","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":262144,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","response_format","stop","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":0.95,"frequency_penalty":null},"expiration_date":"2026-01-26","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"allenai/olmo-3.1-32b-think:free","canonical_slug":"allenai/olmo-3.1-32b-think-20251215","hugging_face_id":"allenai/Olmo-3.1-32B-Think","name":"AllenAI: Olmo 3.1 32B Think (free)","created":1765907719,"description":"Olmo 3.1 32B Think is a large-scale, 32-billion-parameter model designed for deep reasoning, complex multi-step logic, and advanced instruction following. Building on the Olmo 3 series, version 3.1 delivers refined reasoning behavior and stronger performance across demanding evaluations and nuanced conversational tasks. Developed by Ai2 under the Apache 2.0 license, Olmo 3.1 32B Think continues the Olmo initiative’s commitment to openness, providing full transparency across model weights, code, and training methodology.","context_length":65536,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":65536,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{"temperature":0.6,"top_p":0.95,"frequency_penalty":null},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"mistralai/mistral-small-creative","canonical_slug":"mistralai/mistral-small-creative-20251216","hugging_face_id":null,"name":"Mistral: Mistral Small Creative","created":1765908653,"description":"Mistral Small Creative is an experimental small model designed for creative writing, narrative generation, roleplay and character-driven dialogue, general-purpose instruction following, and conversational agents.","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.0000001","completion":"0.0000003","input_cache_read":"0.00000001"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["tool_choice","tools"],"default_parameters":{"temperature":0.3,"top_p":0.95,"frequency_penalty":null},"knowledge_cutoff":null,"expiration_date":"2026-04-30","links":{"details":"/api/v1/models/mistralai/mistral-small-creative-20251216/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.3}},{"id":"deepseek/deepseek-r1-0528:free","canonical_slug":"deepseek/deepseek-r1-0528","hugging_face_id":"deepseek-ai/DeepSeek-R1-0528","name":"DeepSeek: R1 0528 (free)","created":1748455170,"description":"May 28th update to the [original DeepSeek R1](/deepseek/deepseek-r1) Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass.\n\nFully open-source model.","context_length":163840,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":"deepseek-r1"},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":163840,"max_completion_tokens":163840,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","stop","temperature","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"expiration_date":"2026-02-24","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"meta-llama/llama-3.1-405b-instruct:free","canonical_slug":"meta-llama/llama-3.1-405b-instruct","hugging_face_id":"meta-llama/Meta-Llama-3.1-405B-Instruct","name":"Meta: Llama 3.1 405B Instruct (free)","created":1721692800,"description":"The highly anticipated 400B class of Llama3 is here! Clocking in at 128k context with impressive eval scores, the Meta AI team continues to push the frontier of open-source LLMs.\n\nMeta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 405B instruct-tuned version is optimized for high quality dialogue usecases.\n\nIt has demonstrated strong performance compared to leading closed-source models including GPT-4o and Claude 3.5 Sonnet in evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3-1/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","temperature"],"default_parameters":{},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"qwen/qwen3-vl-32b-instruct","canonical_slug":"qwen/qwen3-vl-32b-instruct","hugging_face_id":"Qwen/Qwen3-VL-32B-Instruct","name":"Qwen: Qwen3 VL 32B Instruct","created":1761231332,"description":"Qwen3-VL-32B-Instruct is a large-scale multimodal vision-language model designed for high-precision understanding and reasoning across text, images, and video. With 32 billion parameters, it combines deep visual perception with advanced text...","context_length":131072,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":null},"pricing":{"prompt":"0.000000104","completion":"0.000000416"},"top_provider":{"context_length":131072,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","presence_penalty","response_format","seed","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.7,"top_p":0.8,"top_k":20,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":1},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-vl-32b-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.104,"completion":0.416}},{"id":"qwen/qwen-2.5-vl-7b-instruct:free","canonical_slug":"qwen/qwen-2-vl-7b-instruct","hugging_face_id":"Qwen/Qwen2.5-VL-7B-Instruct","name":"Qwen: Qwen2.5-VL 7B Instruct (free)","created":1724803200,"description":"Qwen2.5 VL 7B is a multimodal LLM from the Qwen Team with the following key enhancements:\n\n- SoTA understanding of images of various resolution & ratio: Qwen2.5-VL achieves state-of-the-art performance on visual understanding benchmarks, including MathVista, DocVQA, RealWorldQA, MTVQA, etc.\n\n- Understanding videos of 20min+: Qwen2.5-VL can understand videos over 20 minutes for high-quality video-based question answering, dialog, content creation, etc.\n\n- Agent that can operate your mobiles, robots, etc.: with the abilities of complex reasoning and decision making, Qwen2.5-VL can be integrated with devices like mobile phones, robots, etc., for automatic operation based on visual environment and text instructions.\n\n- Multilingual Support: to serve global users, besides English and Chinese, Qwen2.5-VL now supports the understanding of texts in different languages inside images, including most European languages, Japanese, Korean, Arabic, Vietnamese, etc.\n\nFor more details, see this [blog post](https://qwenlm.github.io/blog/qwen2-vl/) and [GitHub repo](https://github.com/QwenLM/Qwen2-VL).\n\nUsage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).","context_length":32768,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","temperature"],"default_parameters":{},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"google/gemini-3-flash-preview","canonical_slug":"google/gemini-3-flash-preview-20251217","hugging_face_id":"","name":"Google: Gemini 3 Flash Preview","created":1765987078,"description":"Gemini 3 Flash Preview is a high speed, high value thinking model designed for agentic workflows, multi turn chat, and coding assistance. It delivers near Pro level reasoning and tool...","context_length":1048576,"architecture":{"modality":"text+image+file+audio+video->text","input_modalities":["text","image","file","audio","video"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0.0000005","completion":"0.000003","image":"0.0000005","audio":"0.000001","web_search":"0.014","internal_reasoning":"0.000003","input_cache_read":"0.00000005","input_cache_write":"0.00000008333333333333334"},"top_provider":{"context_length":1048576,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/google/gemini-3-flash-preview-20251217/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.5,"completion":3.0}},{"id":"nvidia/nemotron-3-nano-30b-a3b","canonical_slug":"nvidia/nemotron-3-nano-30b-a3b","hugging_face_id":"nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16","name":"NVIDIA: Nemotron 3 Nano 30B A3B","created":1765731275,"description":"NVIDIA Nemotron 3 Nano 30B A3B is a small language MoE model with highest compute efficiency and accuracy for developers to build specialized agentic AI systems. The model is fully...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000005","completion":"0.0000002"},"top_provider":{"context_length":262144,"max_completion_tokens":228000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/nvidia/nemotron-3-nano-30b-a3b/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.05,"completion":0.2}},{"id":"z-ai/glm-4.7","canonical_slug":"z-ai/glm-4.7-20251222","hugging_face_id":"zai-org/GLM-4.7","name":"Z.ai: GLM 4.7","created":1766378014,"description":"GLM-4.7 is Z.ai’s latest flagship model, featuring upgrades in two key areas: enhanced programming capabilities and more stable multi-step reasoning/execution. It demonstrates significant improvements in executing complex agent tasks while...","context_length":202752,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000004","completion":"0.00000175","input_cache_read":"0.00000008"},"top_provider":{"context_length":202752,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/z-ai/glm-4.7-20251222/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.4,"completion":1.75}},{"id":"minimax/minimax-m2.1","canonical_slug":"minimax/minimax-m2.1","hugging_face_id":"MiniMaxAI/MiniMax-M2.1","name":"MiniMax: MiniMax M2.1","created":1766454997,"description":"MiniMax-M2.1 is a lightweight, state-of-the-art large language model optimized for coding, agentic workflows, and modern application development. With only 10 billion activated parameters, it delivers a major jump in real-world...","context_length":196608,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000029","completion":"0.00000095","input_cache_read":"0.00000003"},"top_provider":{"context_length":196608,"max_completion_tokens":196608,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":1,"top_p":0.9,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/minimax/minimax-m2.1/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.29,"completion":0.95}},{"id":"bytedance-seed/seed-1.6-flash","canonical_slug":"bytedance-seed/seed-1.6-flash-20250625","hugging_face_id":"","name":"ByteDance Seed: Seed 1.6 Flash","created":1766505011,"description":"Seed 1.6 Flash is an ultra-fast multimodal deep thinking model by ByteDance Seed, supporting both text and visual understanding. It features a 256k context window and can generate outputs of...","context_length":262144,"architecture":{"modality":"text+image+video->text","input_modalities":["image","text","video"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.000000075","completion":"0.0000003"},"top_provider":{"context_length":262144,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/bytedance-seed/seed-1.6-flash-20250625/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.075,"completion":0.3}},{"id":"bytedance-seed/seed-1.6","canonical_slug":"bytedance-seed/seed-1.6-20250625","hugging_face_id":"","name":"ByteDance Seed: Seed 1.6","created":1766504997,"description":"Seed 1.6 is a general-purpose model released by the ByteDance Seed team. It incorporates multimodal capabilities and adaptive deep thinking with a 256K context window.","context_length":262144,"architecture":{"modality":"text+image+video->text","input_modalities":["image","text","video"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000025","completion":"0.000002"},"top_provider":{"context_length":262144,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/bytedance-seed/seed-1.6-20250625/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.25,"completion":2.0}},{"id":"test-model","object":"model","owned_by":"proxy","name":"test-model","type":"chat","pricing":{},"pricing_per_million":{"prompt":1.0,"completion":1.0}},{"id":"kwaipilot/kat-coder-pro","canonical_slug":"kwaipilot/kat-coder-pro-v1","hugging_face_id":"","name":"Kwaipilot: KAT-Coder-Pro V1","created":1762745912,"description":"KAT-Coder-Pro V1 is KwaiKAT's most advanced agentic coding model in the KAT-Coder series. Designed specifically for agentic coding tasks, it excels in real-world software engineering scenarios, achieving 73.4% solve rate on the SWE-Bench Verified benchmark. \n\nThe model has been optimized for tool-use capability, multi-turn interaction, instruction following, generalization, and comprehensive capabilities through a multi-stage training process, including mid-training, supervised fine-tuning (SFT), reinforcement fine-tuning (RFT), and scalable agentic RL.","context_length":256000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.000000207","completion":"0.000000828","input_cache_read":"0.0000000414"},"top_provider":{"context_length":256000,"max_completion_tokens":128000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"knowledge_cutoff":null,"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.207,"completion":0.828}},{"id":"allenai/olmo-3.1-32b-think","canonical_slug":"allenai/olmo-3.1-32b-think-20251215","hugging_face_id":"allenai/Olmo-3.1-32B-Think","name":"AllenAI: Olmo 3.1 32B Think","created":1765907719,"description":"Olmo 3.1 32B Think is a large-scale, 32-billion-parameter model designed for deep reasoning, complex multi-step logic, and advanced instruction following. Building on the Olmo 3 series, version 3.1 delivers refined reasoning behavior and stronger performance across demanding evaluations and nuanced conversational tasks. Developed by Ai2 under the Apache 2.0 license, Olmo 3.1 32B Think continues the Olmo initiative’s commitment to openness, providing full transparency across model weights, code, and training methodology.","context_length":65536,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000015","completion":"0.0000005"},"top_provider":{"context_length":65536,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{"temperature":0.6,"top_p":0.95,"frequency_penalty":null},"knowledge_cutoff":null,"expiration_date":"2026-04-06","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.15,"completion":0.5}},{"canonical_slug":"openai/gpt-oss-120b","hugging_face_id":"openai/gpt-oss-120b","created":1754414231,"description":"gpt-oss-120b is an open-weight, 117B-parameter Mixture-of-Experts (MoE) language model from OpenAI designed for high-reasoning, agentic, and general-purpose production use cases. It activates 5.1B parameters per forward pass and is optimized to run on a single H100 GPU with native MXFP4 quantization. The model supports configurable reasoning depth, full chain-of-thought access, and native tool use, including function calling, browsing, and structured output generation.","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"top_provider":{"context_length":131072,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","reasoning_effort","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"id":"openai/gpt-oss-120b:groq","object":"model","owned_by":"proxy","name":"OpenAI: gpt-oss-120b (groq)","type":"chat","pricing":{},"pricing_per_million":{"prompt":0.02,"completion":0.1}},{"id":"text-embedding-3-large","object":"model","owned_by":"proxy","name":"OpenAI: Text Embedding 3 Large","description":"text-embedding-3-large is OpenAI's most capable embedding model for both english and non-english tasks. Embeddings are a numerical representation of text that can be used to measure the relatedness between two pieces of text. Embeddings are useful for search, clustering, recommendations, anomaly detection, and classification tasks.","type":"embedding","pricing":{},"pricing_per_million":{"prompt":0.13,"completion":0.0}},{"id":"allenai/olmo-3.1-32b-instruct","canonical_slug":"allenai/olmo-3.1-32b-instruct-20251215","hugging_face_id":"allenai/Olmo-3.1-32B-Instruct","name":"AllenAI: Olmo 3.1 32B Instruct","created":1767728554,"description":"Olmo 3.1 32B Instruct is a large-scale, 32-billion-parameter instruction-tuned language model engineered for high-performance conversational AI, multi-turn dialogue, and practical instruction following. As part of the Olmo 3.1 family, this...","context_length":65536,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000002","completion":"0.0000006"},"top_provider":{"context_length":65536,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.6,"top_p":0.95,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":"2026-05-07","links":{"details":"/api/v1/models/allenai/olmo-3.1-32b-instruct-20251215/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":0.6}},{"id":"nex-agi/deepseek-v3.1-nex-n1","canonical_slug":"nex-agi/deepseek-v3.1-nex-n1","hugging_face_id":"nex-agi/DeepSeek-V3.1-Nex-N1","name":"Nex AGI: DeepSeek V3.1 Nex N1","created":1765204393,"description":"DeepSeek V3.1 Nex-N1 is the flagship release of the Nex-N1 series — a post-trained model designed to highlight agent autonomy, tool use, and real-world productivity. Nex-N1 demonstrates competitive performance across...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":null},"pricing":{"prompt":"0.000000135","completion":"0.0000005"},"top_provider":{"context_length":131072,"max_completion_tokens":163840,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","response_format","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/nex-agi/deepseek-v3.1-nex-n1/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.135,"completion":0.5}},{"id":"allenai/molmo-2-8b","canonical_slug":"allenai/molmo-2-8b-20260109","hugging_face_id":"allenai/Molmo2-8B","name":"AllenAI: Molmo2 8B","created":1767996672,"description":"Molmo2-8B is an open vision-language model developed by the Allen Institute for AI (Ai2) as part of the Molmo2 family, supporting image, video, and multi-image understanding and grounding. It is based on Qwen3-8B and uses SigLIP 2 as its vision backbone, outperforming other open-weight, open-data models on short videos, counting, and captioning, while remaining competitive on long-video tasks.","context_length":36864,"architecture":{"modality":"text+image+video->text","input_modalities":["text","image","video"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000002","completion":"0.0000002"},"top_provider":{"context_length":36864,"max_completion_tokens":36864,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"expiration_date":"2026-03-23","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":0.2}},{"id":"allenai/molmo-2-8b:free","canonical_slug":"allenai/molmo-2-8b-20260109","hugging_face_id":"allenai/Molmo2-8B","name":"AllenAI: Molmo2 8B (free)","created":1767996672,"description":"Molmo2-8B is an open vision-language model developed by the Allen Institute for AI (Ai2) as part of the Molmo2 family, supporting image, video, and multi-image understanding and grounding. It is based on Qwen3-8B and uses SigLIP 2 as its vision backbone, outperforming other open-weight, open-data models on short videos, counting, and captioning, while remaining competitive on long-video tasks.","context_length":36864,"architecture":{"modality":"text+image+video->text","input_modalities":["text","image","video"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":36864,"max_completion_tokens":36864,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"openai/gpt-5.2-codex","canonical_slug":"openai/gpt-5.2-codex-20260114","hugging_face_id":"","name":"OpenAI: GPT-5.2-Codex","created":1768409315,"description":"GPT-5.2-Codex is an upgraded version of GPT-5.1-Codex optimized for software engineering and coding workflows. It is designed for both interactive development sessions and long, independent execution of complex engineering tasks....","context_length":400000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000175","completion":"0.000014","input_cache_read":"0.000000175"},"top_provider":{"context_length":400000,"max_completion_tokens":128000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5.2-codex-20260114/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.75,"completion":14.0}},{"id":"xiaomi/mimo-v2-flash","canonical_slug":"xiaomi/mimo-v2-flash-20251210","hugging_face_id":"XiaomiMiMo/MiMo-V2-Flash","name":"Xiaomi: MiMo-V2-Flash","created":1765731308,"description":"MiMo-V2-Flash is an open-source foundation language model developed by Xiaomi. It is a Mixture-of-Experts model with 309B total parameters and 15B active parameters, adopting hybrid attention architecture. MiMo-V2-Flash supports a...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000001","completion":"0.0000003","input_cache_read":"0.00000001"},"top_provider":{"context_length":262144,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":0.95,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/xiaomi/mimo-v2-flash-20251210/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.3}},{"id":"qwen/qwen3-next-80b-a3b-instruct:free","canonical_slug":"qwen/qwen3-next-80b-a3b-instruct-2509","hugging_face_id":"Qwen/Qwen3-Next-80B-A3B-Instruct","name":"Qwen: Qwen3 Next 80B A3B Instruct (free)","created":1757612213,"description":"Qwen3-Next-80B-A3B-Instruct is an instruction-tuned chat model in the Qwen3-Next series optimized for fast, stable responses without “thinking” traces. It targets complex tasks across reasoning, code generation, knowledge QA, and multilingual...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":262144,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":"2025-09-30","expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-next-80b-a3b-instruct-2509/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"z-ai/glm-4.7-flash","canonical_slug":"z-ai/glm-4.7-flash-20260119","hugging_face_id":"zai-org/GLM-4.7-Flash","name":"Z.ai: GLM 4.7 Flash","created":1768833913,"description":"As a 30B-class SOTA model, GLM-4.7-Flash offers a new option that balances performance and efficiency. It is further optimized for agentic coding use cases, strengthening coding capabilities, long-horizon task planning,...","context_length":202752,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000006","completion":"0.0000004","input_cache_read":"0.00000001"},"top_provider":{"context_length":202752,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/z-ai/glm-4.7-flash-20260119/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.06,"completion":0.4}},{"id":"openai/gpt-audio-mini","canonical_slug":"openai/gpt-audio-mini","hugging_face_id":"","name":"OpenAI: GPT Audio Mini","created":1768859419,"description":"A cost-efficient version of GPT Audio. The new snapshot features an upgraded decoder for more natural sounding voices and maintains better voice consistency. Input is priced at $0.60 per million...","context_length":128000,"architecture":{"modality":"text+audio->text+audio","input_modalities":["text","audio"],"output_modalities":["text","audio"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.0000006","completion":"0.0000024","audio":"0.0000006"},"top_provider":{"context_length":128000,"max_completion_tokens":16384,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-audio-mini/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.6,"completion":2.4}},{"id":"openai/gpt-audio","canonical_slug":"openai/gpt-audio","hugging_face_id":"","name":"OpenAI: GPT Audio","created":1768862569,"description":"The gpt-audio model is OpenAI's first generally available audio model. The new snapshot features an upgraded decoder for more natural sounding voices and maintains better voice consistency. Audio is priced...","context_length":128000,"architecture":{"modality":"text+audio->text+audio","input_modalities":["text","audio"],"output_modalities":["text","audio"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.0000025","completion":"0.00001","audio":"0.000032"},"top_provider":{"context_length":128000,"max_completion_tokens":16384,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-audio/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.5,"completion":10.0}},{"id":"liquid/lfm-2.5-1.2b-thinking:free","canonical_slug":"liquid/lfm-2.5-1.2b-thinking-20260120","hugging_face_id":"LiquidAI/LFM2.5-1.2B-Thinking","name":"LiquidAI: LFM2.5-1.2B-Thinking (free)","created":1768927527,"description":"LFM2.5-1.2B-Thinking is a lightweight reasoning-focused model optimized for agentic tasks, data extraction, and RAG—while still running comfortably on edge devices. It supports long context (up to 32K tokens) and is...","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/liquid/lfm-2.5-1.2b-thinking-20260120/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"liquid/lfm-2.5-1.2b-instruct:free","canonical_slug":"liquid/lfm-2.5-1.2b-instruct-20260120","hugging_face_id":"LiquidAI/LFM2.5-1.2B-Instruct","name":"LiquidAI: LFM2.5-1.2B-Instruct (free)","created":1768927521,"description":"LFM2.5-1.2B-Instruct is a compact, high-performance instruction-tuned model built for fast on-device AI. It delivers strong chat quality in a 1.2B parameter footprint, with efficient edge inference and broad runtime support.","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","min_p","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/liquid/lfm-2.5-1.2b-instruct-20260120/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"writer/palmyra-x5","canonical_slug":"writer/palmyra-x5-20250428","hugging_face_id":"","name":"Writer: Palmyra X5","created":1769003823,"description":"Palmyra X5 is Writer's most advanced model, purpose-built for building and scaling AI agents across the enterprise. It delivers industry-leading speed and efficiency on context windows up to 1 million...","context_length":1040000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000006","completion":"0.000006"},"top_provider":{"context_length":1040000,"max_completion_tokens":8192,"is_moderated":true},"per_request_limits":null,"supported_parameters":["max_tokens","stop","temperature","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/writer/palmyra-x5-20250428/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.6,"completion":6.0}},{"id":"minimax/minimax-m2-her","canonical_slug":"minimax/minimax-m2-her-20260123","hugging_face_id":"","name":"MiniMax: MiniMax M2-her","created":1769177239,"description":"MiniMax M2-her is a dialogue-first large language model built for immersive roleplay, character-driven chat, and expressive multi-turn conversations. Designed to stay consistent in tone and personality, it supports rich message...","context_length":65536,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000003","completion":"0.0000012","input_cache_read":"0.00000003"},"top_provider":{"context_length":65536,"max_completion_tokens":2048,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","temperature","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/minimax/minimax-m2-her-20260123/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.3,"completion":1.2}},{"id":"upstage/solar-pro-3:free","canonical_slug":"upstage/solar-pro-3","hugging_face_id":"","name":"Upstage: Solar Pro 3 (free)","created":1769481200,"description":"Solar Pro 3 is Upstage's powerful Mixture-of-Experts (MoE) language model. With 102B total parameters and 12B active parameters per forward pass, it delivers exceptional performance while maintaining computational efficiency. Optimized for Korean with English and Japanese support.","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":128000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","structured_outputs","temperature","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"expiration_date":"2026-03-02","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"moonshotai/kimi-k2.5","canonical_slug":"moonshotai/kimi-k2.5-0127","hugging_face_id":"moonshotai/Kimi-K2.5","name":"MoonshotAI: Kimi K2.5","created":1769487076,"description":"Kimi K2.5 is Moonshot AI's native multimodal model, delivering state-of-the-art visual coding capability and a self-directed agent swarm paradigm. Built on Kimi K2 with continued pretraining over approximately 15T mixed visual and text tokens, it delivers strong performance in general reasoning, visual coding, and agentic tool-calling.","context_length":262144,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000004","completion":"0.0000019","input_cache_read":"0.00000009"},"top_provider":{"context_length":262144,"max_completion_tokens":262144,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/moonshotai/kimi-k2.5-0127/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.6,"completion":3.0}},{"id":"arcee-ai/trinity-large-preview:free","canonical_slug":"arcee-ai/trinity-large-preview","hugging_face_id":"arcee-ai/Trinity-Large-Preview","name":"Arcee AI: Trinity Large Preview (free)","created":1769552670,"description":"Trinity-Large-Preview is a frontier-scale open-weight language model from Arcee, built as a 400B-parameter sparse Mixture-of-Experts with 13B active parameters per token using 4-of-256 expert routing. It excels in creative writing,...","context_length":131000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":131000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","structured_outputs","temperature","tools","top_k","top_p"],"default_parameters":{"temperature":0.8,"top_p":0.8,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"knowledge_cutoff":null,"expiration_date":"2026-04-22","links":{"details":"/api/v1/models/arcee-ai/trinity-large-preview/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"openrouter/free","canonical_slug":"openrouter/free","hugging_face_id":"","name":"Free Models Router","created":1769917427,"description":"The simplest way to get free inference. openrouter/free is a router that selects free models at random from the models available on OpenRouter. The router smartly filters for models that...","context_length":200000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Router","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":null,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openrouter/free/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"stepfun/step-3.5-flash:free","canonical_slug":"stepfun/step-3.5-flash","hugging_face_id":"stepfun-ai/Step-3.5-Flash","name":"StepFun: Step 3.5 Flash (free)","created":1769728337,"description":"Step 3.5 Flash is StepFun's most capable open-source foundation model. Built on a sparse Mixture of Experts (MoE) architecture, it selectively activates only 11B of its 196B parameters per token. It is a reasoning model that is incredibly speed efficient even at long contexts.","context_length":256000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":256000,"max_completion_tokens":256000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","reasoning","stop","temperature","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"knowledge_cutoff":null,"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"qwen/qwen3-coder-next","canonical_slug":"qwen/qwen3-coder-next-2025-02-03","hugging_face_id":"Qwen/Qwen3-Coder-Next","name":"Qwen: Qwen3 Coder Next","created":1770164101,"description":"Qwen3-Coder-Next is an open-weight causal language model optimized for coding agents and local development workflows. It uses a sparse MoE design with 80B total parameters and only 3B activated per...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":null},"pricing":{"prompt":"0.00000011","completion":"0.0000008","input_cache_read":"0.00000007"},"top_provider":{"context_length":262144,"max_completion_tokens":262144,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-coder-next-2025-02-03/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.11,"completion":0.8}},{"id":"anthropic/claude-opus-4.6","canonical_slug":"anthropic/claude-4.6-opus-20260205","hugging_face_id":"","name":"Anthropic: Claude Opus 4.6","created":1770219050,"description":"Opus 4.6 is Anthropic’s strongest model for coding and long-running professional tasks. It is built for agents that operate across entire workflows rather than single prompts, making it especially effective for large codebases, complex refactors, and multi-step debugging that unfolds over time. The model shows deeper contextual understanding, stronger problem decomposition, and greater reliability on hard engineering tasks than prior generations.\r\n\r\nBeyond coding, Opus 4.6 excels at sustained knowledge work. It produces near-production-ready documents, plans, and analyses in a single pass, and maintains coherence across very long outputs and extended sessions. This makes it a strong default for tasks that require persistence, judgment, and follow-through, such as technical design, migration planning, and end-to-end project execution.\r\n\r\nFor users upgrading from earlier Opus versions, see our [official migration guide here](https://openrouter.ai/docs/guides/guides/model-migrations/claude-4-6-opus)","context_length":1000000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"Claude","instruct_type":null},"pricing":{"prompt":"0.000005","completion":"0.000025","web_search":"0.01","input_cache_read":"0.0000005","input_cache_write":"0.00000625"},"top_provider":{"context_length":1000000,"max_completion_tokens":128000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p","verbosity"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/anthropic/claude-4.6-opus-20260205/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":5.0,"completion":25.0}},{"id":"openrouter/pony-alpha","canonical_slug":"openrouter/pony-alpha","hugging_face_id":"","name":"Pony Alpha","created":1770393855,"description":"Pony is a cutting-edge foundation model with strong performance in coding, agentic workflows, reasoning, and roleplay, making it well suited for hands-on coding and real-world use.\n\n**Note:** All prompts and completions for this model are logged by the provider and may be used to improve the model.","context_length":200000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":200000,"max_completion_tokens":131000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","structured_outputs","temperature","tools","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"frequency_penalty":null},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"openrouter/aurora-alpha","canonical_slug":"openrouter/aurora-alpha","hugging_face_id":"","name":"Aurora Alpha","created":1770611225,"description":"This is a cloaked model provided to the community to gather feedback. A reasoning model designed for speed. It is built for coding assistants, real-time conversational applications, and agentic workflows.\n\nDefault reasoning effort is set to medium for fast responses. For agentic coding use cases, we recommend changing effort to high. \n\nNote: All prompts and completions for this model are logged by the provider and may be used to improve the model.","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":128000,"max_completion_tokens":50000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","reasoning_effort","response_format","structured_outputs","temperature","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"expiration_date":"2026-02-19","object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"qwen/qwen3-max-thinking","canonical_slug":"qwen/qwen3-max-thinking-20260123","hugging_face_id":null,"name":"Qwen: Qwen3 Max Thinking","created":1770671901,"description":"Qwen3-Max-Thinking is the flagship reasoning model in the Qwen3 series, designed for high-stakes cognitive tasks that require deep, multi-step reasoning. By significantly scaling model capacity and reinforcement learning compute, it...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":null},"pricing":{"prompt":"0.00000078","completion":"0.0000039"},"top_provider":{"context_length":262144,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3-max-thinking-20260123/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.78,"completion":3.9}},{"id":"lodestones/Chroma1-HD","object":"model","owned_by":"proxy","name":"lodestones/Chroma1-HD","description":"Chroma1-HD is an 8.9B parameter text-to-image foundational model based on FLUX.1-schnell. It is fully Apache 2.0 licensed, ensuring that anyone can use, modify, and build upon it.\r\n\r\nAs a base model, Chroma1 is intentionally designed to be an excellent starting point for finetuning. It provides a strong, neutral foundation for developers, researchers, and artists to create specialized models.","type":"image","pricing":{},"pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"stepfun/step-3.5-flash","canonical_slug":"stepfun/step-3.5-flash","hugging_face_id":"stepfun-ai/Step-3.5-Flash","name":"StepFun: Step 3.5 Flash","created":1769728337,"description":"Step 3.5 Flash is StepFun's most capable open-source foundation model. Built on a sparse Mixture of Experts (MoE) architecture, it selectively activates only 11B of its 196B parameters per token....","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000001","completion":"0.0000003"},"top_provider":{"context_length":262144,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/stepfun/step-3.5-flash/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.3}},{"id":"z-ai/glm-5","canonical_slug":"z-ai/glm-5-20260211","hugging_face_id":"zai-org/GLM-5","name":"Z.ai: GLM 5","created":1770829182,"description":"GLM-5 is Z.ai’s flagship open-source foundation model engineered for complex systems design and long-horizon agent workflows. Built for expert developers, it delivers production-grade performance on large-scale programming tasks, rivaling leading closed-source models. With advanced agentic planning, deep backend reasoning, and iterative self-correction, GLM-5 moves beyond code generation to full-system construction and autonomous execution.","context_length":202752,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000006","completion":"0.00000192","input_cache_read":"0.00000012"},"top_provider":{"context_length":202752,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/z-ai/glm-5-20260211/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.0,"completion":3.2}},{"id":"minimax/minimax-m2.5","canonical_slug":"minimax/minimax-m2.5-20260211","hugging_face_id":"MiniMaxAI/MiniMax-M2.5","name":"MiniMax: MiniMax M2.5","created":1770908502,"description":"MiniMax-M2.5 is a SOTA large language model designed for real-world productivity. Trained in a diverse range of complex real-world digital working environments, M2.5 builds upon the coding expertise of M2.1...","context_length":196608,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000015","completion":"0.00000115"},"top_provider":{"context_length":196608,"max_completion_tokens":196608,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","parallel_tool_calls","presence_penalty","reasoning","reasoning_effort","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/minimax/minimax-m2.5-20260211/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.15,"completion":1.15}},{"id":"qwen/qwen3-4b","canonical_slug":"qwen/qwen3-4b-04-28","hugging_face_id":"Qwen/Qwen3-4B","name":"Qwen: Qwen3 4B","created":1746031104,"description":"Qwen3-4B is a 4 billion parameter dense language model from the Qwen3 series, designed to support both general-purpose and reasoning-intensive tasks. It introduces a dual-mode architecture—thinking and non-thinking—allowing dynamic switching between high-precision logical reasoning and efficient dialogue generation. This makes it well-suited for multi-turn chat, instruction following, and complex agent workflows.","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":"qwen3"},"pricing":{"prompt":"0.0000000715","completion":"0.000000273"},"top_provider":{"context_length":131072,"max_completion_tokens":8192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","temperature","tool_choice","tools","top_p"],"default_parameters":{},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0715,"completion":0.273}},{"id":"qwen/qwen3.5-plus-02-15","canonical_slug":"qwen/qwen3.5-plus-20260216","hugging_face_id":"","name":"Qwen: Qwen3.5 Plus 2026-02-15","created":1771229416,"description":"The Qwen3.5 native vision-language series Plus models are built on a hybrid architecture that integrates linear attention mechanisms with sparse mixture-of-experts models, achieving higher inference efficiency. In a variety of...","context_length":1000000,"architecture":{"modality":"text+image+video->text","input_modalities":["text","image","video"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000026","completion":"0.00000156","input_cache_write":"0.000000325"},"top_provider":{"context_length":1000000,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3.5-plus-20260216/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.26,"completion":1.56}},{"id":"qwen/qwen3.5-397b-a17b","canonical_slug":"qwen/qwen3.5-397b-a17b-20260216","hugging_face_id":"Qwen/Qwen3.5-397B-A17B","name":"Qwen: Qwen3.5 397B A17B","created":1771223018,"description":"The Qwen3.5 series 397B-A17B native vision-language model is built on a hybrid architecture that integrates a linear attention mechanism with a sparse mixture-of-experts model, achieving higher inference efficiency. It delivers...","context_length":262144,"architecture":{"modality":"text+image+video->text","input_modalities":["text","image","video"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000039","completion":"0.00000234"},"top_provider":{"context_length":262144,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":0.6,"top_p":0.95,"top_k":20,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3.5-397b-a17b-20260216/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.39,"completion":2.34}},{"id":"anthropic/claude-sonnet-4.6","canonical_slug":"anthropic/claude-4.6-sonnet-20260217","hugging_face_id":"","name":"Anthropic: Claude Sonnet 4.6","created":1771342990,"description":"Sonnet 4.6 is Anthropic's most capable Sonnet-class model yet, with frontier performance across coding, agents, and professional work. It excels at iterative development, complex codebase navigation, end-to-end project management with memory, polished document creation, and confident computer use for web QA and workflow automation.","context_length":1000000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"Claude","instruct_type":null},"pricing":{"prompt":"0.000003","completion":"0.000015","web_search":"0.01","input_cache_read":"0.0000003","input_cache_write":"0.00000375"},"top_provider":{"context_length":1000000,"max_completion_tokens":128000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p","verbosity"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/anthropic/claude-4.6-sonnet-20260217/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":3.0,"completion":15.0}},{"id":"google/gemini-3.1-pro-preview","canonical_slug":"google/gemini-3.1-pro-preview-20260219","hugging_face_id":"","name":"Google: Gemini 3.1 Pro Preview","created":1771509627,"description":"Gemini 3.1 Pro Preview is Google’s frontier reasoning model, delivering enhanced software engineering performance, improved agentic reliability, and more efficient token usage across complex workflows. Building on the multimodal foundation of the Gemini 3 series, it combines high-precision reasoning across text, image, video, audio, and code with a 1M-token context window. Reasoning Details must be preserved when using multi-turn tool calling, see our docs here: https://openrouter.ai/docs/use-cases/reasoning-tokens#preserving-reasoning. The 3.1 update introduces measurable gains in SWE benchmarks and real-world coding environments, along with stronger autonomous task execution in structured domains such as finance and spreadsheet-based workflows.\r\n\r\nDesigned for advanced development and agentic systems, Gemini 3.1 Pro Preview improves long-horizon stability and tool orchestration while increasing token efficiency. It introduces a new medium thinking level to better balance cost, speed, and performance. The model excels in agentic coding, structured planning, multimodal analysis, and workflow automation, making it well-suited for autonomous agents, financial modeling, spreadsheet automation, and high-context enterprise tasks.","context_length":1048576,"architecture":{"modality":"text+image+file+audio+video->text","input_modalities":["audio","file","image","text","video"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0.000002","completion":"0.000012","image":"0.000002","audio":"0.000002","web_search":"0.014","internal_reasoning":"0.000012","input_cache_read":"0.0000002","input_cache_write":"0.000000375"},"top_provider":{"context_length":1048576,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/google/gemini-3.1-pro-preview-20260219/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":12.0}},{"id":"openai/gpt-5.3-codex","canonical_slug":"openai/gpt-5.3-codex-20260224","hugging_face_id":"","name":"OpenAI: GPT-5.3-Codex","created":1771959164,"description":"GPT-5.3-Codex is OpenAI’s most advanced agentic coding model, combining the frontier software engineering performance of GPT-5.2-Codex with the broader reasoning and professional knowledge capabilities of GPT-5.2. It achieves state-of-the-art results on SWE-Bench Pro and strong performance on Terminal-Bench 2.0 and OSWorld-Verified, reflecting improved multi-language coding, terminal proficiency, and real-world computer-use skills. The model is optimized for long-running, tool-using workflows and supports interactive steering during execution, making it suitable for complex development tasks, debugging, deployment, and iterative product work.\r\n\r\nBeyond coding, GPT-5.3-Codex performs strongly on structured knowledge-work benchmarks such as GDPval, supporting tasks like document drafting, spreadsheet analysis, slide creation, and operational research across domains. It is trained with enhanced cybersecurity awareness, including vulnerability identification capabilities, and deployed with additional safeguards for high-risk use cases. Compared to prior Codex models, it is more token-efficient and approximately 25% faster, targeting professional end-to-end workflows that span reasoning, execution, and computer interaction.","context_length":400000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000175","completion":"0.000014","web_search":"0.01","input_cache_read":"0.000000175"},"top_provider":{"context_length":400000,"max_completion_tokens":128000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5.3-codex-20260224/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.75,"completion":14.0}},{"id":"aion-labs/aion-2.0","canonical_slug":"aion-labs/aion-2.0-20260223","hugging_face_id":null,"name":"AionLabs: Aion-2.0","created":1771881306,"description":"Aion-2.0 is a variant of DeepSeek V3.2 optimized for immersive roleplaying and storytelling. It is particularly strong at introducing tension, crises, and conflict into stories, making narratives feel more engaging....","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000008","completion":"0.0000016","input_cache_read":"0.0000002"},"top_provider":{"context_length":131072,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","temperature","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/aion-labs/aion-2.0-20260223/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.8,"completion":1.6}},{"id":"google/gemini-3.1-pro-preview-customtools","canonical_slug":"google/gemini-3.1-pro-preview-customtools-20260219","hugging_face_id":null,"name":"Google: Gemini 3.1 Pro Preview Custom Tools","created":1772045923,"description":"Gemini 3.1 Pro Preview Custom Tools is a variant of Gemini 3.1 Pro that improves tool selection behavior by preventing overuse of a general bash tool when more efficient third-party...","context_length":1048576,"architecture":{"modality":"text+image+file+audio+video->text","input_modalities":["text","audio","image","video","file"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0.000002","completion":"0.000012","image":"0.000002","audio":"0.000002","web_search":"0.014","internal_reasoning":"0.000012","input_cache_read":"0.0000002","input_cache_write":"0.000000375"},"top_provider":{"context_length":1048576,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/google/gemini-3.1-pro-preview-customtools-20260219/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":12.0}},{"id":"liquid/lfm-2-24b-a2b","canonical_slug":"liquid/lfm-2-24b-a2b-20260224","hugging_face_id":"LiquidAI/LFM2-24B-A2B","name":"LiquidAI: LFM2-24B-A2B","created":1772048711,"description":"LFM2-24B-A2B is the largest model in the LFM2 family of hybrid architectures designed for efficient on-device deployment. Built as a 24B parameter Mixture-of-Experts model with only 2B active parameters per...","context_length":32768,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000003","completion":"0.00000012"},"top_provider":{"context_length":32768,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","stop","temperature","top_k","top_p"],"default_parameters":{"temperature":0.1,"top_p":null,"top_k":50,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":1.05},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/liquid/lfm-2-24b-a2b-20260224/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.03,"completion":0.12}},{"id":"qwen/qwen3.5-35b-a3b","canonical_slug":"qwen/qwen3.5-35b-a3b-20260224","hugging_face_id":"Qwen/Qwen3.5-35B-A3B","name":"Qwen: Qwen3.5-35B-A3B","created":1772053822,"description":"The Qwen3.5 Series 35B-A3B is a native vision-language model designed with a hybrid architecture that integrates linear attention mechanisms and a sparse mixture-of-experts model, achieving higher inference efficiency. Its overall...","context_length":262144,"architecture":{"modality":"text+image+video->text","input_modalities":["text","image","video"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000014","completion":"0.000001","input_cache_read":"0.00000005"},"top_provider":{"context_length":262144,"max_completion_tokens":81920,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":20,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3.5-35b-a3b-20260224/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.14,"completion":1.0}},{"id":"qwen/qwen3.5-27b","canonical_slug":"qwen/qwen3.5-27b-20260224","hugging_face_id":"Qwen/Qwen3.5-27B","name":"Qwen: Qwen3.5-27B","created":1772053810,"description":"The Qwen3.5 27B native vision-language Dense model incorporates a linear attention mechanism, delivering fast response times while balancing inference speed and performance. Its overall capabilities are comparable to those of...","context_length":262144,"architecture":{"modality":"text+image+video->text","input_modalities":["text","image","video"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.000000195","completion":"0.00000156"},"top_provider":{"context_length":262144,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":0.6,"top_p":0.95,"top_k":20,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3.5-27b-20260224/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.195,"completion":1.56}},{"id":"qwen/qwen3.5-122b-a10b","canonical_slug":"qwen/qwen3.5-122b-a10b-20260224","hugging_face_id":"Qwen/Qwen3.5-122B-A10B","name":"Qwen: Qwen3.5-122B-A10B","created":1772053789,"description":"The Qwen3.5 122B-A10B native vision-language model is built on a hybrid architecture that integrates a linear attention mechanism with a sparse mixture-of-experts model, achieving higher inference efficiency. In terms of...","context_length":262144,"architecture":{"modality":"text+image+video->text","input_modalities":["text","image","video"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000026","completion":"0.00000208"},"top_provider":{"context_length":262144,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":0.6,"top_p":0.95,"top_k":20,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3.5-122b-a10b-20260224/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.26,"completion":2.08}},{"id":"qwen/qwen3.5-flash-02-23","canonical_slug":"qwen/qwen3.5-flash-20260224","hugging_face_id":null,"name":"Qwen: Qwen3.5-Flash","created":1772053776,"description":"The Qwen3.5 native vision-language Flash models are built on a hybrid architecture that integrates a linear attention mechanism with a sparse mixture-of-experts model, achieving higher inference efficiency. Compared to the...","context_length":1000000,"architecture":{"modality":"text+image+video->text","input_modalities":["text","image","video"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.000000065","completion":"0.00000026","input_cache_write":"0.00000008125"},"top_provider":{"context_length":1000000,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3.5-flash-20260224/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.065,"completion":0.26}},{"id":"google/gemini-3.1-flash-image-preview","canonical_slug":"google/gemini-3.1-flash-image-preview-20260226","hugging_face_id":"","name":"Google: Nano Banana 2 (Gemini 3.1 Flash Image Preview)","created":1772119558,"description":"Gemini 3.1 Flash Image Preview, a.k.a. \"Nano Banana 2,\" is Google’s latest state of the art image generation and editing model, delivering Pro-level visual quality at Flash speed. It combines advanced contextual understanding with fast, cost-efficient inference, making complex image generation and iterative edits significantly more accessible. Aspect ratios can be controlled with the [image_config API Parameter](https://openrouter.ai/docs/features/multimodal/image-generation#image-aspect-ratio-configuration)","context_length":65536,"architecture":{"modality":"text+image->text+image","input_modalities":["image","text"],"output_modalities":["image","text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0.0000005","completion":"0.000003","web_search":"0.014"},"top_provider":{"context_length":65536,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/google/gemini-3.1-flash-image-preview-20260226/endpoints"},"object":"model","owned_by":"proxy","type":"image","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"bytedance-seed/seed-2.0-mini","canonical_slug":"bytedance-seed/seed-2.0-mini-20260224","hugging_face_id":"","name":"ByteDance Seed: Seed-2.0-Mini","created":1772131107,"description":"Seed-2.0-mini targets latency-sensitive, high-concurrency, and cost-sensitive scenarios, emphasizing fast response and flexible inference deployment. It delivers performance comparable to ByteDance-Seed-1.6, supports 256k context, four reasoning effort modes (minimal/low/medium/high), multimodal understanding,...","context_length":262144,"architecture":{"modality":"text+image+video->text","input_modalities":["text","image","video"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000001","completion":"0.0000004"},"top_provider":{"context_length":262144,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"frequency_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/bytedance-seed/seed-2.0-mini-20260224/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.4}},{"id":"upstage/solar-pro-3","canonical_slug":"upstage/solar-pro-3","hugging_face_id":"","name":"Upstage: Solar Pro 3","created":1769481200,"description":"Solar Pro 3 is Upstage's powerful Mixture-of-Experts (MoE) language model. With 102B total parameters and 12B active parameters per forward pass, it delivers exceptional performance while maintaining computational efficiency. Optimized...","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000015","completion":"0.0000006","input_cache_read":"0.000000015"},"top_provider":{"context_length":128000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","structured_outputs","temperature","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/upstage/solar-pro-3/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.15,"completion":0.6}},{"id":"google/gemini-3.1-flash-lite-preview","canonical_slug":"google/gemini-3.1-flash-lite-preview-20260303","hugging_face_id":"","name":"Google: Gemini 3.1 Flash Lite Preview","created":1772512673,"description":"Gemini 3.1 Flash Lite Preview is Google's high-efficiency model optimized for high-volume use cases. It outperforms Gemini 2.5 Flash Lite on overall quality and approaches Gemini 2.5 Flash performance across...","context_length":1048576,"architecture":{"modality":"text+image+file+audio+video->text","input_modalities":["text","image","video","file","audio"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0.00000025","completion":"0.0000015","image":"0.00000025","audio":"0.0000005","web_search":"0.014","internal_reasoning":"0.0000015","input_cache_read":"0.000000025","input_cache_write":"0.00000008333333333333334"},"top_provider":{"context_length":1048576,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/google/gemini-3.1-flash-lite-preview-20260303/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.25,"completion":1.5}},{"id":"openai/gpt-5.3-chat","canonical_slug":"openai/gpt-5.3-chat-20260303","hugging_face_id":"","name":"OpenAI: GPT-5.3 Chat","created":1772564061,"description":"GPT-5.3 Chat is an update to ChatGPT's most-used model that makes everyday conversations smoother, more useful, and more directly helpful. It delivers more accurate answers with better contextualization and significantly...","context_length":128000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000175","completion":"0.000014","web_search":"0.01","input_cache_read":"0.000000175"},"top_provider":{"context_length":128000,"max_completion_tokens":16384,"is_moderated":true},"per_request_limits":null,"supported_parameters":["max_completion_tokens","max_tokens","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5.3-chat-20260303/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.75,"completion":14.0}},{"id":"inception/mercury-2","canonical_slug":"inception/mercury-2-20260304","hugging_face_id":null,"name":"Inception: Mercury 2","created":1772636275,"description":"Mercury 2 is an extremely fast reasoning LLM, and the first reasoning diffusion LLM (dLLM). Instead of generating tokens sequentially, Mercury 2 produces and refines multiple tokens in parallel, achieving...","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000025","completion":"0.00000075","input_cache_read":"0.000000025"},"top_provider":{"context_length":128000,"max_completion_tokens":50000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools"],"default_parameters":{"temperature":0.75,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/inception/mercury-2-20260304/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.25,"completion":0.75}},{"id":"openai/gpt-5.4-pro","canonical_slug":"openai/gpt-5.4-pro-20260305","hugging_face_id":"","name":"OpenAI: GPT-5.4 Pro","created":1772734366,"description":"GPT-5.4 Pro is OpenAI's most advanced model, building on GPT-5.4's unified architecture with enhanced reasoning capabilities for complex, high-stakes tasks. It features a 1M+ token context window (922K input, 128K...","context_length":1050000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00003","completion":"0.00018","web_search":"0.01"},"top_provider":{"context_length":1050000,"max_completion_tokens":128000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5.4-pro-20260305/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":30.0,"completion":180.0}},{"id":"openai/gpt-5.4","canonical_slug":"openai/gpt-5.4-20260305","hugging_face_id":"","name":"OpenAI: GPT-5.4","created":1772734352,"description":"GPT-5.4 is OpenAI’s latest frontier model, unifying the Codex and GPT lines into a single system. It features a 1M+ token context window (922K input, 128K output) with support for...","context_length":1050000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.0000025","completion":"0.000015","web_search":"0.01","input_cache_read":"0.00000025"},"top_provider":{"context_length":1050000,"max_completion_tokens":128000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5.4-20260305/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.5,"completion":15.0}},{"id":"qwen/qwen3.5-9b","canonical_slug":"qwen/qwen3.5-9b-20260310","hugging_face_id":"Qwen/Qwen3.5-9B","name":"Qwen: Qwen3.5-9B","created":1773152396,"description":"Qwen3.5-9B is a multimodal foundation model from the Qwen3.5 family, designed to deliver strong reasoning, coding, and visual understanding in an efficient 9B-parameter architecture. It uses a unified vision-language design...","context_length":262144,"architecture":{"modality":"text+image+video->text","input_modalities":["text","image","video"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000004","completion":"0.00000015"},"top_provider":{"context_length":262144,"max_completion_tokens":81920,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3.5-9b-20260310/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.04,"completion":0.15}},{"id":"bytedance-seed/seed-2.0-lite","canonical_slug":"bytedance-seed/seed-2.0-lite-20260309","hugging_face_id":null,"name":"ByteDance Seed: Seed-2.0-Lite","created":1773157231,"description":"Seed-2.0-Lite is a versatile, cost‑efficient enterprise workhorse that delivers strong multimodal and agent capabilities while offering noticeably lower latency, making it a practical default choice for most production workloads across...","context_length":262144,"architecture":{"modality":"text+image+video->text","input_modalities":["text","image","video"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000025","completion":"0.000002"},"top_provider":{"context_length":262144,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/bytedance-seed/seed-2.0-lite-20260309/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.25,"completion":2.0}},{"id":"nvidia/nemotron-3-super-120b-a12b:free","canonical_slug":"nvidia/nemotron-3-super-120b-a12b-20230311","hugging_face_id":"nvidia/NVIDIA-Nemotron-3-Super-120B-A12B-FP8","name":"NVIDIA: Nemotron 3 Super (free)","created":1773245239,"description":"NVIDIA Nemotron 3 Super is a 120B-parameter open hybrid MoE model, activating just 12B parameters for maximum compute efficiency and accuracy in complex multi-agent applications. Built on a hybrid Mamba-Transformer...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":262144,"max_completion_tokens":262144,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/nvidia/nemotron-3-super-120b-a12b-20230311/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"openrouter/hunter-alpha","canonical_slug":"openrouter/hunter-alpha","hugging_face_id":"","name":"Hunter Alpha","created":1773260671,"description":"Hunter Alpha is a 1 Trillion parameter + 1M token context frontier intelligence model built for agentic use. It excels at long-horizon planning, complex reasoning, and sustained multi-step task execution, with the reliability and instruction-following precision that frameworks like OpenClaw need.\n\n**Note:** All prompts and completions for this model are logged by the provider and may be used to improve the model.","context_length":1048576,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":1048576,"max_completion_tokens":32000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","response_format","stop","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"openrouter/healer-alpha","canonical_slug":"openrouter/healer-alpha","hugging_face_id":"","name":"Healer Alpha","created":1773260668,"description":"Healer Alpha is a frontier omni-modal model with vision, hearing, reasoning, and action capabilities. It brings the full power of agentic intelligence into the real world: natively perceiving visual and audio inputs, reasoning across modalities, and executing complex multi-step tasks with precision and reliability.\n\n**Note:** All prompts and completions for this model are logged by the provider and may be used to improve the model.","context_length":262144,"architecture":{"modality":"text+image+audio+video->text","input_modalities":["text","image","audio","video"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0","request":"0","image":"0","web_search":"0","internal_reasoning":"0"},"top_provider":{"context_length":262144,"max_completion_tokens":32000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","response_format","stop","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"x-ai/grok-4.20-multi-agent-beta","canonical_slug":"x-ai/grok-4.20-multi-agent-beta-20260309","hugging_face_id":"","name":"xAI: Grok 4.20 Multi-Agent Beta","created":1773325367,"description":"Grok 4.20 Multi-Agent Beta is a variant of xAI’s Grok 4.20 designed for collaborative, agent-based workflows. Multiple agents operate in parallel to conduct deep research, coordinate tool use, and synthesize information across complex tasks.\r\n\r\nReasoning effort behavior:\r\n- low / medium: 4 agents\r\n- high / xhigh: 16 agents","context_length":2000000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Grok","instruct_type":null},"pricing":{"prompt":"0.000002","completion":"0.000006","web_search":"0.005","input_cache_read":"0.0000002"},"top_provider":{"context_length":2000000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","logprobs","max_tokens","reasoning","response_format","seed","structured_outputs","temperature","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"knowledge_cutoff":null,"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":6.0}},{"id":"x-ai/grok-4.20-beta","canonical_slug":"x-ai/grok-4.20-beta-20260309","hugging_face_id":"","name":"xAI: Grok 4.20 Beta","created":1773325354,"description":"Grok 4.20 Beta is xAI's newest flagship model with industry-leading speed and agentic tool calling capabilities. It combines the lowest hallucination rate on the market with strict prompt adherance, delivering consistently precise and truthful responses.\r\n\r\nReasoning can be enabled/disabled using the `reasoning` `enabled` parameter in the API. [Learn more in our docs](https://openrouter.ai/docs/use-cases/reasoning-tokens#controlling-reasoning-tokens)","context_length":2000000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Grok","instruct_type":null},"pricing":{"prompt":"0.000002","completion":"0.000006","web_search":"0.005","input_cache_read":"0.0000002"},"top_provider":{"context_length":2000000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","logprobs","max_tokens","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"knowledge_cutoff":null,"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":6.0}},{"id":"z-ai/glm-5-turbo","canonical_slug":"z-ai/glm-5-turbo-20260315","hugging_face_id":"","name":"Z.ai: GLM 5 Turbo","created":1773583573,"description":"GLM-5 Turbo is a new model from Z.ai designed for fast inference and strong performance in agent-driven environments such as OpenClaw scenarios. It is deeply optimized for real-world agent workflows...","context_length":202752,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000012","completion":"0.000004","input_cache_read":"0.00000024"},"top_provider":{"context_length":202752,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/z-ai/glm-5-turbo-20260315/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.2,"completion":4.0}},{"id":"minimax/minimax-m2.5:free","canonical_slug":"minimax/minimax-m2.5-20260211","hugging_face_id":"MiniMaxAI/MiniMax-M2.5","name":"MiniMax: MiniMax M2.5 (free)","created":1770908502,"description":"MiniMax-M2.5 is a SOTA large language model designed for real-world productivity. Trained in a diverse range of complex real-world digital working environments, M2.5 builds upon the coding expertise of M2.1...","context_length":196608,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":196608,"max_completion_tokens":8192,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","temperature","tools"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/minimax/minimax-m2.5-20260211/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"mistralai/mistral-small-2603","canonical_slug":"mistralai/mistral-small-2603","hugging_face_id":"mistralai/Mistral-Small-4-119B-2603","name":"Mistral: Mistral Small 4","created":1773695685,"description":"Mistral Small 4 is the next major release in the Mistral Small family, unifying the capabilities of several flagship Mistral models into a single system. It combines strong reasoning from...","context_length":262144,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.00000015","completion":"0.0000006","input_cache_read":"0.000000015"},"top_provider":{"context_length":262144,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/mistralai/mistral-small-2603/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.15,"completion":0.6}},{"id":"openai/gpt-5.4-nano","canonical_slug":"openai/gpt-5.4-nano-20260317","hugging_face_id":"","name":"OpenAI: GPT-5.4 Nano","created":1773748187,"description":"GPT-5.4 nano is the most lightweight and cost-efficient variant of the GPT-5.4 family, optimized for speed-critical and high-volume tasks. It supports text and image inputs and is designed for low-latency...","context_length":400000,"architecture":{"modality":"text+image+file->text","input_modalities":["file","image","text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.0000002","completion":"0.00000125","web_search":"0.01","input_cache_read":"0.00000002"},"top_provider":{"context_length":400000,"max_completion_tokens":128000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-08-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5.4-nano-20260317/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.2,"completion":1.25}},{"id":"openai/gpt-5.4-mini","canonical_slug":"openai/gpt-5.4-mini-20260317","hugging_face_id":"","name":"OpenAI: GPT-5.4 Mini","created":1773748178,"description":"GPT-5.4 mini brings the core capabilities of GPT-5.4 to a faster, more efficient model optimized for high-throughput workloads. It supports text and image inputs with strong performance across reasoning, coding,...","context_length":400000,"architecture":{"modality":"text+image+file->text","input_modalities":["file","image","text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00000075","completion":"0.0000045","web_search":"0.01","input_cache_read":"0.000000075"},"top_provider":{"context_length":400000,"max_completion_tokens":128000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-08-31","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5.4-mini-20260317/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.75,"completion":4.5}},{"id":"minimax/minimax-m2.7","canonical_slug":"minimax/minimax-m2.7-20260318","hugging_face_id":"MiniMaxAI/MiniMax-M2.7","name":"MiniMax: MiniMax M2.7","created":1773836697,"description":"MiniMax-M2.7 is a next-generation large language model designed for autonomous, real-world productivity and continuous improvement. Built to actively participate in its own evolution, M2.7 integrates advanced agentic capabilities through multi-agent...","context_length":196608,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000028","completion":"0.0000012"},"top_provider":{"context_length":196608,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/minimax/minimax-m2.7-20260318/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.28,"completion":1.2}},{"id":"nvidia/nemotron-3-super-120b-a12b","canonical_slug":"nvidia/nemotron-3-super-120b-a12b-20230311","hugging_face_id":"nvidia/NVIDIA-Nemotron-3-Super-120B-A12B-FP8","name":"NVIDIA: Nemotron 3 Super","created":1773245239,"description":"NVIDIA Nemotron 3 Super is a 120B-parameter open hybrid MoE model, activating just 12B parameters for maximum compute efficiency and accuracy in complex multi-agent applications. Built on a hybrid Mamba-Transformer...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000009","completion":"0.00000045"},"top_provider":{"context_length":262144,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/nvidia/nemotron-3-super-120b-a12b-20230311/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.09,"completion":0.45}},{"id":"xiaomi/mimo-v2-omni","canonical_slug":"xiaomi/mimo-v2-omni-20260318","hugging_face_id":"","name":"Xiaomi: MiMo-V2-Omni","created":1773863703,"description":"MiMo-V2-Omni is a frontier omni-modal model that natively processes image, video, and audio inputs within a unified architecture. It combines strong multimodal perception with agentic capability - visual grounding, multi-step...","context_length":262144,"architecture":{"modality":"text+image+audio+video->text","input_modalities":["text","audio","image","video"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000004","completion":"0.000002","input_cache_read":"0.00000008"},"top_provider":{"context_length":262144,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","response_format","stop","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/xiaomi/mimo-v2-omni-20260318/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.4,"completion":2.0}},{"id":"xiaomi/mimo-v2-pro","canonical_slug":"xiaomi/mimo-v2-pro-20260318","hugging_face_id":"","name":"Xiaomi: MiMo-V2-Pro","created":1773863643,"description":"MiMo-V2-Pro is Xiaomi's flagship foundation model, featuring over 1T total parameters and a 1M context length, deeply optimized for agentic scenarios. It is highly adaptable to general agent frameworks like...","context_length":1048576,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.000001","completion":"0.000003","input_cache_read":"0.0000002"},"top_provider":{"context_length":1048576,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","response_format","stop","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/xiaomi/mimo-v2-pro-20260318/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.0,"completion":3.0}},{"id":"reka/reka-edge","canonical_slug":"rekaai/reka-edge-2603","hugging_face_id":"RekaAI/reka-edge-2603","name":"Reka Edge","created":1774026965,"description":"Reka Edge is an extremely efficient 7B multimodal vision-language model that accepts image/video+text inputs and generates text outputs. This model is optimized specifically to deliver industry-leading performance in image understanding, video analysis, object detection, and agentic tool-use.","context_length":16384,"architecture":{"modality":"text+image+video->text","input_modalities":["image","text","video"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000001","completion":"0.0000001"},"top_provider":{"context_length":16384,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"knowledge_cutoff":null,"expiration_date":null,"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.1}},{"id":"kwaipilot/kat-coder-pro-v2","canonical_slug":"kwaipilot/kat-coder-pro-v2-20260327","hugging_face_id":"","name":"Kwaipilot: KAT-Coder-Pro V2","created":1774649310,"description":"KAT-Coder-Pro V2 is the latest high-performance model in KwaiKAT’s KAT-Coder series, designed for complex enterprise-grade software engineering and SaaS integration. It builds on the agentic coding strengths of earlier versions,...","context_length":256000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000003","completion":"0.0000012","input_cache_read":"0.00000006"},"top_provider":{"context_length":256000,"max_completion_tokens":80000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/kwaipilot/kat-coder-pro-v2-20260327/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.3,"completion":1.2}},{"id":"openrouter/elephant-alpha","canonical_slug":"openrouter/elephant-alpha","hugging_face_id":null,"name":"Elephant","created":1776052598,"description":"Elephant Alpha is a 100B-parameter text model focused on intelligence efficiency, delivering strong performance while minimizing token usage. It supports a 256K context window with up to 32K output tokens,...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":262144,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openrouter/elephant-alpha/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"anthropic/claude-opus-4.6-fast","canonical_slug":"anthropic/claude-4.6-opus-fast-20260407","hugging_face_id":null,"name":"Anthropic: Claude Opus 4.6 (Fast)","created":1775592472,"description":"Fast-mode variant of [Opus 4.6](/anthropic/claude-opus-4.6) - identical capabilities with higher output speed at premium 6x pricing.\n\nLearn more in Anthropic's docs: https://platform.claude.com/docs/en/build-with-claude/fast-mode","context_length":1000000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"Claude","instruct_type":null},"pricing":{"prompt":"0.00003","completion":"0.00015","web_search":"0.01","input_cache_read":"0.000003","input_cache_write":"0.0000375"},"top_provider":{"context_length":1000000,"max_completion_tokens":128000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_p","verbosity"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/anthropic/claude-4.6-opus-fast-20260407/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":30.0,"completion":150.0}},{"id":"z-ai/glm-5.1","canonical_slug":"z-ai/glm-5.1-20260406","hugging_face_id":"zai-org/GLM-5.1","name":"Z.ai: GLM 5.1","created":1775578025,"description":"GLM-5.1 delivers a major leap in coding capability, with particularly significant gains in handling long-horizon tasks. Unlike previous models built around minute-level interactions, GLM-5.1 can work independently and continuously on...","context_length":202752,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000105","completion":"0.0000035","input_cache_read":"0.000000525"},"top_provider":{"context_length":202752,"max_completion_tokens":65535,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","parallel_tool_calls","presence_penalty","reasoning","reasoning_effort","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/z-ai/glm-5.1-20260406/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.95,"completion":3.15}},{"id":"google/gemma-4-26b-a4b-it:free","canonical_slug":"google/gemma-4-26b-a4b-it-20260403","hugging_face_id":"google/gemma-4-26B-A4B-it","name":"Google: Gemma 4 26B A4B  (free)","created":1775227989,"description":"Gemma 4 26B A4B IT is an instruction-tuned Mixture-of-Experts (MoE) model from Google DeepMind. Despite 25.2B total parameters, only 3.8B activate per token during inference — delivering near-31B quality at...","context_length":262144,"architecture":{"modality":"text+image+video->text","input_modalities":["image","text","video"],"output_modalities":["text"],"tokenizer":"Gemma","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":262144,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":64},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/google/gemma-4-26b-a4b-it-20260403/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"google/gemma-4-26b-a4b-it","canonical_slug":"google/gemma-4-26b-a4b-it-20260403","hugging_face_id":"google/gemma-4-26B-A4B-it","name":"Google: Gemma 4 26B A4B ","created":1775227989,"description":"Gemma 4 26B A4B IT is an instruction-tuned Mixture-of-Experts (MoE) model from Google DeepMind. Despite 25.2B total parameters, only 3.8B activate per token during inference — delivering near-31B quality at...","context_length":262144,"architecture":{"modality":"text+image+video->text","input_modalities":["image","text","video"],"output_modalities":["text"],"tokenizer":"Gemma","instruct_type":null},"pricing":{"prompt":"0.00000006","completion":"0.00000033"},"top_provider":{"context_length":262144,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":64},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/google/gemma-4-26b-a4b-it-20260403/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.06,"completion":0.33}},{"id":"google/gemma-4-31b-it:free","canonical_slug":"google/gemma-4-31b-it-20260402","hugging_face_id":"google/gemma-4-31B-it","name":"Google: Gemma 4 31B (free)","created":1775148486,"description":"Gemma 4 31B Instruct is Google DeepMind's 30.7B dense multimodal model supporting text and image input with text output. Features a 256K token context window, configurable thinking/reasoning mode, native function...","context_length":262144,"architecture":{"modality":"text+image+video->text","input_modalities":["image","text","video"],"output_modalities":["text"],"tokenizer":"Gemma","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":262144,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":64,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/google/gemma-4-31b-it-20260402/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"google/gemma-4-31b-it","canonical_slug":"google/gemma-4-31b-it-20260402","hugging_face_id":"google/gemma-4-31B-it","name":"Google: Gemma 4 31B","created":1775148486,"description":"Gemma 4 31B Instruct is Google DeepMind's 30.7B dense multimodal model supporting text and image input with text output. Features a 256K token context window, configurable thinking/reasoning mode, native function...","context_length":262144,"architecture":{"modality":"text+image+video->text","input_modalities":["image","text","video"],"output_modalities":["text"],"tokenizer":"Gemma","instruct_type":null},"pricing":{"prompt":"0.00000012","completion":"0.00000037"},"top_provider":{"context_length":262144,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":64,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/google/gemma-4-31b-it-20260402/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.12,"completion":0.37}},{"id":"qwen/qwen3.6-plus","canonical_slug":"qwen/qwen3.6-plus-04-02","hugging_face_id":"","name":"Qwen: Qwen3.6 Plus","created":1775133557,"description":"Qwen 3.6 Plus builds on a hybrid architecture that combines efficient linear attention with sparse mixture-of-experts routing, enabling strong scalability and high-performance inference. Compared to the 3.5 series, it delivers...","context_length":1000000,"architecture":{"modality":"text+image+video->text","input_modalities":["text","image","video"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.000000325","completion":"0.00000195","input_cache_write":"0.00000040625"},"top_provider":{"context_length":1000000,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3.6-plus-04-02/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.325,"completion":1.95}},{"id":"z-ai/glm-5v-turbo","canonical_slug":"z-ai/glm-5v-turbo-20260401","hugging_face_id":"","name":"Z.ai: GLM 5V Turbo","created":1775061458,"description":"GLM-5V-Turbo is Z.ai’s first native multimodal agent foundation model, built for vision-based coding and agent-driven tasks. It natively handles image, video, and text inputs, excels at long-horizon planning, complex coding,...","context_length":202752,"architecture":{"modality":"text+image+video->text","input_modalities":["image","text","video"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000012","completion":"0.000004","input_cache_read":"0.00000024"},"top_provider":{"context_length":202752,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/z-ai/glm-5v-turbo-20260401/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.2,"completion":4.0}},{"id":"arcee-ai/trinity-large-thinking","canonical_slug":"arcee-ai/trinity-large-thinking","hugging_face_id":"arcee-ai/Trinity-Large-Thinking","name":"Arcee AI: Trinity Large Thinking","created":1775058318,"description":"Trinity Large Thinking is a powerful open source reasoning model from the team at Arcee AI. It shows strong performance in PinchBench, agentic workloads, and reasoning tasks. Launch video: https://youtu.be/Gc82AXLa0Rg?si=4RLn6WBz33qT--B7...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000022","completion":"0.00000085","input_cache_read":"0.00000006"},"top_provider":{"context_length":262144,"max_completion_tokens":262144,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.3,"top_p":0.8,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/arcee-ai/trinity-large-thinking/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.22,"completion":0.85}},{"id":"x-ai/grok-4.20-multi-agent","canonical_slug":"x-ai/grok-4.20-multi-agent-20260309","hugging_face_id":"","name":"xAI: Grok 4.20 Multi-Agent","created":1774979158,"description":"Grok 4.20 Multi-Agent is a variant of xAI’s Grok 4.20 designed for collaborative, agent-based workflows. Multiple agents operate in parallel to conduct deep research, coordinate tool use, and synthesize information...","context_length":2000000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"Grok","instruct_type":null},"pricing":{"prompt":"0.000002","completion":"0.000006","web_search":"0.005","input_cache_read":"0.0000002"},"top_provider":{"context_length":2000000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","logprobs","max_tokens","reasoning","response_format","seed","structured_outputs","temperature","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-09-01","expiration_date":null,"links":{"details":"/api/v1/models/x-ai/grok-4.20-multi-agent-20260309/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":6.0}},{"id":"x-ai/grok-4.20","canonical_slug":"x-ai/grok-4.20-20260309","hugging_face_id":"","name":"xAI: Grok 4.20","created":1774979019,"description":"Grok 4.20 is xAI's newest flagship model with industry-leading speed and agentic tool calling capabilities. It combines the lowest hallucination rate on the market with strict prompt adherance, delivering consistently...","context_length":2000000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"Grok","instruct_type":null},"pricing":{"prompt":"0.00000125","completion":"0.0000025","web_search":"0.005","input_cache_read":"0.0000002"},"top_provider":{"context_length":2000000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","logprobs","max_tokens","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-09-01","expiration_date":null,"links":{"details":"/api/v1/models/x-ai/grok-4.20-20260309/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":6.0}},{"id":"google/lyria-3-pro-preview","canonical_slug":"google/lyria-3-pro-preview-20260330","hugging_face_id":null,"name":"Google: Lyria 3 Pro Preview","created":1774907286,"description":"Full-length songs are priced at $0.08 per song. Lyria 3 is Google's family of music generation models, available through the Gemini API. With Lyria 3, you can generate high-quality, 48kHz...","context_length":1048576,"architecture":{"modality":"text+image->text+audio","input_modalities":["text","image"],"output_modalities":["text","audio"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":1048576,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","seed","temperature","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/google/lyria-3-pro-preview-20260330/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"google/lyria-3-clip-preview","canonical_slug":"google/lyria-3-clip-preview-20260330","hugging_face_id":null,"name":"Google: Lyria 3 Clip Preview","created":1774907255,"description":"30 second duration clips are priced at $0.04 per clip. Lyria 3 is Google's family of music generation models, available through the Gemini API. With Lyria 3, you can generate...","context_length":1048576,"architecture":{"modality":"text+image->text+audio","input_modalities":["text","image"],"output_modalities":["text","audio"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":1048576,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","seed","temperature","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/google/lyria-3-clip-preview-20260330/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"rekaai/reka-edge","canonical_slug":"rekaai/reka-edge-2603","hugging_face_id":"RekaAI/reka-edge-2603","name":"Reka Edge","created":1774026965,"description":"Reka Edge is an extremely efficient 7B multimodal vision-language model that accepts image/video+text inputs and generates text outputs. This model is optimized specifically to deliver industry-leading performance in image understanding,...","context_length":16384,"architecture":{"modality":"text+image+video->text","input_modalities":["image","text","video"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000001","completion":"0.0000001"},"top_provider":{"context_length":16384,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/rekaai/reka-edge-2603/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.1}},{"id":"rekaai/reka-flash-3","canonical_slug":"rekaai/reka-flash-3","hugging_face_id":"RekaAI/reka-flash-3","name":"Reka Flash 3","created":1741812813,"description":"Reka Flash 3 is a general-purpose, instruction-tuned large language model with 21 billion parameters, developed by Reka. It excels at general chat, coding tasks, instruction-following, and function calling. Featuring a...","context_length":65536,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000001","completion":"0.0000002"},"top_provider":{"context_length":65536,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","seed","stop","temperature","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-01-31","expiration_date":null,"links":{"details":"/api/v1/models/rekaai/reka-flash-3/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.1,"completion":0.2}},{"id":"anthropic/claude-opus-4.7","canonical_slug":"anthropic/claude-4.7-opus-20260416","hugging_face_id":null,"name":"Anthropic: Claude Opus 4.7","created":1776351100,"description":"Opus 4.7 is the next generation of Anthropic's Opus family, built for long-running, asynchronous agents. Building on the coding and agentic strengths of Opus 4.6, it delivers stronger performance on...","context_length":1000000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"Claude","instruct_type":null},"pricing":{"prompt":"0.000005","completion":"0.000025","web_search":"0.01","input_cache_read":"0.0000005","input_cache_write":"0.00000625"},"top_provider":{"context_length":1000000,"max_completion_tokens":128000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","tool_choice","tools","verbosity"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/anthropic/claude-4.7-opus-20260416/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":5.0,"completion":25.0}},{"id":"moonshotai/kimi-k2.6","canonical_slug":"moonshotai/kimi-k2.6-20260420","hugging_face_id":"moonshotai/Kimi-K2.6","name":"MoonshotAI: Kimi K2.6","created":1776699402,"description":"Kimi K2.6 is Moonshot AI's next-generation multimodal model, designed for long-horizon coding, coding-driven UI/UX generation, and multi-agent orchestration. It handles complex end-to-end coding tasks across Python, Rust, and Go, and...","context_length":262142,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000074","completion":"0.0000035","input_cache_read":"0.00000025"},"top_provider":{"context_length":262142,"max_completion_tokens":262142,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","parallel_tool_calls","presence_penalty","reasoning","reasoning_effort","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/moonshotai/kimi-k2.6-20260420/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.74,"completion":3.5}},{"id":"tencent/hy3-preview:free","canonical_slug":"tencent/hy3-preview-20260421","hugging_face_id":"tencent/Hy3-preview","name":"Tencent: Hy3 preview (free)","created":1776878150,"description":"Hy3 preview is a high-efficiency Mixture-of-Experts model from Tencent designed for agentic workflows and production use. It supports configurable reasoning levels across disabled, low, and high modes, allowing it to...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":262144,"max_completion_tokens":262144,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.9,"top_p":1,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/tencent/hy3-preview-20260421/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"xiaomi/mimo-v2.5-pro","canonical_slug":"xiaomi/mimo-v2.5-pro-20260422","hugging_face_id":"XiaomiMiMo/MiMo-V2.5-Pro","name":"Xiaomi: MiMo-V2.5-Pro","created":1776874273,"description":"MiMo-V2.5-Pro is Xiaomi’s flagship model, delivering strong performance in general agentic capabilities, complex software engineering, and long-horizon tasks, with top rankings on benchmarks such as ClawEval, GDPVal, and SWE-bench Pro....","context_length":1048576,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.000001","completion":"0.000003","input_cache_read":"0.0000002"},"top_provider":{"context_length":1048576,"max_completion_tokens":16384,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/xiaomi/mimo-v2.5-pro-20260422/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.0,"completion":3.0}},{"id":"xiaomi/mimo-v2.5","canonical_slug":"xiaomi/mimo-v2.5-20260422","hugging_face_id":"XiaomiMiMo/MiMo-V2.5","name":"Xiaomi: MiMo-V2.5","created":1776874269,"description":"MiMo-V2.5 is a native omnimodal model by Xiaomi. It delivers Pro-level agentic performance at roughly half the inference cost, while surpassing MiMo-V2-Omni in multimodal perception across image and video understanding...","context_length":1048576,"architecture":{"modality":"text+image+audio+video->text","input_modalities":["text","audio","image","video"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000004","completion":"0.000002","input_cache_read":"0.00000008"},"top_provider":{"context_length":1048576,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","response_format","stop","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/xiaomi/mimo-v2.5-20260422/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.4,"completion":2.0}},{"id":"openai/gpt-5.4-image-2","canonical_slug":"openai/gpt-5.4-image-2-20260421","hugging_face_id":"","name":"OpenAI: GPT-5.4 Image 2","created":1776797528,"description":"[GPT-5.4](https://openrouter.ai/openai/gpt-5.4) Image 2 combines OpenAI's GPT-5.4 model with state-of-the-art image generation capabilities from GPT Image 2. It enables rich multimodal workflows, allowing users to seamlessly move between reasoning, coding, and...","context_length":272000,"architecture":{"modality":"text+image+file->text+image","input_modalities":["image","text","file"],"output_modalities":["image","text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.000008","completion":"0.000015","web_search":"0.01","input_cache_read":"0.000002"},"top_provider":{"context_length":272000,"max_completion_tokens":128000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","presence_penalty","reasoning","response_format","seed","stop","structured_outputs","top_logprobs"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5.4-image-2-20260421/endpoints"},"object":"model","owned_by":"proxy","type":"image","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"inclusionai/ling-2.6-flash:free","canonical_slug":"inclusionai/ling-2.6-flash-20260421","hugging_face_id":"","name":"inclusionAI: Ling-2.6-flash (free)","created":1776795886,"description":"Ling-2.6-flash is an instant (instruct) model from inclusionAI with 104B total parameters and 7.4B active parameters, designed for real-world agents that require fast responses, strong execution, and high token efficiency....","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":262144,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"knowledge_cutoff":null,"expiration_date":"2026-04-29","links":{"details":"/api/v1/models/inclusionai/ling-2.6-flash-20260421/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"~anthropic/claude-opus-latest","canonical_slug":"~anthropic/claude-opus-latest","hugging_face_id":"","name":"Anthropic: Claude Opus Latest","created":1776795361,"description":"This model always redirects to the latest model in the Claude Opus family.","context_length":1000000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"Router","instruct_type":null},"pricing":{"prompt":"0.000005","completion":"0.000025","web_search":"0.01","input_cache_read":"0.0000005","input_cache_write":"0.00000625"},"top_provider":{"context_length":1000000,"max_completion_tokens":128000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","tool_choice","tools","verbosity"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/~anthropic/claude-opus-latest/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":5.0,"completion":25.0}},{"id":"openrouter/pareto-code","canonical_slug":"openrouter/pareto-code","hugging_face_id":"","name":"Pareto Code Router","created":1776747900,"description":"The Pareto Router maintains a tiered shortlist of strong coding models, ranked by [Artificial Analysis](https://artificialanalysis.ai/) coding percentiles. Set min_coding_score between 0 and 1 on the [pareto-router plugin](https://openrouter.ai/docs/guides/routing/routers/pareto-router#the-min_coding_score-parameter) to control how...","context_length":2000000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Router","instruct_type":null},"pricing":{"prompt":"-1","completion":"-1"},"top_provider":{"context_length":null,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":[],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openrouter/pareto-code/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":-1000000.0,"completion":-1000000.0}},{"id":"baidu/qianfan-ocr-fast:free","canonical_slug":"baidu/qianfan-ocr-fast-20260420","hugging_face_id":"","name":"Baidu: Qianfan-OCR-Fast (free)","created":1776707472,"description":"Qianfan-OCR-Fast is a domain-specific multimodal large model purpose-built for OCR. By leveraging specialized OCR training data while preserving versatile multimodal intelligence, it provides a powerful performance upgrade over Qianfan-OCR.","context_length":65536,"architecture":{"modality":"text+image->text","input_modalities":["image","text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":65536,"max_completion_tokens":28672,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/baidu/qianfan-ocr-fast-20260420/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"arcee-ai/trinity-large-preview","canonical_slug":"arcee-ai/trinity-large-preview","hugging_face_id":"arcee-ai/Trinity-Large-Preview","name":"Arcee AI: Trinity Large Preview","created":1769552670,"description":"Trinity-Large-Preview is a frontier-scale open-weight language model from Arcee, built as a 400B-parameter sparse Mixture-of-Experts with 13B active parameters per token using 4-of-256 expert routing. It excels in creative writing,...","context_length":131000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000015","completion":"0.00000045"},"top_provider":{"context_length":131000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["max_tokens","response_format","structured_outputs","temperature","tools","top_k","top_p"],"default_parameters":{"temperature":0.8,"top_p":0.8,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/arcee-ai/trinity-large-preview/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.15,"completion":0.45}},{"id":"inclusionai/ling-2.6-1t:free","canonical_slug":"inclusionai/ling-2.6-1t-20260423","hugging_face_id":null,"name":"inclusionAI: Ling-2.6-1T (free)","created":1776948238,"description":"Ling-2.6-1T is an instant (instruct) model from inclusionAI and the company’s trillion-parameter flagship, designed for real-world agents that require fast execution and high efficiency at scale. It uses a “fast...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":262144,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":"2026-05-07","links":{"details":"/api/v1/models/inclusionai/ling-2.6-1t-20260423/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"deepseek/deepseek-v4-pro","canonical_slug":"deepseek/deepseek-v4-pro-20260423","hugging_face_id":"deepseek-ai/DeepSeek-V4-Pro","name":"DeepSeek: DeepSeek V4 Pro","created":1777000679,"description":"DeepSeek V4 Pro is a large-scale Mixture-of-Experts model from DeepSeek with 1.6T total parameters and 49B activated parameters, supporting a 1M-token context window. It is designed for advanced reasoning, coding,...","context_length":1048576,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":null},"pricing":{"prompt":"0.000000435","completion":"0.00000087","input_cache_read":"0.000000003625"},"top_provider":{"context_length":1048576,"max_completion_tokens":384000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":1,"top_p":1,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/deepseek/deepseek-v4-pro-20260423/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.43,"completion":0.87}},{"id":"deepseek/deepseek-v4-flash","canonical_slug":"deepseek/deepseek-v4-flash-20260423","hugging_face_id":"deepseek-ai/DeepSeek-V4-Flash","name":"DeepSeek: DeepSeek V4 Flash","created":1777000666,"description":"DeepSeek V4 Flash is an efficiency-optimized Mixture-of-Experts model from DeepSeek with 284B total parameters and 13B activated parameters, supporting a 1M-token context window. It is designed for fast inference and...","context_length":1048576,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":null},"pricing":{"prompt":"0.00000014","completion":"0.00000028","input_cache_read":"0.0000000028"},"top_provider":{"context_length":1048576,"max_completion_tokens":384000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/deepseek/deepseek-v4-flash-20260423/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.14,"completion":0.28}},{"id":"openai/gpt-5.5-pro","canonical_slug":"openai/gpt-5.5-pro-20260423","hugging_face_id":"","name":"OpenAI: GPT-5.5 Pro","created":1777051896,"description":"GPT-5.5 Pro is OpenAI’s high-capability model optimized for deep reasoning and accuracy on complex, high-stakes workloads. It features a 1M+ token context window (922K input, 128K output) with support for...","context_length":1050000,"architecture":{"modality":"text+image+file->text","input_modalities":["file","image","text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.00003","completion":"0.00018","web_search":"0.01"},"top_provider":{"context_length":1050000,"max_completion_tokens":128000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-12-01","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5.5-pro-20260423/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":30.0,"completion":180.0}},{"id":"openai/gpt-5.5","canonical_slug":"openai/gpt-5.5-20260423","hugging_face_id":"","name":"OpenAI: GPT-5.5","created":1777051893,"description":"GPT-5.5 is OpenAI’s frontier model designed for complex professional workloads, building on GPT-5.4 with stronger reasoning, higher reliability, and improved token efficiency on hard tasks. It features a 1M+ token...","context_length":1050000,"architecture":{"modality":"text+image+file->text","input_modalities":["file","image","text"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.000005","completion":"0.00003","web_search":"0.01","input_cache_read":"0.0000005"},"top_provider":{"context_length":1050000,"max_completion_tokens":128000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-12-01","expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-5.5-20260423/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":5.0,"completion":30.0}},{"id":"qwen/qwen3.6-27b","canonical_slug":"qwen/qwen3.6-27b-20260422","hugging_face_id":"Qwen/Qwen3.6-27B","name":"Qwen: Qwen3.6 27B","created":1777255064,"description":"Qwen3.6 27B is a dense 27-billion-parameter language model from the Qwen Team at Alibaba, released in April 2026. It features hybrid multimodal capabilities — accepting text, image, and video inputs...","context_length":262144,"architecture":{"modality":"text+image+video->text","input_modalities":["text","image","video"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000032","completion":"0.0000032"},"top_provider":{"context_length":262144,"max_completion_tokens":81920,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3.6-27b-20260422/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.32,"completion":3.2}},{"id":"qwen/qwen3.6-35b-a3b","canonical_slug":"qwen/qwen3.6-35b-a3b-20260415","hugging_face_id":"Qwen/Qwen3.6-35B-A3B","name":"Qwen: Qwen3.6 35B A3B","created":1777260255,"description":"Qwen3.6-35B-A3B is an open-weight multimodal model from Alibaba Cloud with 35 billion total parameters and 3 billion active parameters per token. It uses a hybrid sparse mixture-of-experts architecture combining Gated...","context_length":262144,"architecture":{"modality":"text+image+video->text","input_modalities":["text","image","video"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":null},"pricing":{"prompt":"0.00000015","completion":"0.000001","input_cache_read":"0.00000005"},"top_provider":{"context_length":262144,"max_completion_tokens":262144,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":1,"top_p":0.95,"top_k":20},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3.6-35b-a3b-20260415/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.15,"completion":1.0}},{"id":"qwen/qwen3.6-max-preview","canonical_slug":"qwen/qwen3.6-max-preview-20260420","hugging_face_id":null,"name":"Qwen: Qwen3.6 Max Preview","created":1777260242,"description":"Qwen3.6-Max-Preview is a proprietary frontier model from Alibaba Cloud built on a sparse mixture-of-experts architecture with approximately 1 trillion total parameters. It is optimized for agentic coding, tool use, and...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":null},"pricing":{"prompt":"0.00000104","completion":"0.00000624","input_cache_write":"0.0000013"},"top_provider":{"context_length":262144,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","logprobs","max_tokens","presence_penalty","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3.6-max-preview-20260420/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.04,"completion":6.24}},{"id":"qwen/qwen3.5-plus-20260420","canonical_slug":"qwen/qwen3.5-plus-20260420","hugging_face_id":null,"name":"Qwen: Qwen3.5 Plus 2026-04-20","created":1777261368,"description":"Qwen3.5 Plus (April 2026) is a large-scale multimodal language model from Alibaba. It accepts text, image, and video input and produces text output, with a 1M token context window. This...","context_length":1000000,"architecture":{"modality":"text+image+video->text","input_modalities":["text","image","video"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.0000004","completion":"0.0000024"},"top_provider":{"context_length":1000000,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3.5-plus-20260420/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.4,"completion":2.4}},{"id":"qwen/qwen3.6-flash","canonical_slug":"qwen/qwen3.6-flash","hugging_face_id":null,"name":"Qwen: Qwen3.6 Flash","created":1777261362,"description":"Qwen3.6 Flash is a fast, efficient language model from Alibaba's Qwen 3.6 series. It supports text, image, and video input with a 1M token context window. Tiered pricing kicks in...","context_length":1000000,"architecture":{"modality":"text+image+video->text","input_modalities":["text","image","video"],"output_modalities":["text"],"tokenizer":"Qwen3","instruct_type":null},"pricing":{"prompt":"0.00000025","completion":"0.0000015","input_cache_write":"0.0000003125"},"top_provider":{"context_length":1000000,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/qwen/qwen3.6-flash/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.25,"completion":1.5}},{"id":"~anthropic/claude-haiku-latest","canonical_slug":"~anthropic/claude-haiku-latest","hugging_face_id":null,"name":"Anthropic Claude Haiku Latest","created":1777318492,"description":"This model always redirects to the latest model in the Anthropic Claude Haiku family.","context_length":200000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"Router","instruct_type":null},"pricing":{"prompt":"0.000001","completion":"0.000005","web_search":"0.01","input_cache_read":"0.0000001","input_cache_write":"0.00000125"},"top_provider":{"context_length":200000,"max_completion_tokens":64000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/~anthropic/claude-haiku-latest/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.0,"completion":5.0}},{"id":"~openai/gpt-mini-latest","canonical_slug":"~openai/gpt-mini-latest","hugging_face_id":null,"name":"OpenAI GPT Mini Latest","created":1777318471,"description":"This model always redirects to the latest model in the OpenAI GPT Mini family.","context_length":400000,"architecture":{"modality":"text+image+file->text","input_modalities":["file","image","text"],"output_modalities":["text"],"tokenizer":"Router","instruct_type":null},"pricing":{"prompt":"0.00000075","completion":"0.0000045","web_search":"0.01","input_cache_read":"0.000000075"},"top_provider":{"context_length":400000,"max_completion_tokens":128000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-08-31","expiration_date":null,"links":{"details":"/api/v1/models/~openai/gpt-mini-latest/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.75,"completion":4.5}},{"id":"~google/gemini-pro-latest","canonical_slug":"~google/gemini-pro-latest","hugging_face_id":null,"name":"Google Gemini Pro Latest","created":1777318451,"description":"This model always redirects to the latest model in the Google Gemini Pro family.","context_length":1048576,"architecture":{"modality":"text+image+file+audio+video->text","input_modalities":["audio","file","image","text","video"],"output_modalities":["text"],"tokenizer":"Router","instruct_type":null},"pricing":{"prompt":"0.000002","completion":"0.000012","image":"0.000002","audio":"0.000002","web_search":"0.014","internal_reasoning":"0.000012","input_cache_read":"0.0000002","input_cache_write":"0.000000375"},"top_provider":{"context_length":1048576,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/~google/gemini-pro-latest/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":2.0,"completion":12.0}},{"id":"~moonshotai/kimi-latest","canonical_slug":"~moonshotai/kimi-latest","hugging_face_id":null,"name":"MoonshotAI Kimi Latest","created":1777318428,"description":"This model always redirects to the latest model in the MoonshotAI Kimi family.","context_length":262142,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Router","instruct_type":null},"pricing":{"prompt":"0.00000074","completion":"0.0000035","input_cache_read":"0.00000025"},"top_provider":{"context_length":262142,"max_completion_tokens":262142,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","parallel_tool_calls","presence_penalty","reasoning","reasoning_effort","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/~moonshotai/kimi-latest/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.74,"completion":3.5}},{"id":"~google/gemini-flash-latest","canonical_slug":"~google/gemini-flash-latest","hugging_face_id":null,"name":"Google Gemini Flash Latest","created":1777318398,"description":"This model always redirects to the latest model in the Google Gemini Flash family.","context_length":1048576,"architecture":{"modality":"text+image+file+audio+video->text","input_modalities":["text","image","file","audio","video"],"output_modalities":["text"],"tokenizer":"Router","instruct_type":null},"pricing":{"prompt":"0.0000005","completion":"0.000003","image":"0.0000005","audio":"0.000001","web_search":"0.014","internal_reasoning":"0.000003","input_cache_read":"0.00000005","input_cache_write":"0.00000008333333333333334"},"top_provider":{"context_length":1048576,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/~google/gemini-flash-latest/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.5,"completion":3.0}},{"id":"~anthropic/claude-sonnet-latest","canonical_slug":"~anthropic/claude-sonnet-latest","hugging_face_id":null,"name":"Anthropic Claude Sonnet Latest","created":1777318368,"description":"This model always redirects to the latest model in the Anthropic Claude Sonnet family.","context_length":1000000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"Router","instruct_type":null},"pricing":{"prompt":"0.000003","completion":"0.000015","web_search":"0.01","input_cache_read":"0.0000003","input_cache_write":"0.00000375"},"top_provider":{"context_length":1000000,"max_completion_tokens":128000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p","verbosity"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/~anthropic/claude-sonnet-latest/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":3.0,"completion":15.0}},{"id":"~openai/gpt-latest","canonical_slug":"~openai/gpt-latest","hugging_face_id":null,"name":"OpenAI GPT Latest","created":1777318334,"description":"This model always redirects to the latest model in the OpenAI GPT family.","context_length":1050000,"architecture":{"modality":"text+image+file->text","input_modalities":["file","image","text"],"output_modalities":["text"],"tokenizer":"Router","instruct_type":null},"pricing":{"prompt":"0.000005","completion":"0.00003","web_search":"0.01","input_cache_read":"0.0000005"},"top_provider":{"context_length":1050000,"max_completion_tokens":128000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":"2025-12-01","expiration_date":null,"links":{"details":"/api/v1/models/~openai/gpt-latest/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":5.0,"completion":30.0}},{"id":"openai/whisper-1","canonical_slug":"openai/whisper-1","hugging_face_id":null,"name":"OpenAI: Whisper 1","created":1777332905,"description":"Whisper is OpenAI's open-source automatic speech recognition model, available via API as `whisper-1`. It supports transcription and translation across 50+ languages from audio files up to 25 MB. Accepts formats...","context_length":0,"architecture":{"modality":"audio->text","input_modalities":["audio"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.006","completion":"0"},"top_provider":{"context_length":0,"max_completion_tokens":null,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/whisper-1/endpoints"},"object":"model","owned_by":"proxy","type":"audio","pricing_per_million":{"prompt":6000.0,"completion":0.0}},{"id":"poolside/laguna-xs.2:free","canonical_slug":"poolside/laguna-xs.2-20260421","hugging_face_id":"poolside/Laguna-XS.2","name":"Poolside: Laguna XS.2 (free)","created":1777389604,"description":"Laguna XS.2 is the second-generation model in the XS size class from [Poolside](https://poolside.ai), their efficient coding agent series. It combines tool calling and reasoning capabilities with a compact footprint, offering...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":131072,"max_completion_tokens":8192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","temperature","tool_choice","tools"],"default_parameters":{"temperature":0.7,"top_p":0.9,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/poolside/laguna-xs.2-20260421/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"poolside/laguna-m.1:free","canonical_slug":"poolside/laguna-m.1-20260312","hugging_face_id":null,"name":"Poolside: Laguna M.1 (free)","created":1777388504,"description":"Laguna M.1 is the flagship coding agent model from [Poolside](https://poolside.ai), optimized for complex software engineering tasks. Designed for agentic coding workflows, it supports tool calling and reasoning, with a 128K...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":131072,"max_completion_tokens":8192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","temperature","tool_choice","tools"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/poolside/laguna-m.1-20260312/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"nvidia/nemotron-3-nano-omni-30b-a3b-reasoning:free","canonical_slug":"nvidia/nemotron-3-nano-omni-30b-a3b-reasoning-20260428","hugging_face_id":null,"name":"NVIDIA: Nemotron 3 Nano Omni (free)","created":1777393095,"description":"NVIDIA Nemotron™ 3 Nano Omni is a 30B-A3B open multimodal model designed to function as a perception and context sub-agent in enterprise agent systems. It accepts text, image, video, and...","context_length":256000,"architecture":{"modality":"text+image+audio+video->text","input_modalities":["text","audio","image","video"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":256000,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","seed","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":0.6,"top_p":0.95,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/nvidia/nemotron-3-nano-omni-30b-a3b-reasoning-20260428/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"inclusionai/ling-2.6-flash","canonical_slug":"inclusionai/ling-2.6-flash-20260421","hugging_face_id":"","name":"inclusionAI: Ling-2.6-flash","created":1776795886,"description":"Ling-2.6-flash is an instant (instruct) model from inclusionAI with 104B total parameters and 7.4B active parameters, designed for real-world agents that require fast responses, strong execution, and high token efficiency....","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000008","completion":"0.00000024","input_cache_read":"0.000000016"},"top_provider":{"context_length":262144,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/inclusionai/ling-2.6-flash-20260421/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.08,"completion":0.24}},{"id":"openrouter/owl-alpha","canonical_slug":"openrouter/owl-alpha","hugging_face_id":null,"name":"Owl Alpha","created":1777398589,"description":"Owl Alpha is a high-performance foundation model designed for agentic workloads. Natively supports tool use, and long-context tasks, with strong performance in code generation, automated workflows, and complex instruction execution....","context_length":1048756,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":1048756,"max_completion_tokens":262144,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openrouter/owl-alpha/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"ibm-granite/granite-4.1-8b","canonical_slug":"ibm-granite/granite-4.1-8b-20260429","hugging_face_id":"ibm-granite/granite-4.1-8b","name":"IBM: Granite 4.1 8B","created":1777577071,"description":"Granite 4.1 8B is a dense, decoder-only 8-billion-parameter language model from IBM, part of the Granite 4.1 family. It supports a 131K-token context window and is designed for enterprise tasks...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000005","completion":"0.0000001","input_cache_read":"0.00000005"},"top_provider":{"context_length":131072,"max_completion_tokens":131072,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/ibm-granite/granite-4.1-8b-20260429/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.05,"completion":0.1}},{"id":"x-ai/grok-4.3","canonical_slug":"x-ai/grok-4.3-20260430","hugging_face_id":null,"name":"xAI: Grok 4.3","created":1777591821,"description":"Grok 4.3 is a reasoning model from xAI. It accepts text and image inputs with text output, and is suited for agentic workflows, instruction-following tasks, and applications requiring high factual...","context_length":1000000,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Grok","instruct_type":null},"pricing":{"prompt":"0.00000125","completion":"0.0000025","web_search":"0.005","input_cache_read":"0.0000002"},"top_provider":{"context_length":1000000,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","logprobs","max_tokens","presence_penalty","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/x-ai/grok-4.3-20260430/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.25,"completion":2.5}},{"id":"mistralai/mistral-medium-3-5","canonical_slug":"mistralai/mistral-medium-3.5-20260430","hugging_face_id":null,"name":"Mistral: Mistral Medium 3.5","created":1777570439,"description":"Mistral Medium 3.5 is a dense 128B instruction-following model from Mistral AI. It supports text and image inputs with text output, and is designed for agentic workflows, coding, and complex...","context_length":262144,"architecture":{"modality":"text+image->text","input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":null},"pricing":{"prompt":"0.0000015","completion":"0.0000075"},"top_provider":{"context_length":262144,"max_completion_tokens":null,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/mistralai/mistral-medium-3.5-20260430/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":1.5,"completion":7.5}},{"id":"microsoft/phi-4-mini-instruct","canonical_slug":"microsoft/phi-4-mini-instruct","hugging_face_id":"microsoft/Phi-4-mini-instruct","name":"Microsoft: Phi 4 Mini Instruct","created":1760726049,"description":"Phi-4-mini-instruct is a lightweight open model built upon synthetic data and filtered publicly available websites - with a focus on high-quality, reasoning dense data. The model belongs to the Phi-4...","context_length":128000,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000008","completion":"0.00000035","input_cache_read":"0.00000008"},"top_provider":{"context_length":128000,"max_completion_tokens":128000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/microsoft/phi-4-mini-instruct/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.08,"completion":0.35}},{"id":"openai/gpt-chat-latest","canonical_slug":"openai/gpt-chat-latest-20260505","hugging_face_id":null,"name":"OpenAI: GPT Chat Latest","created":1778000212,"description":"GPT Chat Latest points to OpenAI's stable API alias `chat-latest` that always resolves to the latest Instant chat model used in ChatGPT. As OpenAI rolls out new Instant model updates...","context_length":400000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"GPT","instruct_type":null},"pricing":{"prompt":"0.000005","completion":"0.00003","web_search":"0.01","input_cache_read":"0.0000005"},"top_provider":{"context_length":400000,"max_completion_tokens":128000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","tool_choice","tools","top_logprobs"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/openai/gpt-chat-latest-20260505/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":5.0,"completion":30.0}},{"id":"baidu/cobuddy:free","canonical_slug":"baidu/cobuddy-20260430","hugging_face_id":null,"name":"Baidu Qianfan: CoBuddy (free)","created":1778035480,"description":"CoBuddy is a code generation model from Baidu, optimized for coding tasks and AI Agent workflows. It features high inference throughput and low end-to-end latency, with native support for tool...","context_length":131072,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":131072,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","stop","tools"],"default_parameters":{},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/baidu/cobuddy-20260430/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"inclusionai/ling-2.6-1t","canonical_slug":"inclusionai/ling-2.6-1t-20260423","hugging_face_id":null,"name":"inclusionAI: Ling-2.6-1T","created":1776948238,"description":"Ling-2.6-1T is an instant (instruct) model from inclusionAI and the company’s trillion-parameter flagship, designed for real-world agents that require fast execution and high efficiency at scale. It uses a “fast...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000003","completion":"0.0000025","input_cache_read":"0.00000006"},"top_provider":{"context_length":262144,"max_completion_tokens":32768,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/inclusionai/ling-2.6-1t-20260423/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.3,"completion":2.5}},{"id":"google/gemini-3.1-flash-lite","canonical_slug":"google/gemini-3.1-flash-lite-20260507","hugging_face_id":null,"name":"Google: Gemini 3.1 Flash Lite","created":1778168828,"description":"Gemini 3.1 Flash Lite is Google’s GA high-efficiency multimodal model optimized for low-latency, high-volume workloads. It supports text, image, video, audio, and PDF inputs, and is designed for lightweight agentic...","context_length":1048576,"architecture":{"modality":"text+image+file+audio+video->text","input_modalities":["text","image","video","file","audio"],"output_modalities":["text"],"tokenizer":"Gemini","instruct_type":null},"pricing":{"prompt":"0.00000025","completion":"0.0000015","image":"0.00000025","audio":"0.0000005","web_search":"0.014","internal_reasoning":"0.0000015","input_cache_read":"0.000000025","input_cache_write":"0.00000008333333333333334"},"top_provider":{"context_length":1048576,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/google/gemini-3.1-flash-lite-20260507/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.25,"completion":1.5}},{"id":"inclusionai/ring-2.6-1t:free","canonical_slug":"inclusionai/ring-2.6-1t-20260508","hugging_face_id":null,"name":"inclusionAI: Ring-2.6-1T (free)","created":1778247440,"description":"Ring-2.6-1T is a 1T-parameter-scale thinking model with 63B active parameters, built for real-world agent workflows that require both strong capability and operational efficiency. It is optimized for coding agents, tool...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":262144,"max_completion_tokens":65536,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/inclusionai/ring-2.6-1t-20260508/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"tencent/hy3-preview","canonical_slug":"tencent/hy3-preview-20260421","hugging_face_id":"tencent/Hy3-preview","name":"Tencent: Hy3 preview","created":1776878150,"description":"Hy3 preview is a high-efficiency Mixture-of-Experts model from Tencent designed for agentic workflows and production use. It supports configurable reasoning levels across disabled, low, and high modes, allowing it to...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.000000066","completion":"0.00000026","input_cache_read":"0.000000029"},"top_provider":{"context_length":262144,"max_completion_tokens":262144,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","stop","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.9,"top_p":1,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/tencent/hy3-preview-20260421/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.066,"completion":0.26}},{"id":"arcee-ai/trinity-large-thinking:free","canonical_slug":"arcee-ai/trinity-large-thinking","hugging_face_id":"arcee-ai/Trinity-Large-Thinking","name":"Arcee AI: Trinity Large Thinking (free)","created":1775058318,"description":"Trinity Large Thinking is a powerful open source reasoning model from the team at Arcee AI. It shows strong performance in PinchBench, agentic workloads, and reasoning tasks. Launch video: https://youtu.be/Gc82AXLa0Rg?si=4RLn6WBz33qT--B7...","context_length":262144,"architecture":{"modality":"text->text","input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0","completion":"0"},"top_provider":{"context_length":262144,"max_completion_tokens":80000,"is_moderated":false},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","temperature","tool_choice","tools","top_k","top_p"],"default_parameters":{"temperature":0.3,"top_p":0.8,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/arcee-ai/trinity-large-thinking/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.0,"completion":0.0}},{"id":"perceptron/perceptron-mk1","canonical_slug":"perceptron/perceptron-mk1-20260512","hugging_face_id":null,"name":"Perceptron: Perceptron Mk1","created":1778597029,"description":"Perceptron Mk1 (Mark One) is Perceptron's highest-quality vision-language model for video and embodied reasoning.** It accepts image and video inputs paired with natural language queries, and produces detailed visual understanding...","context_length":32768,"architecture":{"modality":"text+image+video->text","input_modalities":["text","image","video"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.00000015","completion":"0.0000015"},"top_provider":{"context_length":32768,"max_completion_tokens":8192,"is_moderated":false},"per_request_limits":null,"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","structured_outputs","temperature","top_k","top_p"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/perceptron/perceptron-mk1-20260512/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":0.15,"completion":1.5}},{"id":"anthropic/claude-opus-4.7-fast","canonical_slug":"anthropic/claude-4.7-opus-fast-20260512","hugging_face_id":null,"name":"Anthropic: Claude Opus 4.7 (Fast)","created":1778613011,"description":"Fast-mode variant of [Opus 4.7](/anthropic/claude-opus-4.7) - identical capabilities with higher output speed at premium 6x pricing.\n\nLearn more in Anthropic's docs: https://platform.claude.com/docs/en/build-with-claude/fast-mode","context_length":1000000,"architecture":{"modality":"text+image+file->text","input_modalities":["text","image","file"],"output_modalities":["text"],"tokenizer":"Claude","instruct_type":null},"pricing":{"prompt":"0.00003","completion":"0.00015","web_search":"0.01","input_cache_read":"0.000003","input_cache_write":"0.0000375"},"top_provider":{"context_length":1000000,"max_completion_tokens":128000,"is_moderated":true},"per_request_limits":null,"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","tool_choice","tools","verbosity"],"default_parameters":{"temperature":null,"top_p":null,"top_k":null,"frequency_penalty":null,"presence_penalty":null,"repetition_penalty":null},"supported_voices":null,"knowledge_cutoff":null,"expiration_date":null,"links":{"details":"/api/v1/models/anthropic/claude-4.7-opus-fast-20260512/endpoints"},"object":"model","owned_by":"proxy","type":"chat","pricing_per_million":{"prompt":30.0,"completion":150.0}}]}