[
    {
        "id": "ai21/jamba-large-1.7",
        "name": "AI21: Jamba Large 1.7",
        "provider": "ai21",
        "architecture": [
            "text"
        ],
        "context": 256000,
        "prompt_price": 2.0,
        "completion_price": 8.0,
        "description": "Jamba Large 1.7 is the latest model in the Jamba open family, offering improvements in grounding, instruction-following, and overall efficiency. Built on a hybrid SSM-Transformer architecture with a 256K context..."
    },
    {
        "id": "aion-labs/aion-1.0",
        "name": "AionLabs: Aion-1.0",
        "provider": "aion-labs",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 4.0,
        "completion_price": 8.0,
        "description": "Aion-1.0 is a multi-model system designed for high performance across various tasks, including reasoning and coding. It is built on DeepSeek-R1, augmented with additional models and techniques such as Tree..."
    },
    {
        "id": "aion-labs/aion-1.0-mini",
        "name": "AionLabs: Aion-1.0-Mini",
        "provider": "aion-labs",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.7,
        "completion_price": 1.4,
        "description": "Aion-1.0-Mini 32B parameter model is a distilled version of the DeepSeek-R1 model, designed for strong performance in reasoning domains such as mathematics, coding, and logic. It is a modified variant..."
    },
    {
        "id": "aion-labs/aion-2.0",
        "name": "AionLabs: Aion-2.0",
        "provider": "aion-labs",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.7999999999999999,
        "completion_price": 1.5999999999999999,
        "description": "Aion-2.0 is a variant of DeepSeek V3.2 optimized for immersive roleplaying and storytelling. It is particularly strong at introducing tension, crises, and conflict into stories, making narratives feel more engaging...."
    },
    {
        "id": "aion-labs/aion-rp-llama-3.1-8b",
        "name": "AionLabs: Aion-RP 1.0 (8B)",
        "provider": "aion-labs",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.7999999999999999,
        "completion_price": 1.5999999999999999,
        "description": "Aion-RP-Llama-3.1-8B ranks the highest in the character evaluation portion of the RPBench-Auto benchmark, a roleplaying-specific variant of Arena-Hard-Auto, where LLMs evaluate each other\u2019s responses. It is a fine-tuned base model..."
    },
    {
        "id": "alfredpros/codellama-7b-instruct-solidity",
        "name": "AlfredPros: CodeLLaMa 7B Instruct Solidity",
        "provider": "alfredpros",
        "architecture": [
            "text"
        ],
        "context": 4096,
        "prompt_price": 0.7999999999999999,
        "completion_price": 1.2,
        "description": "A finetuned 7 billion parameters Code LLaMA - Instruct model to generate Solidity smart contract using 4-bit QLoRA finetuning provided by PEFT library."
    },
    {
        "id": "alibaba/tongyi-deepresearch-30b-a3b",
        "name": "Tongyi DeepResearch 30B A3B",
        "provider": "alibaba",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.09,
        "completion_price": 0.44999999999999996,
        "description": "Tongyi DeepResearch is an agentic large language model developed by Tongyi Lab, with 30 billion total parameters activating only 3 billion per token. It's optimized for long-horizon, deep information-seeking tasks..."
    },
    {
        "id": "allenai/olmo-2-0325-32b-instruct",
        "name": "AllenAI: Olmo 2 32B Instruct",
        "provider": "allenai",
        "architecture": [
            "text"
        ],
        "context": 128000,
        "prompt_price": 0.049999999999999996,
        "completion_price": 0.19999999999999998,
        "description": "OLMo-2 32B Instruct is a supervised instruction-finetuned variant of the OLMo-2 32B March 2025 base model. It excels in complex reasoning and instruction-following tasks across diverse benchmarks such as GSM8K,..."
    },
    {
        "id": "allenai/olmo-3-32b-think",
        "name": "AllenAI: Olmo 3 32B Think",
        "provider": "allenai",
        "architecture": [
            "text"
        ],
        "context": 65536,
        "prompt_price": 0.15,
        "completion_price": 0.5,
        "description": "Olmo 3 32B Think is a large-scale, 32-billion-parameter model purpose-built for deep reasoning, complex logic chains and advanced instruction-following scenarios. Its capacity enables strong performance on demanding evaluation tasks and..."
    },
    {
        "id": "allenai/olmo-3.1-32b-instruct",
        "name": "AllenAI: Olmo 3.1 32B Instruct",
        "provider": "allenai",
        "architecture": [
            "text"
        ],
        "context": 65536,
        "prompt_price": 0.19999999999999998,
        "completion_price": 0.6,
        "description": "Olmo 3.1 32B Instruct is a large-scale, 32-billion-parameter instruction-tuned language model engineered for high-performance conversational AI, multi-turn dialogue, and practical instruction following. As part of the Olmo 3.1 family, this..."
    },
    {
        "id": "alpindale/goliath-120b",
        "name": "Goliath 120B",
        "provider": "alpindale",
        "architecture": [
            "text"
        ],
        "context": 6144,
        "prompt_price": 3.75,
        "completion_price": 7.5,
        "description": "A large LLM created by combining two fine-tuned Llama 70B models into one 120B model. Combines Xwin and Euryale. Credits to - [@chargoddard](https://huggingface.co/chargoddard) for developing the framework used to merge..."
    },
    {
        "id": "amazon/nova-2-lite-v1",
        "name": "Amazon: Nova 2 Lite",
        "provider": "amazon",
        "architecture": [
            "image",
            "video"
        ],
        "context": 1000000,
        "prompt_price": 0.3,
        "completion_price": 2.5,
        "description": "Nova 2 Lite is a fast, cost-effective reasoning model for everyday workloads that can process text, images, and videos to generate text. Nova 2 Lite demonstrates standout capabilities in processing..."
    },
    {
        "id": "amazon/nova-lite-v1",
        "name": "Amazon: Nova Lite 1.0",
        "provider": "amazon",
        "architecture": [
            "image"
        ],
        "context": 300000,
        "prompt_price": 0.06,
        "completion_price": 0.24,
        "description": "Amazon Nova Lite 1.0 is a very low-cost multimodal model from Amazon that focused on fast processing of image, video, and text inputs to generate text output. Amazon Nova Lite..."
    },
    {
        "id": "amazon/nova-micro-v1",
        "name": "Amazon: Nova Micro 1.0",
        "provider": "amazon",
        "architecture": [
            "text"
        ],
        "context": 128000,
        "prompt_price": 0.035,
        "completion_price": 0.14,
        "description": "Amazon Nova Micro 1.0 is a text-only model that delivers the lowest latency responses in the Amazon Nova family of models at a very low cost. With a context length..."
    },
    {
        "id": "amazon/nova-premier-v1",
        "name": "Amazon: Nova Premier 1.0",
        "provider": "amazon",
        "architecture": [
            "image"
        ],
        "context": 1000000,
        "prompt_price": 2.5,
        "completion_price": 12.5,
        "description": "Amazon Nova Premier is the most capable of Amazon\u2019s multimodal models for complex reasoning tasks and for use as the best teacher for distilling custom models."
    },
    {
        "id": "amazon/nova-pro-v1",
        "name": "Amazon: Nova Pro 1.0",
        "provider": "amazon",
        "architecture": [
            "image"
        ],
        "context": 300000,
        "prompt_price": 0.7999999999999999,
        "completion_price": 3.1999999999999997,
        "description": "Amazon Nova Pro 1.0 is a capable multimodal model from Amazon focused on providing a combination of accuracy, speed, and cost for a wide range of tasks. As of December..."
    },
    {
        "id": "anthracite-org/magnum-v4-72b",
        "name": "Magnum v4 72B",
        "provider": "anthracite-org",
        "architecture": [
            "text"
        ],
        "context": 16384,
        "prompt_price": 3.0,
        "completion_price": 5.0,
        "description": "This is a series of models designed to replicate the prose quality of the Claude 3 models, specifically Sonnet(https://openrouter.ai/anthropic/claude-3.5-sonnet) and Opus(https://openrouter.ai/anthropic/claude-3-opus).\n\nThe model is fine-tuned on top of [Qwen2.5 72B](https://openrouter.ai/qwen/qwen-2.5-72b-instruct)."
    },
    {
        "id": "anthropic/claude-3-haiku",
        "name": "Anthropic: Claude 3 Haiku",
        "provider": "anthropic",
        "architecture": [
            "image"
        ],
        "context": 200000,
        "prompt_price": 0.25,
        "completion_price": 1.25,
        "description": "Claude 3 Haiku is Anthropic's fastest and most compact model for\nnear-instant responsiveness. Quick and accurate targeted performance.\n\nSee the launch announcement and benchmark results [here](https://www.anthropic.com/news/claude-3-haiku)\n\n#multimodal"
    },
    {
        "id": "anthropic/claude-3.5-haiku",
        "name": "Anthropic: Claude 3.5 Haiku",
        "provider": "anthropic",
        "architecture": [
            "image"
        ],
        "context": 200000,
        "prompt_price": 0.7999999999999999,
        "completion_price": 4.0,
        "description": "Claude 3.5 Haiku features offers enhanced capabilities in speed, coding accuracy, and tool use. Engineered to excel in real-time applications, it delivers quick response times that are essential for dynamic..."
    },
    {
        "id": "anthropic/claude-3.7-sonnet",
        "name": "Anthropic: Claude 3.7 Sonnet",
        "provider": "anthropic",
        "architecture": [
            "image"
        ],
        "context": 200000,
        "prompt_price": 3.0,
        "completion_price": 15.0,
        "description": "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and..."
    },
    {
        "id": "anthropic/claude-3.7-sonnet:thinking",
        "name": "Anthropic: Claude 3.7 Sonnet (thinking)",
        "provider": "anthropic",
        "architecture": [
            "image"
        ],
        "context": 200000,
        "prompt_price": 3.0,
        "completion_price": 15.0,
        "description": "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and..."
    },
    {
        "id": "anthropic/claude-haiku-4.5",
        "name": "Anthropic: Claude Haiku 4.5",
        "provider": "anthropic",
        "architecture": [
            "image"
        ],
        "context": 200000,
        "prompt_price": 1.0,
        "completion_price": 5.0,
        "description": "Claude Haiku 4.5 is Anthropic\u2019s fastest and most efficient model, delivering near-frontier intelligence at a fraction of the cost and latency of larger Claude models. Matching Claude Sonnet 4\u2019s performance..."
    },
    {
        "id": "anthropic/claude-opus-4",
        "name": "Anthropic: Claude Opus 4",
        "provider": "anthropic",
        "architecture": [
            "image"
        ],
        "context": 200000,
        "prompt_price": 15.0,
        "completion_price": 75.0,
        "description": "Claude Opus 4 is benchmarked as the world\u2019s best coding model, at time of release, bringing sustained performance on complex, long-running tasks and agent workflows. It sets new benchmarks in..."
    },
    {
        "id": "anthropic/claude-opus-4.1",
        "name": "Anthropic: Claude Opus 4.1",
        "provider": "anthropic",
        "architecture": [
            "image"
        ],
        "context": 200000,
        "prompt_price": 15.0,
        "completion_price": 75.0,
        "description": "Claude Opus 4.1 is an updated version of Anthropic\u2019s flagship model, offering improved performance in coding, reasoning, and agentic tasks. It achieves 74.5% on SWE-bench Verified and shows notable gains..."
    },
    {
        "id": "anthropic/claude-opus-4.5",
        "name": "Anthropic: Claude Opus 4.5",
        "provider": "anthropic",
        "architecture": [
            "image"
        ],
        "context": 200000,
        "prompt_price": 5.0,
        "completion_price": 25.0,
        "description": "Claude Opus 4.5 is Anthropic\u2019s frontier reasoning model optimized for complex software engineering, agentic workflows, and long-horizon computer use. It offers strong multimodal capabilities, competitive performance across real-world coding and..."
    },
    {
        "id": "anthropic/claude-opus-4.6",
        "name": "Anthropic: Claude Opus 4.6",
        "provider": "anthropic",
        "architecture": [
            "image"
        ],
        "context": 1000000,
        "prompt_price": 5.0,
        "completion_price": 25.0,
        "description": "Opus 4.6 is Anthropic\u2019s strongest model for coding and long-running professional tasks. It is built for agents that operate across entire workflows rather than single prompts, making it especially effective..."
    },
    {
        "id": "anthropic/claude-opus-4.6-fast",
        "name": "Anthropic: Claude Opus 4.6 (Fast)",
        "provider": "anthropic",
        "architecture": [
            "image"
        ],
        "context": 1000000,
        "prompt_price": 30.0,
        "completion_price": 150.0,
        "description": "Fast-mode variant of [Opus 4.6](/anthropic/claude-opus-4.6) - identical capabilities with higher output speed at premium 6x pricing.\n\nLearn more in Anthropic's docs: https://platform.claude.com/docs/en/build-with-claude/fast-mode"
    },
    {
        "id": "anthropic/claude-sonnet-4",
        "name": "Anthropic: Claude Sonnet 4",
        "provider": "anthropic",
        "architecture": [
            "image"
        ],
        "context": 1000000,
        "prompt_price": 3.0,
        "completion_price": 15.0,
        "description": "Claude Sonnet 4 significantly enhances the capabilities of its predecessor, Sonnet 3.7, excelling in both coding and reasoning tasks with improved precision and controllability. Achieving state-of-the-art performance on SWE-bench (72.7%),..."
    },
    {
        "id": "anthropic/claude-sonnet-4.5",
        "name": "Anthropic: Claude Sonnet 4.5",
        "provider": "anthropic",
        "architecture": [
            "image"
        ],
        "context": 1000000,
        "prompt_price": 3.0,
        "completion_price": 15.0,
        "description": "Claude Sonnet 4.5 is Anthropic\u2019s most advanced Sonnet model to date, optimized for real-world agents and coding workflows. It delivers state-of-the-art performance on coding benchmarks such as SWE-bench Verified, with..."
    },
    {
        "id": "anthropic/claude-sonnet-4.6",
        "name": "Anthropic: Claude Sonnet 4.6",
        "provider": "anthropic",
        "architecture": [
            "image"
        ],
        "context": 1000000,
        "prompt_price": 3.0,
        "completion_price": 15.0,
        "description": "Sonnet 4.6 is Anthropic's most capable Sonnet-class model yet, with frontier performance across coding, agents, and professional work. It excels at iterative development, complex codebase navigation, end-to-end project management with..."
    },
    {
        "id": "arcee-ai/coder-large",
        "name": "Arcee AI: Coder Large",
        "provider": "arcee-ai",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.5,
        "completion_price": 0.7999999999999999,
        "description": "Coder\u2011Large is a 32 B\u2011parameter offspring of Qwen 2.5\u2011Instruct that has been further trained on permissively\u2011licensed GitHub, CodeSearchNet and synthetic bug\u2011fix corpora. It supports a 32k context window, enabling multi\u2011file..."
    },
    {
        "id": "arcee-ai/maestro-reasoning",
        "name": "Arcee AI: Maestro Reasoning",
        "provider": "arcee-ai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.8999999999999999,
        "completion_price": 3.3000000000000003,
        "description": "Maestro Reasoning is Arcee's flagship analysis model: a 32 B\u2011parameter derivative of Qwen 2.5\u201132 B tuned with DPO and chain\u2011of\u2011thought RL for step\u2011by\u2011step logic. Compared to the earlier 7 B..."
    },
    {
        "id": "arcee-ai/spotlight",
        "name": "Arcee AI: Spotlight",
        "provider": "arcee-ai",
        "architecture": [
            "image"
        ],
        "context": 131072,
        "prompt_price": 0.18,
        "completion_price": 0.18,
        "description": "Spotlight is a 7\u2011billion\u2011parameter vision\u2011language model derived from Qwen 2.5\u2011VL and fine\u2011tuned by Arcee AI for tight image\u2011text grounding tasks. It offers a 32 k\u2011token context window, enabling rich multimodal..."
    },
    {
        "id": "arcee-ai/trinity-large-preview:free",
        "name": "Arcee AI: Trinity Large Preview (free)",
        "provider": "arcee-ai",
        "architecture": [
            "text"
        ],
        "context": 131000,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "Trinity-Large-Preview is a frontier-scale open-weight language model from Arcee, built as a 400B-parameter sparse Mixture-of-Experts with 13B active parameters per token using 4-of-256 expert routing. It excels in creative writing,..."
    },
    {
        "id": "arcee-ai/trinity-large-thinking",
        "name": "Arcee AI: Trinity Large Thinking",
        "provider": "arcee-ai",
        "architecture": [
            "text"
        ],
        "context": 262144,
        "prompt_price": 0.22,
        "completion_price": 0.85,
        "description": "Trinity Large Thinking is a powerful open source reasoning model from the team at Arcee AI. It shows strong performance in PinchBench, agentic workloads, and reasoning tasks. Launch video: https://youtu.be/Gc82AXLa0Rg?si=4RLn6WBz33qT--B7"
    },
    {
        "id": "arcee-ai/trinity-mini",
        "name": "Arcee AI: Trinity Mini",
        "provider": "arcee-ai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.045,
        "completion_price": 0.15,
        "description": "Trinity Mini is a 26B-parameter (3B active) sparse mixture-of-experts language model featuring 128 experts with 8 active per token. Engineered for efficient reasoning over long contexts (131k) with robust function..."
    },
    {
        "id": "arcee-ai/virtuoso-large",
        "name": "Arcee AI: Virtuoso Large",
        "provider": "arcee-ai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.75,
        "completion_price": 1.2,
        "description": "Virtuoso\u2011Large is Arcee's top\u2011tier general\u2011purpose LLM at 72 B parameters, tuned to tackle cross\u2011domain reasoning, creative writing and enterprise QA. Unlike many 70 B peers, it retains the 128 k..."
    },
    {
        "id": "baidu/ernie-4.5-21b-a3b",
        "name": "Baidu: ERNIE 4.5 21B A3B",
        "provider": "baidu",
        "architecture": [
            "text"
        ],
        "context": 120000,
        "prompt_price": 0.07,
        "completion_price": 0.28,
        "description": "A sophisticated text-based Mixture-of-Experts (MoE) model featuring 21B total parameters with 3B activated per token, delivering exceptional multimodal understanding and generation through heterogeneous MoE structures and modality-isolated routing. Supporting an..."
    },
    {
        "id": "baidu/ernie-4.5-21b-a3b-thinking",
        "name": "Baidu: ERNIE 4.5 21B A3B Thinking",
        "provider": "baidu",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.07,
        "completion_price": 0.28,
        "description": "ERNIE-4.5-21B-A3B-Thinking is Baidu's upgraded lightweight MoE model, refined to boost reasoning depth and quality for top-tier performance in logical puzzles, math, science, coding, text generation, and expert-level academic benchmarks."
    },
    {
        "id": "baidu/ernie-4.5-300b-a47b",
        "name": "Baidu: ERNIE 4.5 300B A47B ",
        "provider": "baidu",
        "architecture": [
            "text"
        ],
        "context": 123000,
        "prompt_price": 0.28,
        "completion_price": 1.1,
        "description": "ERNIE-4.5-300B-A47B is a 300B parameter Mixture-of-Experts (MoE) language model developed by Baidu as part of the ERNIE 4.5 series. It activates 47B parameters per token and supports text generation in..."
    },
    {
        "id": "baidu/ernie-4.5-vl-28b-a3b",
        "name": "Baidu: ERNIE 4.5 VL 28B A3B",
        "provider": "baidu",
        "architecture": [
            "image"
        ],
        "context": 30000,
        "prompt_price": 0.14,
        "completion_price": 0.56,
        "description": "A powerful multimodal Mixture-of-Experts chat model featuring 28B total parameters with 3B activated per token, delivering exceptional text and vision understanding through its innovative heterogeneous MoE structure with modality-isolated routing...."
    },
    {
        "id": "baidu/ernie-4.5-vl-424b-a47b",
        "name": "Baidu: ERNIE 4.5 VL 424B A47B ",
        "provider": "baidu",
        "architecture": [
            "image"
        ],
        "context": 123000,
        "prompt_price": 0.42,
        "completion_price": 1.25,
        "description": "ERNIE-4.5-VL-424B-A47B is a multimodal Mixture-of-Experts (MoE) model from Baidu\u2019s ERNIE 4.5 series, featuring 424B total parameters with 47B active per token. It is trained jointly on text and image data..."
    },
    {
        "id": "bytedance/ui-tars-1.5-7b",
        "name": "ByteDance: UI-TARS 7B ",
        "provider": "bytedance",
        "architecture": [
            "image"
        ],
        "context": 128000,
        "prompt_price": 0.09999999999999999,
        "completion_price": 0.19999999999999998,
        "description": "UI-TARS-1.5 is a multimodal vision-language agent optimized for GUI-based environments, including desktop interfaces, web browsers, mobile systems, and games. Built by ByteDance, it builds upon the UI-TARS framework with reinforcement..."
    },
    {
        "id": "bytedance-seed/seed-1.6",
        "name": "ByteDance Seed: Seed 1.6",
        "provider": "bytedance-seed",
        "architecture": [
            "image",
            "video"
        ],
        "context": 262144,
        "prompt_price": 0.25,
        "completion_price": 2.0,
        "description": "Seed 1.6 is a general-purpose model released by the ByteDance Seed team. It incorporates multimodal capabilities and adaptive deep thinking with a 256K context window."
    },
    {
        "id": "bytedance-seed/seed-1.6-flash",
        "name": "ByteDance Seed: Seed 1.6 Flash",
        "provider": "bytedance-seed",
        "architecture": [
            "image",
            "video"
        ],
        "context": 262144,
        "prompt_price": 0.075,
        "completion_price": 0.3,
        "description": "Seed 1.6 Flash is an ultra-fast multimodal deep thinking model by ByteDance Seed, supporting both text and visual understanding. It features a 256k context window and can generate outputs of..."
    },
    {
        "id": "bytedance-seed/seed-2.0-lite",
        "name": "ByteDance Seed: Seed-2.0-Lite",
        "provider": "bytedance-seed",
        "architecture": [
            "image",
            "video"
        ],
        "context": 262144,
        "prompt_price": 0.25,
        "completion_price": 2.0,
        "description": "Seed-2.0-Lite is a versatile, cost\u2011efficient enterprise workhorse that delivers strong multimodal and agent capabilities while offering noticeably lower latency, making it a practical default choice for most production workloads across..."
    },
    {
        "id": "bytedance-seed/seed-2.0-mini",
        "name": "ByteDance Seed: Seed-2.0-Mini",
        "provider": "bytedance-seed",
        "architecture": [
            "image",
            "video"
        ],
        "context": 262144,
        "prompt_price": 0.09999999999999999,
        "completion_price": 0.39999999999999997,
        "description": "Seed-2.0-mini targets latency-sensitive, high-concurrency, and cost-sensitive scenarios, emphasizing fast response and flexible inference deployment. It delivers performance comparable to ByteDance-Seed-1.6, supports 256k context, four reasoning effort modes (minimal/low/medium/high), multimodal understanding,..."
    },
    {
        "id": "cognitivecomputations/dolphin-mistral-24b-venice-edition:free",
        "name": "Venice: Uncensored (free)",
        "provider": "cognitivecomputations",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "Venice Uncensored Dolphin Mistral 24B Venice Edition is a fine-tuned variant of Mistral-Small-24B-Instruct-2501, developed by dphn.ai in collaboration with Venice.ai. This model is designed as an \u201cuncensored\u201d instruct-tuned LLM, preserving..."
    },
    {
        "id": "cohere/command-a",
        "name": "Cohere: Command A",
        "provider": "cohere",
        "architecture": [
            "text"
        ],
        "context": 256000,
        "prompt_price": 2.5,
        "completion_price": 10.0,
        "description": "Command A is an open-weights 111B parameter model with a 256k context window focused on delivering great performance across agentic, multilingual, and coding use cases. Compared to other leading proprietary..."
    },
    {
        "id": "cohere/command-r-08-2024",
        "name": "Cohere: Command R (08-2024)",
        "provider": "cohere",
        "architecture": [
            "text"
        ],
        "context": 128000,
        "prompt_price": 0.15,
        "completion_price": 0.6,
        "description": "command-r-08-2024 is an update of the [Command R](/models/cohere/command-r) with improved performance for multilingual retrieval-augmented generation (RAG) and tool use. More broadly, it is better at math, code and reasoning and..."
    },
    {
        "id": "cohere/command-r-plus-08-2024",
        "name": "Cohere: Command R+ (08-2024)",
        "provider": "cohere",
        "architecture": [
            "text"
        ],
        "context": 128000,
        "prompt_price": 2.5,
        "completion_price": 10.0,
        "description": "command-r-plus-08-2024 is an update of the [Command R+](/models/cohere/command-r-plus) with roughly 50% higher throughput and 25% lower latencies as compared to the previous Command R+ version, while keeping the hardware footprint..."
    },
    {
        "id": "cohere/command-r7b-12-2024",
        "name": "Cohere: Command R7B (12-2024)",
        "provider": "cohere",
        "architecture": [
            "text"
        ],
        "context": 128000,
        "prompt_price": 0.0375,
        "completion_price": 0.15,
        "description": "Command R7B (12-2024) is a small, fast update of the Command R+ model, delivered in December 2024. It excels at RAG, tool use, agents, and similar tasks requiring complex reasoning..."
    },
    {
        "id": "deepcogito/cogito-v2.1-671b",
        "name": "Deep Cogito: Cogito v2.1 671B",
        "provider": "deepcogito",
        "architecture": [
            "text"
        ],
        "context": 128000,
        "prompt_price": 1.25,
        "completion_price": 1.25,
        "description": "Cogito v2.1 671B MoE represents one of the strongest open models globally, matching performance of frontier closed and open models. This model is trained using self play with reinforcement learning..."
    },
    {
        "id": "deepseek/deepseek-chat",
        "name": "DeepSeek: DeepSeek V3",
        "provider": "deepseek",
        "architecture": [
            "text"
        ],
        "context": 163840,
        "prompt_price": 0.32,
        "completion_price": 0.8899999999999999,
        "description": "DeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions. Pre-trained on nearly 15 trillion tokens, the reported evaluations..."
    },
    {
        "id": "deepseek/deepseek-chat-v3-0324",
        "name": "DeepSeek: DeepSeek V3 0324",
        "provider": "deepseek",
        "architecture": [
            "text"
        ],
        "context": 163840,
        "prompt_price": 0.19999999999999998,
        "completion_price": 0.77,
        "description": "DeepSeek V3, a 685B-parameter, mixture-of-experts model, is the latest iteration of the flagship chat model family from the DeepSeek team. It succeeds the [DeepSeek V3](/deepseek/deepseek-chat-v3) model and performs really well..."
    },
    {
        "id": "deepseek/deepseek-chat-v3.1",
        "name": "DeepSeek: DeepSeek V3.1",
        "provider": "deepseek",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.15,
        "completion_price": 0.75,
        "description": "DeepSeek-V3.1 is a large hybrid reasoning model (671B parameters, 37B active) that supports both thinking and non-thinking modes via prompt templates. It extends the DeepSeek-V3 base with a two-phase long-context..."
    },
    {
        "id": "deepseek/deepseek-v3.1-terminus",
        "name": "DeepSeek: DeepSeek V3.1 Terminus",
        "provider": "deepseek",
        "architecture": [
            "text"
        ],
        "context": 163840,
        "prompt_price": 0.21,
        "completion_price": 0.7899999999999999,
        "description": "DeepSeek-V3.1 Terminus is an update to [DeepSeek V3.1](/deepseek/deepseek-chat-v3.1) that maintains the model's original capabilities while addressing issues reported by users, including language consistency and agent capabilities, further optimizing the model's..."
    },
    {
        "id": "deepseek/deepseek-v3.2",
        "name": "DeepSeek: DeepSeek V3.2",
        "provider": "deepseek",
        "architecture": [
            "text"
        ],
        "context": 163840,
        "prompt_price": 0.26,
        "completion_price": 0.38,
        "description": "DeepSeek-V3.2 is a large language model designed to harmonize high computational efficiency with strong reasoning and agentic tool-use performance. It introduces DeepSeek Sparse Attention (DSA), a fine-grained sparse attention mechanism..."
    },
    {
        "id": "deepseek/deepseek-v3.2-exp",
        "name": "DeepSeek: DeepSeek V3.2 Exp",
        "provider": "deepseek",
        "architecture": [
            "text"
        ],
        "context": 163840,
        "prompt_price": 0.27,
        "completion_price": 0.41,
        "description": "DeepSeek-V3.2-Exp is an experimental large language model released by DeepSeek as an intermediate step between V3.1 and future architectures. It introduces DeepSeek Sparse Attention (DSA), a fine-grained sparse attention mechanism..."
    },
    {
        "id": "deepseek/deepseek-v3.2-speciale",
        "name": "DeepSeek: DeepSeek V3.2 Speciale",
        "provider": "deepseek",
        "architecture": [
            "text"
        ],
        "context": 163840,
        "prompt_price": 0.39999999999999997,
        "completion_price": 1.2,
        "description": "DeepSeek-V3.2-Speciale is a high-compute variant of DeepSeek-V3.2 optimized for maximum reasoning and agentic performance. It builds on DeepSeek Sparse Attention (DSA) for efficient long-context processing, then scales post-training reinforcement learning..."
    },
    {
        "id": "deepseek/deepseek-r1",
        "name": "DeepSeek: R1",
        "provider": "deepseek",
        "architecture": [
            "text"
        ],
        "context": 64000,
        "prompt_price": 0.7,
        "completion_price": 2.5,
        "description": "DeepSeek R1 is here: Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass...."
    },
    {
        "id": "deepseek/deepseek-r1-0528",
        "name": "DeepSeek: R1 0528",
        "provider": "deepseek",
        "architecture": [
            "text"
        ],
        "context": 163840,
        "prompt_price": 0.5,
        "completion_price": 2.1500000000000004,
        "description": "May 28th update to the [original DeepSeek R1](/deepseek/deepseek-r1) Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active..."
    },
    {
        "id": "deepseek/deepseek-r1-distill-llama-70b",
        "name": "DeepSeek: R1 Distill Llama 70B",
        "provider": "deepseek",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.7,
        "completion_price": 0.7999999999999999,
        "description": "DeepSeek R1 Distill Llama 70B is a distilled large language model based on [Llama-3.3-70B-Instruct](/meta-llama/llama-3.3-70b-instruct), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). The model combines advanced distillation techniques to achieve high performance across..."
    },
    {
        "id": "deepseek/deepseek-r1-distill-qwen-32b",
        "name": "DeepSeek: R1 Distill Qwen 32B",
        "provider": "deepseek",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.29,
        "completion_price": 0.29,
        "description": "DeepSeek R1 Distill Qwen 32B is a distilled large language model based on [Qwen 2.5 32B](https://huggingface.co/Qwen/Qwen2.5-32B), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). It outperforms OpenAI's o1-mini across various benchmarks, achieving new..."
    },
    {
        "id": "essentialai/rnj-1-instruct",
        "name": "EssentialAI: Rnj 1 Instruct",
        "provider": "essentialai",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.15,
        "completion_price": 0.15,
        "description": "Rnj-1 is an 8B-parameter, dense, open-weight model family developed by Essential AI and trained from scratch with a focus on programming, math, and scientific reasoning. The model demonstrates strong performance..."
    },
    {
        "id": "google/gemini-2.0-flash-001",
        "name": "Google: Gemini 2.0 Flash",
        "provider": "google",
        "architecture": [
            "image",
            "audio",
            "video"
        ],
        "context": 1048576,
        "prompt_price": 0.09999999999999999,
        "completion_price": 0.39999999999999997,
        "description": "Gemini Flash 2.0 offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5). It..."
    },
    {
        "id": "google/gemini-2.0-flash-lite-001",
        "name": "Google: Gemini 2.0 Flash Lite",
        "provider": "google",
        "architecture": [
            "image",
            "audio",
            "video"
        ],
        "context": 1048576,
        "prompt_price": 0.075,
        "completion_price": 0.3,
        "description": "Gemini 2.0 Flash Lite offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5),..."
    },
    {
        "id": "google/gemini-2.5-flash",
        "name": "Google: Gemini 2.5 Flash",
        "provider": "google",
        "architecture": [
            "image",
            "audio",
            "video"
        ],
        "context": 1048576,
        "prompt_price": 0.3,
        "completion_price": 2.5,
        "description": "Gemini 2.5 Flash is Google's state-of-the-art workhorse model, specifically designed for advanced reasoning, coding, mathematics, and scientific tasks. It includes built-in \"thinking\" capabilities, enabling it to provide responses with greater..."
    },
    {
        "id": "google/gemini-2.5-flash-lite",
        "name": "Google: Gemini 2.5 Flash Lite",
        "provider": "google",
        "architecture": [
            "image",
            "audio",
            "video"
        ],
        "context": 1048576,
        "prompt_price": 0.09999999999999999,
        "completion_price": 0.39999999999999997,
        "description": "Gemini 2.5 Flash-Lite is a lightweight reasoning model in the Gemini 2.5 family, optimized for ultra-low latency and cost efficiency. It offers improved throughput, faster token generation, and better performance..."
    },
    {
        "id": "google/gemini-2.5-flash-lite-preview-09-2025",
        "name": "Google: Gemini 2.5 Flash Lite Preview 09-2025",
        "provider": "google",
        "architecture": [
            "image",
            "audio",
            "video"
        ],
        "context": 1048576,
        "prompt_price": 0.09999999999999999,
        "completion_price": 0.39999999999999997,
        "description": "Gemini 2.5 Flash-Lite is a lightweight reasoning model in the Gemini 2.5 family, optimized for ultra-low latency and cost efficiency. It offers improved throughput, faster token generation, and better performance..."
    },
    {
        "id": "google/gemini-2.5-pro",
        "name": "Google: Gemini 2.5 Pro",
        "provider": "google",
        "architecture": [
            "image",
            "audio",
            "video"
        ],
        "context": 1048576,
        "prompt_price": 1.25,
        "completion_price": 10.0,
        "description": "Gemini 2.5 Pro is Google\u2019s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs \u201cthinking\u201d capabilities, enabling it to reason through responses with enhanced accuracy..."
    },
    {
        "id": "google/gemini-2.5-pro-preview-05-06",
        "name": "Google: Gemini 2.5 Pro Preview 05-06",
        "provider": "google",
        "architecture": [
            "image",
            "audio",
            "video"
        ],
        "context": 1048576,
        "prompt_price": 1.25,
        "completion_price": 10.0,
        "description": "Gemini 2.5 Pro is Google\u2019s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs \u201cthinking\u201d capabilities, enabling it to reason through responses with enhanced accuracy..."
    },
    {
        "id": "google/gemini-2.5-pro-preview",
        "name": "Google: Gemini 2.5 Pro Preview 06-05",
        "provider": "google",
        "architecture": [
            "image",
            "audio"
        ],
        "context": 1048576,
        "prompt_price": 1.25,
        "completion_price": 10.0,
        "description": "Gemini 2.5 Pro is Google\u2019s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs \u201cthinking\u201d capabilities, enabling it to reason through responses with enhanced accuracy..."
    },
    {
        "id": "google/gemini-3-flash-preview",
        "name": "Google: Gemini 3 Flash Preview",
        "provider": "google",
        "architecture": [
            "image",
            "audio",
            "video"
        ],
        "context": 1048576,
        "prompt_price": 0.5,
        "completion_price": 3.0,
        "description": "Gemini 3 Flash Preview is a high speed, high value thinking model designed for agentic workflows, multi turn chat, and coding assistance. It delivers near Pro level reasoning and tool..."
    },
    {
        "id": "google/gemini-3.1-flash-lite-preview",
        "name": "Google: Gemini 3.1 Flash Lite Preview",
        "provider": "google",
        "architecture": [
            "image",
            "audio",
            "video"
        ],
        "context": 1048576,
        "prompt_price": 0.25,
        "completion_price": 1.5,
        "description": "Gemini 3.1 Flash Lite Preview is Google's high-efficiency model optimized for high-volume use cases. It outperforms Gemini 2.5 Flash Lite on overall quality and approaches Gemini 2.5 Flash performance across..."
    },
    {
        "id": "google/gemini-3.1-pro-preview",
        "name": "Google: Gemini 3.1 Pro Preview",
        "provider": "google",
        "architecture": [
            "image",
            "audio",
            "video"
        ],
        "context": 1048576,
        "prompt_price": 2.0,
        "completion_price": 12.0,
        "description": "Gemini 3.1 Pro Preview is Google\u2019s frontier reasoning model, delivering enhanced software engineering performance, improved agentic reliability, and more efficient token usage across complex workflows. Building on the multimodal foundation..."
    },
    {
        "id": "google/gemini-3.1-pro-preview-customtools",
        "name": "Google: Gemini 3.1 Pro Preview Custom Tools",
        "provider": "google",
        "architecture": [
            "image",
            "audio",
            "video"
        ],
        "context": 1048576,
        "prompt_price": 2.0,
        "completion_price": 12.0,
        "description": "Gemini 3.1 Pro Preview Custom Tools is a variant of Gemini 3.1 Pro that improves tool selection behavior by preventing overuse of a general bash tool when more efficient third-party..."
    },
    {
        "id": "google/gemma-2-27b-it",
        "name": "Google: Gemma 2 27B",
        "provider": "google",
        "architecture": [
            "text"
        ],
        "context": 8192,
        "prompt_price": 0.65,
        "completion_price": 0.65,
        "description": "Gemma 2 27B by Google is an open model built from the same research and technology used to create the [Gemini models](/models?q=gemini). Gemma models are well-suited for a variety of..."
    },
    {
        "id": "google/gemma-3-12b-it",
        "name": "Google: Gemma 3 12B",
        "provider": "google",
        "architecture": [
            "image"
        ],
        "context": 131072,
        "prompt_price": 0.04,
        "completion_price": 0.13,
        "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,..."
    },
    {
        "id": "google/gemma-3-12b-it:free",
        "name": "Google: Gemma 3 12B (free)",
        "provider": "google",
        "architecture": [
            "image"
        ],
        "context": 32768,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,..."
    },
    {
        "id": "google/gemma-3-27b-it",
        "name": "Google: Gemma 3 27B",
        "provider": "google",
        "architecture": [
            "image"
        ],
        "context": 131072,
        "prompt_price": 0.08,
        "completion_price": 0.16,
        "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,..."
    },
    {
        "id": "google/gemma-3-27b-it:free",
        "name": "Google: Gemma 3 27B (free)",
        "provider": "google",
        "architecture": [
            "image"
        ],
        "context": 131072,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,..."
    },
    {
        "id": "google/gemma-3-4b-it",
        "name": "Google: Gemma 3 4B",
        "provider": "google",
        "architecture": [
            "image"
        ],
        "context": 131072,
        "prompt_price": 0.04,
        "completion_price": 0.08,
        "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,..."
    },
    {
        "id": "google/gemma-3-4b-it:free",
        "name": "Google: Gemma 3 4B (free)",
        "provider": "google",
        "architecture": [
            "image"
        ],
        "context": 32768,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,..."
    },
    {
        "id": "google/gemma-3n-e2b-it:free",
        "name": "Google: Gemma 3n 2B (free)",
        "provider": "google",
        "architecture": [
            "text"
        ],
        "context": 8192,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "Gemma 3n E2B IT is a multimodal, instruction-tuned model developed by Google DeepMind, designed to operate efficiently at an effective parameter size of 2B while leveraging a 6B architecture. Based..."
    },
    {
        "id": "google/gemma-3n-e4b-it",
        "name": "Google: Gemma 3n 4B",
        "provider": "google",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.02,
        "completion_price": 0.04,
        "description": "Gemma 3n E4B-it is optimized for efficient execution on mobile and low-resource devices, such as phones, laptops, and tablets. It supports multimodal inputs\u2014including text, visual data, and audio\u2014enabling diverse tasks..."
    },
    {
        "id": "google/gemma-3n-e4b-it:free",
        "name": "Google: Gemma 3n 4B (free)",
        "provider": "google",
        "architecture": [
            "text"
        ],
        "context": 8192,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "Gemma 3n E4B-it is optimized for efficient execution on mobile and low-resource devices, such as phones, laptops, and tablets. It supports multimodal inputs\u2014including text, visual data, and audio\u2014enabling diverse tasks..."
    },
    {
        "id": "google/gemma-4-26b-a4b-it",
        "name": "Google: Gemma 4 26B A4B ",
        "provider": "google",
        "architecture": [
            "image",
            "video"
        ],
        "context": 262144,
        "prompt_price": 0.08,
        "completion_price": 0.35,
        "description": "Gemma 4 26B A4B IT is an instruction-tuned Mixture-of-Experts (MoE) model from Google DeepMind. Despite 25.2B total parameters, only 3.8B activate per token during inference \u2014 delivering near-31B quality at..."
    },
    {
        "id": "google/gemma-4-26b-a4b-it:free",
        "name": "Google: Gemma 4 26B A4B  (free)",
        "provider": "google",
        "architecture": [
            "image",
            "video"
        ],
        "context": 262144,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "Gemma 4 26B A4B IT is an instruction-tuned Mixture-of-Experts (MoE) model from Google DeepMind. Despite 25.2B total parameters, only 3.8B activate per token during inference \u2014 delivering near-31B quality at..."
    },
    {
        "id": "google/gemma-4-31b-it",
        "name": "Google: Gemma 4 31B",
        "provider": "google",
        "architecture": [
            "image",
            "video"
        ],
        "context": 262144,
        "prompt_price": 0.13,
        "completion_price": 0.38,
        "description": "Gemma 4 31B Instruct is Google DeepMind's 30.7B dense multimodal model supporting text and image input with text output. Features a 256K token context window, configurable thinking/reasoning mode, native function..."
    },
    {
        "id": "google/gemma-4-31b-it:free",
        "name": "Google: Gemma 4 31B (free)",
        "provider": "google",
        "architecture": [
            "image",
            "video"
        ],
        "context": 262144,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "Gemma 4 31B Instruct is Google DeepMind's 30.7B dense multimodal model supporting text and image input with text output. Features a 256K token context window, configurable thinking/reasoning mode, native function..."
    },
    {
        "id": "google/lyria-3-clip-preview",
        "name": "Google: Lyria 3 Clip Preview",
        "provider": "google",
        "architecture": [
            "image",
            "audio"
        ],
        "context": 1048576,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "30 second duration clips are priced at $0.04 per clip. Lyria 3 is Google's family of music generation models, available through the Gemini API. With Lyria 3, you can generate..."
    },
    {
        "id": "google/lyria-3-pro-preview",
        "name": "Google: Lyria 3 Pro Preview",
        "provider": "google",
        "architecture": [
            "image",
            "audio"
        ],
        "context": 1048576,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "Full-length songs are priced at $0.08 per song. Lyria 3 is Google's family of music generation models, available through the Gemini API. With Lyria 3, you can generate high-quality, 48kHz..."
    },
    {
        "id": "google/gemini-2.5-flash-image",
        "name": "Google: Nano Banana (Gemini 2.5 Flash Image)",
        "provider": "google",
        "architecture": [
            "image"
        ],
        "context": 32768,
        "prompt_price": 0.3,
        "completion_price": 2.5,
        "description": "Gemini 2.5 Flash Image, a.k.a. \"Nano Banana,\" is now generally available. It is a state of the art image generation model with contextual understanding. It is capable of image generation,..."
    },
    {
        "id": "google/gemini-3.1-flash-image-preview",
        "name": "Google: Nano Banana 2 (Gemini 3.1 Flash Image Preview)",
        "provider": "google",
        "architecture": [
            "image"
        ],
        "context": 65536,
        "prompt_price": 0.5,
        "completion_price": 3.0,
        "description": "Gemini 3.1 Flash Image Preview, a.k.a. \"Nano Banana 2,\" is Google\u2019s latest state of the art image generation and editing model, delivering Pro-level visual quality at Flash speed. It combines..."
    },
    {
        "id": "google/gemini-3-pro-image-preview",
        "name": "Google: Nano Banana Pro (Gemini 3 Pro Image Preview)",
        "provider": "google",
        "architecture": [
            "image"
        ],
        "context": 65536,
        "prompt_price": 2.0,
        "completion_price": 12.0,
        "description": "Nano Banana Pro is Google\u2019s most advanced image-generation and editing model, built on Gemini 3 Pro. It extends the original Nano Banana with significantly improved multimodal reasoning, real-world grounding, and..."
    },
    {
        "id": "gryphe/mythomax-l2-13b",
        "name": "MythoMax 13B",
        "provider": "gryphe",
        "architecture": [
            "text"
        ],
        "context": 4096,
        "prompt_price": 0.06,
        "completion_price": 0.06,
        "description": "One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay. #merge"
    },
    {
        "id": "ibm-granite/granite-4.0-h-micro",
        "name": "IBM: Granite 4.0 Micro",
        "provider": "ibm-granite",
        "architecture": [
            "text"
        ],
        "context": 131000,
        "prompt_price": 0.017,
        "completion_price": 0.11,
        "description": "Granite-4.0-H-Micro is a 3B parameter from the Granite 4 family of models. These models are the latest in a series of models released by IBM. They are fine-tuned for long..."
    },
    {
        "id": "inception/mercury",
        "name": "Inception: Mercury",
        "provider": "inception",
        "architecture": [
            "text"
        ],
        "context": 128000,
        "prompt_price": 0.25,
        "completion_price": 0.75,
        "description": "Mercury is the first diffusion large language model (dLLM). Applying a breakthrough discrete diffusion approach, the model runs 5-10x faster than even speed optimized models like GPT-4.1 Nano and Claude..."
    },
    {
        "id": "inception/mercury-2",
        "name": "Inception: Mercury 2",
        "provider": "inception",
        "architecture": [
            "text"
        ],
        "context": 128000,
        "prompt_price": 0.25,
        "completion_price": 0.75,
        "description": "Mercury 2 is an extremely fast reasoning LLM, and the first reasoning diffusion LLM (dLLM). Instead of generating tokens sequentially, Mercury 2 produces and refines multiple tokens in parallel, achieving..."
    },
    {
        "id": "inception/mercury-coder",
        "name": "Inception: Mercury Coder",
        "provider": "inception",
        "architecture": [
            "text"
        ],
        "context": 128000,
        "prompt_price": 0.25,
        "completion_price": 0.75,
        "description": "Mercury Coder is the first diffusion large language model (dLLM). Applying a breakthrough discrete diffusion approach, the model runs 5-10x faster than even speed optimized models like Claude 3.5 Haiku..."
    },
    {
        "id": "inflection/inflection-3-pi",
        "name": "Inflection: Inflection 3 Pi",
        "provider": "inflection",
        "architecture": [
            "text"
        ],
        "context": 8000,
        "prompt_price": 2.5,
        "completion_price": 10.0,
        "description": "Inflection 3 Pi powers Inflection's [Pi](https://pi.ai) chatbot, including backstory, emotional intelligence, productivity, and safety. It has access to recent news, and excels in scenarios like customer support and roleplay. Pi..."
    },
    {
        "id": "inflection/inflection-3-productivity",
        "name": "Inflection: Inflection 3 Productivity",
        "provider": "inflection",
        "architecture": [
            "text"
        ],
        "context": 8000,
        "prompt_price": 2.5,
        "completion_price": 10.0,
        "description": "Inflection 3 Productivity is optimized for following instructions. It is better for tasks requiring JSON output or precise adherence to provided guidelines. It has access to recent news. For emotional..."
    },
    {
        "id": "kwaipilot/kat-coder-pro-v2",
        "name": "Kwaipilot: KAT-Coder-Pro V2",
        "provider": "kwaipilot",
        "architecture": [
            "text"
        ],
        "context": 256000,
        "prompt_price": 0.3,
        "completion_price": 1.2,
        "description": "KAT-Coder-Pro V2 is the latest high-performance model in KwaiKAT\u2019s KAT-Coder series, designed for complex enterprise-grade software engineering and SaaS integration. It builds on the agentic coding strengths of earlier versions,..."
    },
    {
        "id": "liquid/lfm-2-24b-a2b",
        "name": "LiquidAI: LFM2-24B-A2B",
        "provider": "liquid",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.03,
        "completion_price": 0.12,
        "description": "LFM2-24B-A2B is the largest model in the LFM2 family of hybrid architectures designed for efficient on-device deployment. Built as a 24B parameter Mixture-of-Experts model with only 2B active parameters per..."
    },
    {
        "id": "liquid/lfm-2.5-1.2b-instruct:free",
        "name": "LiquidAI: LFM2.5-1.2B-Instruct (free)",
        "provider": "liquid",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "LFM2.5-1.2B-Instruct is a compact, high-performance instruction-tuned model built for fast on-device AI. It delivers strong chat quality in a 1.2B parameter footprint, with efficient edge inference and broad runtime support."
    },
    {
        "id": "liquid/lfm-2.5-1.2b-thinking:free",
        "name": "LiquidAI: LFM2.5-1.2B-Thinking (free)",
        "provider": "liquid",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "LFM2.5-1.2B-Thinking is a lightweight reasoning-focused model optimized for agentic tasks, data extraction, and RAG\u2014while still running comfortably on edge devices. It supports long context (up to 32K tokens) and is..."
    },
    {
        "id": "mancer/weaver",
        "name": "Mancer: Weaver (alpha)",
        "provider": "mancer",
        "architecture": [
            "text"
        ],
        "context": 8000,
        "prompt_price": 0.75,
        "completion_price": 1.0,
        "description": "An attempt to recreate Claude-style verbosity, but don't expect the same level of coherence or memory. Meant for use in roleplay/narrative situations."
    },
    {
        "id": "meta-llama/llama-guard-3-8b",
        "name": "Llama Guard 3 8B",
        "provider": "meta-llama",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.48,
        "completion_price": 0.03,
        "description": "Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification. Similar to previous versions, it can be used to classify content in both LLM inputs (prompt classification)..."
    },
    {
        "id": "meta-llama/llama-3-70b-instruct",
        "name": "Meta: Llama 3 70B Instruct",
        "provider": "meta-llama",
        "architecture": [
            "text"
        ],
        "context": 8192,
        "prompt_price": 0.51,
        "completion_price": 0.74,
        "description": "Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors. This 70B instruct-tuned version was optimized for high quality dialogue usecases. It has demonstrated strong..."
    },
    {
        "id": "meta-llama/llama-3-8b-instruct",
        "name": "Meta: Llama 3 8B Instruct",
        "provider": "meta-llama",
        "architecture": [
            "text"
        ],
        "context": 8192,
        "prompt_price": 0.03,
        "completion_price": 0.04,
        "description": "Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors. This 8B instruct-tuned version was optimized for high quality dialogue usecases. It has demonstrated strong..."
    },
    {
        "id": "meta-llama/llama-3.1-70b-instruct",
        "name": "Meta: Llama 3.1 70B Instruct",
        "provider": "meta-llama",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.39999999999999997,
        "completion_price": 0.39999999999999997,
        "description": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 70B instruct-tuned version is optimized for high quality dialogue usecases. It has demonstrated strong..."
    },
    {
        "id": "meta-llama/llama-3.1-8b-instruct",
        "name": "Meta: Llama 3.1 8B Instruct",
        "provider": "meta-llama",
        "architecture": [
            "text"
        ],
        "context": 16384,
        "prompt_price": 0.02,
        "completion_price": 0.049999999999999996,
        "description": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 8B instruct-tuned version is fast and efficient. It has demonstrated strong performance compared to..."
    },
    {
        "id": "meta-llama/llama-3.2-11b-vision-instruct",
        "name": "Meta: Llama 3.2 11B Vision Instruct",
        "provider": "meta-llama",
        "architecture": [
            "image"
        ],
        "context": 131072,
        "prompt_price": 0.245,
        "completion_price": 0.245,
        "description": "Llama 3.2 11B Vision is a multimodal model with 11 billion parameters, designed to handle tasks combining visual and textual data. It excels in tasks such as image captioning and..."
    },
    {
        "id": "meta-llama/llama-3.2-1b-instruct",
        "name": "Meta: Llama 3.2 1B Instruct",
        "provider": "meta-llama",
        "architecture": [
            "text"
        ],
        "context": 60000,
        "prompt_price": 0.027,
        "completion_price": 0.19999999999999998,
        "description": "Llama 3.2 1B is a 1-billion-parameter language model focused on efficiently performing natural language tasks, such as summarization, dialogue, and multilingual text analysis. Its smaller size allows it to operate..."
    },
    {
        "id": "meta-llama/llama-3.2-3b-instruct",
        "name": "Meta: Llama 3.2 3B Instruct",
        "provider": "meta-llama",
        "architecture": [
            "text"
        ],
        "context": 80000,
        "prompt_price": 0.051,
        "completion_price": 0.33999999999999997,
        "description": "Llama 3.2 3B is a 3-billion-parameter multilingual large language model, optimized for advanced natural language processing tasks like dialogue generation, reasoning, and summarization. Designed with the latest transformer architecture, it..."
    },
    {
        "id": "meta-llama/llama-3.2-3b-instruct:free",
        "name": "Meta: Llama 3.2 3B Instruct (free)",
        "provider": "meta-llama",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "Llama 3.2 3B is a 3-billion-parameter multilingual large language model, optimized for advanced natural language processing tasks like dialogue generation, reasoning, and summarization. Designed with the latest transformer architecture, it..."
    },
    {
        "id": "meta-llama/llama-3.3-70b-instruct",
        "name": "Meta: Llama 3.3 70B Instruct",
        "provider": "meta-llama",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.09999999999999999,
        "completion_price": 0.32,
        "description": "The Meta Llama 3.3 multilingual large language model (LLM) is a pretrained and instruction tuned generative model in 70B (text in/text out). The Llama 3.3 instruction tuned text only model..."
    },
    {
        "id": "meta-llama/llama-3.3-70b-instruct:free",
        "name": "Meta: Llama 3.3 70B Instruct (free)",
        "provider": "meta-llama",
        "architecture": [
            "text"
        ],
        "context": 65536,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "The Meta Llama 3.3 multilingual large language model (LLM) is a pretrained and instruction tuned generative model in 70B (text in/text out). The Llama 3.3 instruction tuned text only model..."
    },
    {
        "id": "meta-llama/llama-4-maverick",
        "name": "Meta: Llama 4 Maverick",
        "provider": "meta-llama",
        "architecture": [
            "image"
        ],
        "context": 1048576,
        "prompt_price": 0.15,
        "completion_price": 0.6,
        "description": "Llama 4 Maverick 17B Instruct (128E) is a high-capacity multimodal language model from Meta, built on a mixture-of-experts (MoE) architecture with 128 experts and 17 billion active parameters per forward..."
    },
    {
        "id": "meta-llama/llama-4-scout",
        "name": "Meta: Llama 4 Scout",
        "provider": "meta-llama",
        "architecture": [
            "image"
        ],
        "context": 327680,
        "prompt_price": 0.08,
        "completion_price": 0.3,
        "description": "Llama 4 Scout 17B Instruct (16E) is a mixture-of-experts (MoE) language model developed by Meta, activating 17 billion parameters out of a total of 109B. It supports native multimodal input..."
    },
    {
        "id": "meta-llama/llama-guard-4-12b",
        "name": "Meta: Llama Guard 4 12B",
        "provider": "meta-llama",
        "architecture": [
            "image"
        ],
        "context": 163840,
        "prompt_price": 0.18,
        "completion_price": 0.18,
        "description": "Llama Guard 4 is a Llama 4 Scout-derived multimodal pretrained model, fine-tuned for content safety classification. Similar to previous versions, it can be used to classify content in both LLM..."
    },
    {
        "id": "microsoft/phi-4",
        "name": "Microsoft: Phi 4",
        "provider": "microsoft",
        "architecture": [
            "text"
        ],
        "context": 16384,
        "prompt_price": 0.065,
        "completion_price": 0.14,
        "description": "[Microsoft Research](/microsoft) Phi-4 is designed to perform well in complex reasoning tasks and can operate efficiently in situations with limited memory or where quick responses are needed. At 14 billion..."
    },
    {
        "id": "microsoft/wizardlm-2-8x22b",
        "name": "WizardLM-2 8x22B",
        "provider": "microsoft",
        "architecture": [
            "text"
        ],
        "context": 65535,
        "prompt_price": 0.62,
        "completion_price": 0.62,
        "description": "WizardLM-2 8x22B is Microsoft AI's most advanced Wizard model. It demonstrates highly competitive performance compared to leading proprietary models, and it consistently outperforms all existing state-of-the-art opensource models. It is..."
    },
    {
        "id": "minimax/minimax-m1",
        "name": "MiniMax: MiniMax M1",
        "provider": "minimax",
        "architecture": [
            "text"
        ],
        "context": 1000000,
        "prompt_price": 0.4,
        "completion_price": 2.2,
        "description": "MiniMax-M1 is a large-scale, open-weight reasoning model designed for extended context and high-efficiency inference. It leverages a hybrid Mixture-of-Experts (MoE) architecture paired with a custom \"lightning attention\" mechanism, allowing it..."
    },
    {
        "id": "minimax/minimax-m2",
        "name": "MiniMax: MiniMax M2",
        "provider": "minimax",
        "architecture": [
            "text"
        ],
        "context": 196608,
        "prompt_price": 0.255,
        "completion_price": 1.0,
        "description": "MiniMax-M2 is a compact, high-efficiency large language model optimized for end-to-end coding and agentic workflows. With 10 billion activated parameters (230 billion total), it delivers near-frontier intelligence across general reasoning,..."
    },
    {
        "id": "minimax/minimax-m2-her",
        "name": "MiniMax: MiniMax M2-her",
        "provider": "minimax",
        "architecture": [
            "text"
        ],
        "context": 65536,
        "prompt_price": 0.3,
        "completion_price": 1.2,
        "description": "MiniMax M2-her is a dialogue-first large language model built for immersive roleplay, character-driven chat, and expressive multi-turn conversations. Designed to stay consistent in tone and personality, it supports rich message..."
    },
    {
        "id": "minimax/minimax-m2.1",
        "name": "MiniMax: MiniMax M2.1",
        "provider": "minimax",
        "architecture": [
            "text"
        ],
        "context": 196608,
        "prompt_price": 0.29,
        "completion_price": 0.95,
        "description": "MiniMax-M2.1 is a lightweight, state-of-the-art large language model optimized for coding, agentic workflows, and modern application development. With only 10 billion activated parameters, it delivers a major jump in real-world..."
    },
    {
        "id": "minimax/minimax-m2.5",
        "name": "MiniMax: MiniMax M2.5",
        "provider": "minimax",
        "architecture": [
            "text"
        ],
        "context": 196608,
        "prompt_price": 0.118,
        "completion_price": 0.99,
        "description": "MiniMax-M2.5 is a SOTA large language model designed for real-world productivity. Trained in a diverse range of complex real-world digital working environments, M2.5 builds upon the coding expertise of M2.1..."
    },
    {
        "id": "minimax/minimax-m2.5:free",
        "name": "MiniMax: MiniMax M2.5 (free)",
        "provider": "minimax",
        "architecture": [
            "text"
        ],
        "context": 196608,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "MiniMax-M2.5 is a SOTA large language model designed for real-world productivity. Trained in a diverse range of complex real-world digital working environments, M2.5 builds upon the coding expertise of M2.1..."
    },
    {
        "id": "minimax/minimax-m2.7",
        "name": "MiniMax: MiniMax M2.7",
        "provider": "minimax",
        "architecture": [
            "text"
        ],
        "context": 196608,
        "prompt_price": 0.3,
        "completion_price": 1.2,
        "description": "MiniMax-M2.7 is a next-generation large language model designed for autonomous, real-world productivity and continuous improvement. Built to actively participate in its own evolution, M2.7 integrates advanced agentic capabilities through multi-agent..."
    },
    {
        "id": "minimax/minimax-01",
        "name": "MiniMax: MiniMax-01",
        "provider": "minimax",
        "architecture": [
            "image"
        ],
        "context": 1000192,
        "prompt_price": 0.2,
        "completion_price": 1.1,
        "description": "MiniMax-01 combines MiniMax-Text-01 for text generation and MiniMax-VL-01 for image understanding. It has 456 billion parameters, with 45.9 billion parameters activated per inference, and can handle a context..."
    },
    {
        "id": "mistralai/mistral-large",
        "name": "Mistral Large",
        "provider": "mistralai",
        "architecture": [
            "text"
        ],
        "context": 128000,
        "prompt_price": 2.0,
        "completion_price": 6.0,
        "description": "This is Mistral AI's flagship model, Mistral Large 2 (version `mistral-large-2407`). It's a proprietary weights-available model and excels at reasoning, code, JSON, chat, and more. Read the launch announcement [here](https://mistral.ai/news/mistral-large-2407/)...."
    },
    {
        "id": "mistralai/mistral-large-2407",
        "name": "Mistral Large 2407",
        "provider": "mistralai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 2.0,
        "completion_price": 6.0,
        "description": "This is Mistral AI's flagship model, Mistral Large 2 (version mistral-large-2407). It's a proprietary weights-available model and excels at reasoning, code, JSON, chat, and more. Read the launch announcement [here](https://mistral.ai/news/mistral-large-2407/)...."
    },
    {
        "id": "mistralai/mistral-large-2411",
        "name": "Mistral Large 2411",
        "provider": "mistralai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 2.0,
        "completion_price": 6.0,
        "description": "Mistral Large 2 2411 is an update of [Mistral Large 2](/mistralai/mistral-large) released together with [Pixtral Large 2411](/mistralai/pixtral-large-2411). It provides a significant upgrade on the previous [Mistral Large 24.07](/mistralai/mistral-large-2407), with notable..."
    },
    {
        "id": "mistralai/codestral-2508",
        "name": "Mistral: Codestral 2508",
        "provider": "mistralai",
        "architecture": [
            "text"
        ],
        "context": 256000,
        "prompt_price": 0.3,
        "completion_price": 0.9,
        "description": "Mistral's cutting-edge language model for coding released end of July 2025. Codestral specializes in low-latency, high-frequency tasks such as fill-in-the-middle (FIM), code correction and test generation.\n\n[Blog Post](https://mistral.ai/news/codestral-25-08)"
    },
    {
        "id": "mistralai/devstral-2512",
        "name": "Mistral: Devstral 2 2512",
        "provider": "mistralai",
        "architecture": [
            "text"
        ],
        "context": 262144,
        "prompt_price": 0.4,
        "completion_price": 2.0,
        "description": "Devstral 2 is a state-of-the-art open-source model by Mistral AI specializing in agentic coding. It is a 123B-parameter dense transformer model supporting a 256K context window. Devstral 2 supports exploring..."
    },
    {
        "id": "mistralai/devstral-medium",
        "name": "Mistral: Devstral Medium",
        "provider": "mistralai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.4,
        "completion_price": 2.0,
        "description": "Devstral Medium is a high-performance code generation and agentic reasoning model developed jointly by Mistral AI and All Hands AI. Positioned as a step up from Devstral Small, it achieves..."
    },
    {
        "id": "mistralai/devstral-small",
        "name": "Mistral: Devstral Small 1.1",
        "provider": "mistralai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.1,
        "completion_price": 0.3,
        "description": "Devstral Small 1.1 is a 24B parameter open-weight language model for software engineering agents, developed by Mistral AI in collaboration with All Hands AI. Finetuned from Mistral Small 3.1 and..."
    },
    {
        "id": "mistralai/ministral-14b-2512",
        "name": "Mistral: Ministral 3 14B 2512",
        "provider": "mistralai",
        "architecture": [
            "image"
        ],
        "context": 262144,
        "prompt_price": 0.2,
        "completion_price": 0.2,
        "description": "The largest model in the Ministral 3 family, Ministral 3 14B offers frontier capabilities and performance comparable to its larger Mistral Small 3.2 24B counterpart. A powerful and efficient language..."
    },
    {
        "id": "mistralai/ministral-3b-2512",
        "name": "Mistral: Ministral 3 3B 2512",
        "provider": "mistralai",
        "architecture": [
            "image"
        ],
        "context": 131072,
        "prompt_price": 0.1,
        "completion_price": 0.1,
        "description": "The smallest model in the Ministral 3 family, Ministral 3 3B is a powerful, efficient tiny language model with vision capabilities."
    },
    {
        "id": "mistralai/ministral-8b-2512",
        "name": "Mistral: Ministral 3 8B 2512",
        "provider": "mistralai",
        "architecture": [
            "image"
        ],
        "context": 262144,
        "prompt_price": 0.15,
        "completion_price": 0.15,
        "description": "A balanced model in the Ministral 3 family, Ministral 3 8B is a powerful, efficient tiny language model with vision capabilities."
    },
    {
        "id": "mistralai/mistral-7b-instruct-v0.1",
        "name": "Mistral: Mistral 7B Instruct v0.1",
        "provider": "mistralai",
        "architecture": [
            "text"
        ],
        "context": 2824,
        "prompt_price": 0.11,
        "completion_price": 0.19,
        "description": "A 7.3B parameter model that outperforms Llama 2 13B on all benchmarks, with optimizations for speed and context length."
    },
    {
        "id": "mistralai/mistral-large-2512",
        "name": "Mistral: Mistral Large 3 2512",
        "provider": "mistralai",
        "architecture": [
            "image"
        ],
        "context": 262144,
        "prompt_price": 0.5,
        "completion_price": 1.5,
        "description": "Mistral Large 3 2512 is Mistral\u2019s most capable model to date, featuring a sparse mixture-of-experts architecture with 41B active parameters (675B total), and released under the Apache 2.0 license."
    },
    {
        "id": "mistralai/mistral-medium-3",
        "name": "Mistral: Mistral Medium 3",
        "provider": "mistralai",
        "architecture": [
            "image"
        ],
        "context": 131072,
        "prompt_price": 0.4,
        "completion_price": 2.0,
        "description": "Mistral Medium 3 is a high-performance enterprise-grade language model designed to deliver frontier-level capabilities at significantly reduced operational cost. It balances state-of-the-art reasoning and multimodal performance with 8\u00d7 lower cost..."
    },
    {
        "id": "mistralai/mistral-medium-3.1",
        "name": "Mistral: Mistral Medium 3.1",
        "provider": "mistralai",
        "architecture": [
            "image"
        ],
        "context": 131072,
        "prompt_price": 0.4,
        "completion_price": 2.0,
        "description": "Mistral Medium 3.1 is an updated version of Mistral Medium 3, which is a high-performance enterprise-grade language model designed to deliver frontier-level capabilities at significantly reduced operational cost. It balances..."
    },
    {
        "id": "mistralai/mistral-nemo",
        "name": "Mistral: Mistral Nemo",
        "provider": "mistralai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.02,
        "completion_price": 0.04,
        "description": "A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA. The model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese,..."
    },
    {
        "id": "mistralai/mistral-small-24b-instruct-2501",
        "name": "Mistral: Mistral Small 3",
        "provider": "mistralai",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.05,
        "completion_price": 0.08,
        "description": "Mistral Small 3 is a 24B-parameter language model optimized for low-latency performance across common AI tasks. Released under the Apache 2.0 license, it features both pre-trained and instruction-tuned versions designed..."
    },
    {
        "id": "mistralai/mistral-small-3.1-24b-instruct",
        "name": "Mistral: Mistral Small 3.1 24B",
        "provider": "mistralai",
        "architecture": [
            "image"
        ],
        "context": 128000,
        "prompt_price": 0.35,
        "completion_price": 0.56,
        "description": "Mistral Small 3.1 24B Instruct is an upgraded variant of Mistral Small 3 (2501), featuring 24 billion parameters with advanced multimodal capabilities. It provides state-of-the-art performance in text-based reasoning and..."
    },
    {
        "id": "mistralai/mistral-small-3.2-24b-instruct",
        "name": "Mistral: Mistral Small 3.2 24B",
        "provider": "mistralai",
        "architecture": [
            "image"
        ],
        "context": 128000,
        "prompt_price": 0.075,
        "completion_price": 0.2,
        "description": "Mistral-Small-3.2-24B-Instruct-2506 is an updated 24B parameter model from Mistral optimized for instruction following, repetition reduction, and improved function calling. Compared to the 3.1 release, version 3.2 significantly improves accuracy on..."
    },
    {
        "id": "mistralai/mistral-small-2603",
        "name": "Mistral: Mistral Small 4",
        "provider": "mistralai",
        "architecture": [
            "image"
        ],
        "context": 262144,
        "prompt_price": 0.15,
        "completion_price": 0.6,
        "description": "Mistral Small 4 is the next major release in the Mistral Small family, unifying the capabilities of several flagship Mistral models into a single system. It combines strong reasoning from..."
    },
    {
        "id": "mistralai/mistral-small-creative",
        "name": "Mistral: Mistral Small Creative",
        "provider": "mistralai",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.1,
        "completion_price": 0.3,
        "description": "Mistral Small Creative is an experimental small model designed for creative writing, narrative generation, roleplay and character-driven dialogue, general-purpose instruction following, and conversational agents."
    },
    {
        "id": "mistralai/mixtral-8x22b-instruct",
        "name": "Mistral: Mixtral 8x22B Instruct",
        "provider": "mistralai",
        "architecture": [
            "text"
        ],
        "context": 65536,
        "prompt_price": 2.0,
        "completion_price": 6.0,
        "description": "Mistral's official instruct fine-tuned version of [Mixtral 8x22B](/models/mistralai/mixtral-8x22b). It uses 39B active parameters out of 141B, offering unparalleled cost efficiency for its size. Its strengths include: - strong math, coding,..."
    },
    {
        "id": "mistralai/mixtral-8x7b-instruct",
        "name": "Mistral: Mixtral 8x7B Instruct",
        "provider": "mistralai",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.54,
        "completion_price": 0.54,
        "description": "Mixtral 8x7B Instruct is a pretrained generative Sparse Mixture of Experts, by Mistral AI, for chat and instruction use. Incorporates 8 experts (feed-forward networks) for a total of 47 billion..."
    },
    {
        "id": "mistralai/pixtral-large-2411",
        "name": "Mistral: Pixtral Large 2411",
        "provider": "mistralai",
        "architecture": [
            "image"
        ],
        "context": 131072,
        "prompt_price": 2.0,
        "completion_price": 6.0,
        "description": "Pixtral Large is a 124B parameter, open-weight, multimodal model built on top of [Mistral Large 2](/mistralai/mistral-large-2411). The model is able to understand documents, charts and natural images. The model is..."
    },
    {
        "id": "mistralai/mistral-saba",
        "name": "Mistral: Saba",
        "provider": "mistralai",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.2,
        "completion_price": 0.6,
        "description": "Mistral Saba is a 24B-parameter language model specifically designed for the Middle East and South Asia, delivering accurate and contextually relevant responses while maintaining efficient performance. Trained on curated regional..."
    },
    {
        "id": "mistralai/voxtral-small-24b-2507",
        "name": "Mistral: Voxtral Small 24B 2507",
        "provider": "mistralai",
        "architecture": [
            "audio"
        ],
        "context": 32000,
        "prompt_price": 0.1,
        "completion_price": 0.3,
        "description": "Voxtral Small is an enhancement of Mistral Small 3, incorporating state-of-the-art audio input capabilities while retaining best-in-class text performance. It excels at speech transcription, translation and audio understanding. Input audio..."
    },
    {
        "id": "moonshotai/kimi-k2",
        "name": "MoonshotAI: Kimi K2 0711",
        "provider": "moonshotai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.57,
        "completion_price": 2.3,
        "description": "Kimi K2 Instruct is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32 billion active per forward pass. It is optimized for..."
    },
    {
        "id": "moonshotai/kimi-k2-0905",
        "name": "MoonshotAI: Kimi K2 0905",
        "provider": "moonshotai",
        "architecture": [
            "text"
        ],
        "context": 262144,
        "prompt_price": 0.4,
        "completion_price": 2.0,
        "description": "Kimi K2 0905 is the September update of [Kimi K2 0711](/moonshotai/kimi-k2). It is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32..."
    },
    {
        "id": "moonshotai/kimi-k2-thinking",
        "name": "MoonshotAI: Kimi K2 Thinking",
        "provider": "moonshotai",
        "architecture": [
            "text"
        ],
        "context": 262144,
        "prompt_price": 0.6,
        "completion_price": 2.5,
        "description": "Kimi K2 Thinking is Moonshot AI\u2019s most advanced open reasoning model to date, extending the K2 series into agentic, long-horizon reasoning. Built on the trillion-parameter Mixture-of-Experts (MoE) architecture introduced in..."
    },
    {
        "id": "moonshotai/kimi-k2.5",
        "name": "MoonshotAI: Kimi K2.5",
        "provider": "moonshotai",
        "architecture": [
            "image"
        ],
        "context": 262144,
        "prompt_price": 0.3827,
        "completion_price": 1.72,
        "description": "Kimi K2.5 is Moonshot AI's native multimodal model, delivering state-of-the-art visual coding capability and a self-directed agent swarm paradigm. Built on Kimi K2 with continued pretraining over approximately 15T mixed..."
    },
    {
        "id": "morph/morph-v3-fast",
        "name": "Morph: Morph V3 Fast",
        "provider": "morph",
        "architecture": [
            "text"
        ],
        "context": 81920,
        "prompt_price": 0.8,
        "completion_price": 1.2,
        "description": "Morph's fastest apply model for code edits. ~10,500 tokens/sec with 96% accuracy for rapid code transformations. The model requires the prompt to be in the following format: <instruction>{instruction}</instruction> <code>{initial_code}</code> <update>{edit_snippet}</update>..."
    },
    {
        "id": "morph/morph-v3-large",
        "name": "Morph: Morph V3 Large",
        "provider": "morph",
        "architecture": [
            "text"
        ],
        "context": 262144,
        "prompt_price": 0.9,
        "completion_price": 1.9,
        "description": "Morph's high-accuracy apply model for complex code edits. ~4,500 tokens/sec with 98% accuracy for precise code transformations. The model requires the prompt to be in the following format: <instruction>{instruction}</instruction> <code>{initial_code}</code>..."
    },
    {
        "id": "nex-agi/deepseek-v3.1-nex-n1",
        "name": "Nex AGI: DeepSeek V3.1 Nex N1",
        "provider": "nex-agi",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.135,
        "completion_price": 0.5,
        "description": "DeepSeek V3.1 Nex-N1 is the flagship release of the Nex-N1 series \u2014 a post-trained model designed to highlight agent autonomy, tool use, and real-world productivity. Nex-N1 demonstrates competitive performance across..."
    },
    {
        "id": "nousresearch/hermes-3-llama-3.1-405b",
        "name": "Nous: Hermes 3 405B Instruct",
        "provider": "nousresearch",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 1.0,
        "completion_price": 1.0,
        "description": "Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the..."
    },
    {
        "id": "nousresearch/hermes-3-llama-3.1-405b:free",
        "name": "Nous: Hermes 3 405B Instruct (free)",
        "provider": "nousresearch",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the..."
    },
    {
        "id": "nousresearch/hermes-3-llama-3.1-70b",
        "name": "Nous: Hermes 3 70B Instruct",
        "provider": "nousresearch",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.3,
        "completion_price": 0.3,
        "description": "Hermes 3 is a generalist language model with many improvements over [Hermes 2](/models/nousresearch/nous-hermes-2-mistral-7b-dpo), including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the..."
    },
    {
        "id": "nousresearch/hermes-4-405b",
        "name": "Nous: Hermes 4 405B",
        "provider": "nousresearch",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 1.0,
        "completion_price": 3.0,
        "description": "Hermes 4 is a large-scale reasoning model built on Meta-Llama-3.1-405B and released by Nous Research. It introduces a hybrid reasoning mode, where the model can choose to deliberate internally with..."
    },
    {
        "id": "nousresearch/hermes-4-70b",
        "name": "Nous: Hermes 4 70B",
        "provider": "nousresearch",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.13,
        "completion_price": 0.4,
        "description": "Hermes 4 70B is a hybrid reasoning model from Nous Research, built on Meta-Llama-3.1-70B. It introduces the same hybrid mode as the larger 405B release, allowing the model to either..."
    },
    {
        "id": "nousresearch/hermes-2-pro-llama-3-8b",
        "name": "NousResearch: Hermes 2 Pro - Llama-3 8B",
        "provider": "nousresearch",
        "architecture": [
            "text"
        ],
        "context": 8192,
        "prompt_price": 0.14,
        "completion_price": 0.14,
        "description": "Hermes 2 Pro is an upgraded, retrained version of Nous Hermes 2, consisting of an updated and cleaned version of the OpenHermes 2.5 Dataset, as well as a newly introduced..."
    },
    {
        "id": "nvidia/llama-3.1-nemotron-70b-instruct",
        "name": "NVIDIA: Llama 3.1 Nemotron 70B Instruct",
        "provider": "nvidia",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 1.2,
        "completion_price": 1.2,
        "description": "NVIDIA's Llama 3.1 Nemotron 70B is a language model designed for generating precise and useful responses. Leveraging [Llama 3.1 70B](/models/meta-llama/llama-3.1-70b-instruct) architecture and Reinforcement Learning from Human Feedback (RLHF), it excels..."
    },
    {
        "id": "nvidia/llama-3.3-nemotron-super-49b-v1.5",
        "name": "NVIDIA: Llama 3.3 Nemotron Super 49B V1.5",
        "provider": "nvidia",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.1,
        "completion_price": 0.4,
        "description": "Llama-3.3-Nemotron-Super-49B-v1.5 is a 49B-parameter, English-centric reasoning/chat model derived from Meta\u2019s Llama-3.3-70B-Instruct with a 128K context. It\u2019s post-trained for agentic workflows (RAG, tool calling) via SFT across math, code, science, and..."
    },
    {
        "id": "nvidia/nemotron-3-nano-30b-a3b",
        "name": "NVIDIA: Nemotron 3 Nano 30B A3B",
        "provider": "nvidia",
        "architecture": [
            "text"
        ],
        "context": 262144,
        "prompt_price": 0.05,
        "completion_price": 0.2,
        "description": "NVIDIA Nemotron 3 Nano 30B A3B is a small language MoE model with highest compute efficiency and accuracy for developers to build specialized agentic AI systems. The model is fully..."
    },
    {
        "id": "nvidia/nemotron-3-nano-30b-a3b:free",
        "name": "NVIDIA: Nemotron 3 Nano 30B A3B (free)",
        "provider": "nvidia",
        "architecture": [
            "text"
        ],
        "context": 256000,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "NVIDIA Nemotron 3 Nano 30B A3B is a small language MoE model with highest compute efficiency and accuracy for developers to build specialized agentic AI systems. The model is fully..."
    },
    {
        "id": "nvidia/nemotron-3-super-120b-a12b",
        "name": "NVIDIA: Nemotron 3 Super",
        "provider": "nvidia",
        "architecture": [
            "text"
        ],
        "context": 262144,
        "prompt_price": 0.1,
        "completion_price": 0.5,
        "description": "NVIDIA Nemotron 3 Super is a 120B-parameter open hybrid MoE model, activating just 12B parameters for maximum compute efficiency and accuracy in complex multi-agent applications. Built on a hybrid Mamba-Transformer..."
    },
    {
        "id": "nvidia/nemotron-3-super-120b-a12b:free",
        "name": "NVIDIA: Nemotron 3 Super (free)",
        "provider": "nvidia",
        "architecture": [
            "text"
        ],
        "context": 262144,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "NVIDIA Nemotron 3 Super is a 120B-parameter open hybrid MoE model, activating just 12B parameters for maximum compute efficiency and accuracy in complex multi-agent applications. Built on a hybrid Mamba-Transformer..."
    },
    {
        "id": "nvidia/nemotron-nano-12b-v2-vl",
        "name": "NVIDIA: Nemotron Nano 12B 2 VL",
        "provider": "nvidia",
        "architecture": [
            "image",
            "video"
        ],
        "context": 131072,
        "prompt_price": 0.2,
        "completion_price": 0.6,
        "description": "NVIDIA Nemotron Nano 2 VL is a 12-billion-parameter open multimodal reasoning model designed for video understanding and document intelligence. It introduces a hybrid Transformer-Mamba architecture, combining transformer-level accuracy with Mamba\u2019s..."
    },
    {
        "id": "nvidia/nemotron-nano-12b-v2-vl:free",
        "name": "NVIDIA: Nemotron Nano 12B 2 VL (free)",
        "provider": "nvidia",
        "architecture": [
            "image",
            "video"
        ],
        "context": 128000,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "NVIDIA Nemotron Nano 2 VL is a 12-billion-parameter open multimodal reasoning model designed for video understanding and document intelligence. It introduces a hybrid Transformer-Mamba architecture, combining transformer-level accuracy with Mamba\u2019s..."
    },
    {
        "id": "nvidia/nemotron-nano-9b-v2",
        "name": "NVIDIA: Nemotron Nano 9B V2",
        "provider": "nvidia",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.04,
        "completion_price": 0.16,
        "description": "NVIDIA-Nemotron-Nano-9B-v2 is a large language model (LLM) trained from scratch by NVIDIA, and designed as a unified model for both reasoning and non-reasoning tasks. It responds to user queries and..."
    },
    {
        "id": "nvidia/nemotron-nano-9b-v2:free",
        "name": "NVIDIA: Nemotron Nano 9B V2 (free)",
        "provider": "nvidia",
        "architecture": [
            "text"
        ],
        "context": 128000,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "NVIDIA-Nemotron-Nano-9B-v2 is a large language model (LLM) trained from scratch by NVIDIA, and designed as a unified model for both reasoning and non-reasoning tasks. It responds to user queries and..."
    },
    {
        "id": "openai/gpt-audio",
        "name": "OpenAI: GPT Audio",
        "provider": "openai",
        "architecture": [
            "audio"
        ],
        "context": 128000,
        "prompt_price": 2.5,
        "completion_price": 10.0,
        "description": "The gpt-audio model is OpenAI's first generally available audio model. The new snapshot features an upgraded decoder for more natural sounding voices and maintains better voice consistency. Audio is priced..."
    },
    {
        "id": "openai/gpt-audio-mini",
        "name": "OpenAI: GPT Audio Mini",
        "provider": "openai",
        "architecture": [
            "audio"
        ],
        "context": 128000,
        "prompt_price": 0.6,
        "completion_price": 2.4,
        "description": "A cost-efficient version of GPT Audio. The new snapshot features an upgraded decoder for more natural sounding voices and maintains better voice consistency. Input is priced at $0.60 per million..."
    },
    {
        "id": "openai/gpt-3.5-turbo",
        "name": "OpenAI: GPT-3.5 Turbo",
        "provider": "openai",
        "architecture": [
            "text"
        ],
        "context": 16385,
        "prompt_price": 0.5,
        "completion_price": 1.5,
        "description": "GPT-3.5 Turbo is OpenAI's fastest model. It can understand and generate natural language or code, and is optimized for chat and traditional completion tasks.\n\nTraining data up to Sep 2021."
    },
    {
        "id": "openai/gpt-3.5-turbo-0613",
        "name": "OpenAI: GPT-3.5 Turbo (older v0613)",
        "provider": "openai",
        "architecture": [
            "text"
        ],
        "context": 4095,
        "prompt_price": 1.0,
        "completion_price": 2.0,
        "description": "GPT-3.5 Turbo is OpenAI's fastest model. It can understand and generate natural language or code, and is optimized for chat and traditional completion tasks.\n\nTraining data up to Sep 2021."
    },
    {
        "id": "openai/gpt-3.5-turbo-16k",
        "name": "OpenAI: GPT-3.5 Turbo 16k",
        "provider": "openai",
        "architecture": [
            "text"
        ],
        "context": 16385,
        "prompt_price": 3.0,
        "completion_price": 4.0,
        "description": "This model offers four times the context length of gpt-3.5-turbo, allowing it to support approximately 20 pages of text in a single request at a higher cost. Training data: up..."
    },
    {
        "id": "openai/gpt-3.5-turbo-instruct",
        "name": "OpenAI: GPT-3.5 Turbo Instruct",
        "provider": "openai",
        "architecture": [
            "text"
        ],
        "context": 4095,
        "prompt_price": 1.5,
        "completion_price": 2.0,
        "description": "This model is a variant of GPT-3.5 Turbo tuned for instructional prompts and omitting chat-related optimizations. Training data: up to Sep 2021."
    },
    {
        "id": "openai/gpt-4",
        "name": "OpenAI: GPT-4",
        "provider": "openai",
        "architecture": [
            "text"
        ],
        "context": 8191,
        "prompt_price": 30.0,
        "completion_price": 60.0,
        "description": "OpenAI's flagship model, GPT-4 is a large-scale multimodal language model capable of solving difficult problems with greater accuracy than previous models due to its broader general knowledge and advanced reasoning..."
    },
    {
        "id": "openai/gpt-4-0314",
        "name": "OpenAI: GPT-4 (older v0314)",
        "provider": "openai",
        "architecture": [
            "text"
        ],
        "context": 8191,
        "prompt_price": 30.0,
        "completion_price": 60.0,
        "description": "GPT-4-0314 is the first version of GPT-4 released, with a context length of 8,192 tokens, and was supported until June 14. Training data: up to Sep 2021."
    },
    {
        "id": "openai/gpt-4-turbo",
        "name": "OpenAI: GPT-4 Turbo",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 128000,
        "prompt_price": 10.0,
        "completion_price": 30.0,
        "description": "The latest GPT-4 Turbo model with vision capabilities. Vision requests can now use JSON mode and function calling.\n\nTraining data: up to December 2023."
    },
    {
        "id": "openai/gpt-4-1106-preview",
        "name": "OpenAI: GPT-4 Turbo (older v1106)",
        "provider": "openai",
        "architecture": [
            "text"
        ],
        "context": 128000,
        "prompt_price": 10.0,
        "completion_price": 30.0,
        "description": "The latest GPT-4 Turbo model with vision capabilities. Vision requests can now use JSON mode and function calling.\n\nTraining data: up to April 2023."
    },
    {
        "id": "openai/gpt-4-turbo-preview",
        "name": "OpenAI: GPT-4 Turbo Preview",
        "provider": "openai",
        "architecture": [
            "text"
        ],
        "context": 128000,
        "prompt_price": 10.0,
        "completion_price": 30.0,
        "description": "The preview GPT-4 model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. Training data: up to Dec 2023. **Note:** heavily rate limited by OpenAI while..."
    },
    {
        "id": "openai/gpt-4.1",
        "name": "OpenAI: GPT-4.1",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 1047576,
        "prompt_price": 2.0,
        "completion_price": 8.0,
        "description": "GPT-4.1 is a flagship large language model optimized for advanced instruction following, real-world software engineering, and long-context reasoning. It supports a 1 million token context window and outperforms GPT-4o and..."
    },
    {
        "id": "openai/gpt-4.1-mini",
        "name": "OpenAI: GPT-4.1 Mini",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 1047576,
        "prompt_price": 0.39999999999999997,
        "completion_price": 1.5999999999999999,
        "description": "GPT-4.1 Mini is a mid-sized model delivering performance competitive with GPT-4o at substantially lower latency and cost. It retains a 1 million token context window and scores 45.1% on hard..."
    },
    {
        "id": "openai/gpt-4.1-nano",
        "name": "OpenAI: GPT-4.1 Nano",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 1047576,
        "prompt_price": 0.09999999999999999,
        "completion_price": 0.39999999999999997,
        "description": "For tasks that demand low latency, GPT\u20114.1 nano is the fastest and cheapest model in the GPT-4.1 series. It delivers exceptional performance at a small size with its 1 million..."
    },
    {
        "id": "openai/gpt-4o",
        "name": "OpenAI: GPT-4o",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 128000,
        "prompt_price": 2.5,
        "completion_price": 10.0,
        "description": "GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as..."
    },
    {
        "id": "openai/gpt-4o-2024-05-13",
        "name": "OpenAI: GPT-4o (2024-05-13)",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 128000,
        "prompt_price": 5.0,
        "completion_price": 15.0,
        "description": "GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as..."
    },
    {
        "id": "openai/gpt-4o-2024-08-06",
        "name": "OpenAI: GPT-4o (2024-08-06)",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 128000,
        "prompt_price": 2.5,
        "completion_price": 10.0,
        "description": "The 2024-08-06 version of GPT-4o offers improved performance in structured outputs, with the ability to supply a JSON schema in the response_format. Read more [here](https://openai.com/index/introducing-structured-outputs-in-the-api/). GPT-4o (\"o\" for \"omni\") is..."
    },
    {
        "id": "openai/gpt-4o-2024-11-20",
        "name": "OpenAI: GPT-4o (2024-11-20)",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 128000,
        "prompt_price": 2.5,
        "completion_price": 10.0,
        "description": "The 2024-11-20 version of GPT-4o offers a leveled-up creative writing ability with more natural, engaging, and tailored writing to improve relevance & readability. It\u2019s also better at working with uploaded..."
    },
    {
        "id": "openai/gpt-4o:extended",
        "name": "OpenAI: GPT-4o (extended)",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 128000,
        "prompt_price": 6.0,
        "completion_price": 18.0,
        "description": "GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as..."
    },
    {
        "id": "openai/gpt-4o-audio-preview",
        "name": "OpenAI: GPT-4o Audio",
        "provider": "openai",
        "architecture": [
            "audio"
        ],
        "context": 128000,
        "prompt_price": 2.5,
        "completion_price": 10.0,
        "description": "The gpt-4o-audio-preview model adds support for audio inputs as prompts. This enhancement allows the model to detect nuances within audio recordings and add depth to generated user experiences. Audio outputs..."
    },
    {
        "id": "openai/gpt-4o-search-preview",
        "name": "OpenAI: GPT-4o Search Preview",
        "provider": "openai",
        "architecture": [
            "text"
        ],
        "context": 128000,
        "prompt_price": 2.5,
        "completion_price": 10.0,
        "description": "GPT-4o Search Preview is a specialized model for web search in Chat Completions. It is trained to understand and execute web search queries."
    },
    {
        "id": "openai/gpt-4o-mini",
        "name": "OpenAI: GPT-4o-mini",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 128000,
        "prompt_price": 0.15,
        "completion_price": 0.6,
        "description": "GPT-4o mini is OpenAI's newest model after [GPT-4 Omni](/models/openai/gpt-4o), supporting both text and image inputs with text outputs. As their most advanced small model, it is many multiples more affordable..."
    },
    {
        "id": "openai/gpt-4o-mini-2024-07-18",
        "name": "OpenAI: GPT-4o-mini (2024-07-18)",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 128000,
        "prompt_price": 0.15,
        "completion_price": 0.6,
        "description": "GPT-4o mini is OpenAI's newest model after [GPT-4 Omni](/models/openai/gpt-4o), supporting both text and image inputs with text outputs. As their most advanced small model, it is many multiples more affordable..."
    },
    {
        "id": "openai/gpt-4o-mini-search-preview",
        "name": "OpenAI: GPT-4o-mini Search Preview",
        "provider": "openai",
        "architecture": [
            "text"
        ],
        "context": 128000,
        "prompt_price": 0.15,
        "completion_price": 0.6,
        "description": "GPT-4o mini Search Preview is a specialized model for web search in Chat Completions. It is trained to understand and execute web search queries."
    },
    {
        "id": "openai/gpt-5",
        "name": "OpenAI: GPT-5",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 400000,
        "prompt_price": 1.25,
        "completion_price": 10.0,
        "description": "GPT-5 is OpenAI\u2019s most advanced model, offering major improvements in reasoning, code quality, and user experience. It is optimized for complex tasks that require step-by-step reasoning, instruction following, and accuracy..."
    },
    {
        "id": "openai/gpt-5-chat",
        "name": "OpenAI: GPT-5 Chat",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 128000,
        "prompt_price": 1.25,
        "completion_price": 10.0,
        "description": "GPT-5 Chat is designed for advanced, natural, multimodal, and context-aware conversations for enterprise applications."
    },
    {
        "id": "openai/gpt-5-codex",
        "name": "OpenAI: GPT-5 Codex",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 400000,
        "prompt_price": 1.25,
        "completion_price": 10.0,
        "description": "GPT-5-Codex is a specialized version of GPT-5 optimized for software engineering and coding workflows. It is designed for both interactive development sessions and long, independent execution of complex engineering tasks...."
    },
    {
        "id": "openai/gpt-5-image",
        "name": "OpenAI: GPT-5 Image",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 400000,
        "prompt_price": 10.0,
        "completion_price": 10.0,
        "description": "[GPT-5](https://openrouter.ai/openai/gpt-5) Image combines OpenAI's GPT-5 model with state-of-the-art image generation capabilities. It offers major improvements in reasoning, code quality, and user experience while incorporating GPT Image 1's superior instruction following,..."
    },
    {
        "id": "openai/gpt-5-image-mini",
        "name": "OpenAI: GPT-5 Image Mini",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 400000,
        "prompt_price": 2.5,
        "completion_price": 2.0,
        "description": "GPT-5 Image Mini combines OpenAI's advanced language capabilities, powered by [GPT-5 Mini](https://openrouter.ai/openai/gpt-5-mini), with GPT Image 1 Mini for efficient image generation. This natively multimodal model features superior instruction following, text..."
    },
    {
        "id": "openai/gpt-5-mini",
        "name": "OpenAI: GPT-5 Mini",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 400000,
        "prompt_price": 0.25,
        "completion_price": 2.0,
        "description": "GPT-5 Mini is a compact version of GPT-5, designed to handle lighter-weight reasoning tasks. It provides the same instruction-following and safety-tuning benefits as GPT-5, but with reduced latency and cost...."
    },
    {
        "id": "openai/gpt-5-nano",
        "name": "OpenAI: GPT-5 Nano",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 400000,
        "prompt_price": 0.049999999999999996,
        "completion_price": 0.39999999999999997,
        "description": "GPT-5-Nano is the smallest and fastest variant in the GPT-5 system, optimized for developer tools, rapid interactions, and ultra-low latency environments. While limited in reasoning depth compared to its larger..."
    },
    {
        "id": "openai/gpt-5-pro",
        "name": "OpenAI: GPT-5 Pro",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 400000,
        "prompt_price": 15.0,
        "completion_price": 120.0,
        "description": "GPT-5 Pro is OpenAI\u2019s most advanced model, offering major improvements in reasoning, code quality, and user experience. It is optimized for complex tasks that require step-by-step reasoning, instruction following, and..."
    },
    {
        "id": "openai/gpt-5.1",
        "name": "OpenAI: GPT-5.1",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 400000,
        "prompt_price": 1.25,
        "completion_price": 10.0,
        "description": "GPT-5.1 is the latest frontier-grade model in the GPT-5 series, offering stronger general-purpose reasoning, improved instruction adherence, and a more natural conversational style compared to GPT-5. It uses adaptive reasoning..."
    },
    {
        "id": "openai/gpt-5.1-chat",
        "name": "OpenAI: GPT-5.1 Chat",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 128000,
        "prompt_price": 1.25,
        "completion_price": 10.0,
        "description": "GPT-5.1 Chat (AKA Instant) is the fast, lightweight member of the 5.1 family, optimized for low-latency chat while retaining strong general intelligence. It uses adaptive reasoning to selectively \u201cthink\u201d on..."
    },
    {
        "id": "openai/gpt-5.1-codex",
        "name": "OpenAI: GPT-5.1-Codex",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 400000,
        "prompt_price": 1.25,
        "completion_price": 10.0,
        "description": "GPT-5.1-Codex is a specialized version of GPT-5.1 optimized for software engineering and coding workflows. It is designed for both interactive development sessions and long, independent execution of complex engineering tasks...."
    },
    {
        "id": "openai/gpt-5.1-codex-max",
        "name": "OpenAI: GPT-5.1-Codex-Max",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 400000,
        "prompt_price": 1.25,
        "completion_price": 10.0,
        "description": "GPT-5.1-Codex-Max is OpenAI\u2019s latest agentic coding model, designed for long-running, high-context software development tasks. It is based on an updated version of the 5.1 reasoning stack and trained on agentic..."
    },
    {
        "id": "openai/gpt-5.1-codex-mini",
        "name": "OpenAI: GPT-5.1-Codex-Mini",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 400000,
        "prompt_price": 0.25,
        "completion_price": 2.0,
        "description": "GPT-5.1-Codex-Mini is a smaller and faster version of GPT-5.1-Codex"
    },
    {
        "id": "openai/gpt-5.2",
        "name": "OpenAI: GPT-5.2",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 400000,
        "prompt_price": 1.75,
        "completion_price": 14.0,
        "description": "GPT-5.2 is the latest frontier-grade model in the GPT-5 series, offering stronger agentic and long context performance compared to GPT-5.1. It uses adaptive reasoning to allocate computation dynamically, responding quickly..."
    },
    {
        "id": "openai/gpt-5.2-chat",
        "name": "OpenAI: GPT-5.2 Chat",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 128000,
        "prompt_price": 1.75,
        "completion_price": 14.0,
        "description": "GPT-5.2 Chat (AKA Instant) is the fast, lightweight member of the 5.2 family, optimized for low-latency chat while retaining strong general intelligence. It uses adaptive reasoning to selectively \u201cthink\u201d on..."
    },
    {
        "id": "openai/gpt-5.2-pro",
        "name": "OpenAI: GPT-5.2 Pro",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 400000,
        "prompt_price": 21.0,
        "completion_price": 168.0,
        "description": "GPT-5.2 Pro is OpenAI\u2019s most advanced model, offering major improvements in agentic coding and long context performance over GPT-5 Pro. It is optimized for complex tasks that require step-by-step reasoning,..."
    },
    {
        "id": "openai/gpt-5.2-codex",
        "name": "OpenAI: GPT-5.2-Codex",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 400000,
        "prompt_price": 1.75,
        "completion_price": 14.0,
        "description": "GPT-5.2-Codex is an upgraded version of GPT-5.1-Codex optimized for software engineering and coding workflows. It is designed for both interactive development sessions and long, independent execution of complex engineering tasks...."
    },
    {
        "id": "openai/gpt-5.3-chat",
        "name": "OpenAI: GPT-5.3 Chat",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 128000,
        "prompt_price": 1.75,
        "completion_price": 14.0,
        "description": "GPT-5.3 Chat is an update to ChatGPT's most-used model that makes everyday conversations smoother, more useful, and more directly helpful. It delivers more accurate answers with better contextualization and significantly..."
    },
    {
        "id": "openai/gpt-5.3-codex",
        "name": "OpenAI: GPT-5.3-Codex",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 400000,
        "prompt_price": 1.75,
        "completion_price": 14.0,
        "description": "GPT-5.3-Codex is OpenAI\u2019s most advanced agentic coding model, combining the frontier software engineering performance of GPT-5.2-Codex with the broader reasoning and professional knowledge capabilities of GPT-5.2. It achieves state-of-the-art results..."
    },
    {
        "id": "openai/gpt-5.4",
        "name": "OpenAI: GPT-5.4",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 1050000,
        "prompt_price": 2.5,
        "completion_price": 15.0,
        "description": "GPT-5.4 is OpenAI\u2019s latest frontier model, unifying the Codex and GPT lines into a single system. It features a 1M+ token context window (922K input, 128K output) with support for..."
    },
    {
        "id": "openai/gpt-5.4-mini",
        "name": "OpenAI: GPT-5.4 Mini",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 400000,
        "prompt_price": 0.75,
        "completion_price": 4.5,
        "description": "GPT-5.4 mini brings the core capabilities of GPT-5.4 to a faster, more efficient model optimized for high-throughput workloads. It supports text and image inputs with strong performance across reasoning, coding,..."
    },
    {
        "id": "openai/gpt-5.4-nano",
        "name": "OpenAI: GPT-5.4 Nano",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 400000,
        "prompt_price": 0.19999999999999998,
        "completion_price": 1.25,
        "description": "GPT-5.4 nano is the most lightweight and cost-efficient variant of the GPT-5.4 family, optimized for speed-critical and high-volume tasks. It supports text and image inputs and is designed for low-latency..."
    },
    {
        "id": "openai/gpt-5.4-pro",
        "name": "OpenAI: GPT-5.4 Pro",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 1050000,
        "prompt_price": 30.0,
        "completion_price": 180.0,
        "description": "GPT-5.4 Pro is OpenAI's most advanced model, building on GPT-5.4's unified architecture with enhanced reasoning capabilities for complex, high-stakes tasks. It features a 1M+ token context window (922K input, 128K..."
    },
    {
        "id": "openai/gpt-oss-120b",
        "name": "OpenAI: gpt-oss-120b",
        "provider": "openai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.039,
        "completion_price": 0.19,
        "description": "gpt-oss-120b is an open-weight, 117B-parameter Mixture-of-Experts (MoE) language model from OpenAI designed for high-reasoning, agentic, and general-purpose production use cases. It activates 5.1B parameters per forward pass and is optimized..."
    },
    {
        "id": "openai/gpt-oss-120b:free",
        "name": "OpenAI: gpt-oss-120b (free)",
        "provider": "openai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "gpt-oss-120b is an open-weight, 117B-parameter Mixture-of-Experts (MoE) language model from OpenAI designed for high-reasoning, agentic, and general-purpose production use cases. It activates 5.1B parameters per forward pass and is optimized..."
    },
    {
        "id": "openai/gpt-oss-20b",
        "name": "OpenAI: gpt-oss-20b",
        "provider": "openai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.03,
        "completion_price": 0.14,
        "description": "gpt-oss-20b is an open-weight 21B parameter model released by OpenAI under the Apache 2.0 license. It uses a Mixture-of-Experts (MoE) architecture with 3.6B active parameters per forward pass, optimized for..."
    },
    {
        "id": "openai/gpt-oss-20b:free",
        "name": "OpenAI: gpt-oss-20b (free)",
        "provider": "openai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "gpt-oss-20b is an open-weight 21B parameter model released by OpenAI under the Apache 2.0 license. It uses a Mixture-of-Experts (MoE) architecture with 3.6B active parameters per forward pass, optimized for..."
    },
    {
        "id": "openai/gpt-oss-safeguard-20b",
        "name": "OpenAI: gpt-oss-safeguard-20b",
        "provider": "openai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.075,
        "completion_price": 0.3,
        "description": "gpt-oss-safeguard-20b is a safety reasoning model from OpenAI built upon gpt-oss-20b. This open-weight, 21B-parameter Mixture-of-Experts (MoE) model offers lower latency for safety tasks like content classification, LLM filtering, and trust..."
    },
    {
        "id": "openai/o1",
        "name": "OpenAI: o1",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 200000,
        "prompt_price": 15.0,
        "completion_price": 60.0,
        "description": "The latest and strongest model family from OpenAI, o1 is designed to spend more time thinking before responding. The o1 model series is trained with large-scale reinforcement learning to reason..."
    },
    {
        "id": "openai/o1-pro",
        "name": "OpenAI: o1-pro",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 200000,
        "prompt_price": 150.0,
        "completion_price": 600.0,
        "description": "The o1 series of models are trained with reinforcement learning to think before they answer and perform complex reasoning. The o1-pro model uses more compute to think harder and provide..."
    },
    {
        "id": "openai/o3",
        "name": "OpenAI: o3",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 200000,
        "prompt_price": 2.0,
        "completion_price": 8.0,
        "description": "o3 is a well-rounded and powerful model across domains. It sets a new standard for math, science, coding, and visual reasoning tasks. It also excels at technical writing and instruction-following...."
    },
    {
        "id": "openai/o3-deep-research",
        "name": "OpenAI: o3 Deep Research",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 200000,
        "prompt_price": 10.0,
        "completion_price": 40.0,
        "description": "o3-deep-research is OpenAI's advanced model for deep research, designed to tackle complex, multi-step research tasks.\n\nNote: This model always uses the 'web_search' tool which adds additional cost."
    },
    {
        "id": "openai/o3-mini",
        "name": "OpenAI: o3 Mini",
        "provider": "openai",
        "architecture": [
            "text"
        ],
        "context": 200000,
        "prompt_price": 1.1,
        "completion_price": 4.4,
        "description": "OpenAI o3-mini is a cost-efficient language model optimized for STEM reasoning tasks, particularly excelling in science, mathematics, and coding. This model supports the `reasoning_effort` parameter, which can be set to..."
    },
    {
        "id": "openai/o3-mini-high",
        "name": "OpenAI: o3 Mini High",
        "provider": "openai",
        "architecture": [
            "text"
        ],
        "context": 200000,
        "prompt_price": 1.1,
        "completion_price": 4.4,
        "description": "OpenAI o3-mini-high is the same model as [o3-mini](/openai/o3-mini) with reasoning_effort set to high. o3-mini is a cost-efficient language model optimized for STEM reasoning tasks, particularly excelling in science, mathematics, and..."
    },
    {
        "id": "openai/o3-pro",
        "name": "OpenAI: o3 Pro",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 200000,
        "prompt_price": 20.0,
        "completion_price": 80.0,
        "description": "The o-series of models are trained with reinforcement learning to think before they answer and perform complex reasoning. The o3-pro model uses more compute to think harder and provide consistently..."
    },
    {
        "id": "openai/o4-mini",
        "name": "OpenAI: o4 Mini",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 200000,
        "prompt_price": 1.1,
        "completion_price": 4.4,
        "description": "OpenAI o4-mini is a compact reasoning model in the o-series, optimized for fast, cost-efficient performance while retaining strong multimodal and agentic capabilities. It supports tool use and demonstrates competitive reasoning..."
    },
    {
        "id": "openai/o4-mini-deep-research",
        "name": "OpenAI: o4 Mini Deep Research",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 200000,
        "prompt_price": 2.0,
        "completion_price": 8.0,
        "description": "o4-mini-deep-research is OpenAI's faster, more affordable deep research model\u2014ideal for tackling complex, multi-step research tasks.\n\nNote: This model always uses the 'web_search' tool which adds additional cost."
    },
    {
        "id": "openai/o4-mini-high",
        "name": "OpenAI: o4 Mini High",
        "provider": "openai",
        "architecture": [
            "image"
        ],
        "context": 200000,
        "prompt_price": 1.1,
        "completion_price": 4.4,
        "description": "OpenAI o4-mini-high is the same model as [o4-mini](/openai/o4-mini) with reasoning_effort set to high. OpenAI o4-mini is a compact reasoning model in the o-series, optimized for fast, cost-efficient performance while retaining..."
    },
    {
        "id": "openrouter/auto",
        "name": "Auto Router",
        "provider": "openrouter",
        "architecture": [
            "image",
            "audio",
            "video"
        ],
        "context": 2000000,
        "prompt_price": -1000000.0,
        "completion_price": -1000000.0,
        "description": "Your prompt will be processed by a meta-model and routed to one of dozens of models (see below), optimizing for the best possible output. To see which model was used,..."
    },
    {
        "id": "openrouter/bodybuilder",
        "name": "Body Builder (beta)",
        "provider": "openrouter",
        "architecture": [
            "text"
        ],
        "context": 128000,
        "prompt_price": -1000000.0,
        "completion_price": -1000000.0,
        "description": "Transform your natural language requests into structured OpenRouter API request objects. Describe what you want to accomplish with AI models, and Body Builder will construct the appropriate API calls. Example:..."
    },
    {
        "id": "openrouter/elephant-alpha",
        "name": "Elephant",
        "provider": "openrouter",
        "architecture": [
            "text"
        ],
        "context": 262144,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "Elephant Alpha is a 100B-parameter text model focused on intelligence efficiency, delivering strong performance while minimizing token usage. It supports a 256K context window with up to 32K output tokens,..."
    },
    {
        "id": "openrouter/free",
        "name": "Free Models Router",
        "provider": "openrouter",
        "architecture": [
            "image"
        ],
        "context": 200000,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "The simplest way to get free inference. openrouter/free is a router that selects free models at random from the models available on OpenRouter. The router smartly filters for models that..."
    },
    {
        "id": "perplexity/sonar",
        "name": "Perplexity: Sonar",
        "provider": "perplexity",
        "architecture": [
            "image"
        ],
        "context": 127072,
        "prompt_price": 1.0,
        "completion_price": 1.0,
        "description": "Sonar is lightweight, affordable, fast, and simple to use \u2014 now featuring citations and the ability to customize sources. It is designed for companies seeking to integrate lightweight question-and-answer features..."
    },
    {
        "id": "perplexity/sonar-deep-research",
        "name": "Perplexity: Sonar Deep Research",
        "provider": "perplexity",
        "architecture": [
            "text"
        ],
        "context": 128000,
        "prompt_price": 2.0,
        "completion_price": 8.0,
        "description": "Sonar Deep Research is a research-focused model designed for multi-step retrieval, synthesis, and reasoning across complex topics. It autonomously searches, reads, and evaluates sources, refining its approach as it gathers..."
    },
    {
        "id": "perplexity/sonar-pro",
        "name": "Perplexity: Sonar Pro",
        "provider": "perplexity",
        "architecture": [
            "image"
        ],
        "context": 200000,
        "prompt_price": 3.0,
        "completion_price": 15.0,
        "description": "Note: Sonar Pro pricing includes Perplexity search pricing. See [details here](https://docs.perplexity.ai/guides/pricing#detailed-pricing-breakdown-for-sonar-reasoning-pro-and-sonar-pro) For enterprises seeking more advanced capabilities, the Sonar Pro API can handle in-depth, multi-step queries with added extensibility, like..."
    },
    {
        "id": "perplexity/sonar-pro-search",
        "name": "Perplexity: Sonar Pro Search",
        "provider": "perplexity",
        "architecture": [
            "image"
        ],
        "context": 200000,
        "prompt_price": 3.0,
        "completion_price": 15.0,
        "description": "Exclusively available on the OpenRouter API, Sonar Pro's new Pro Search mode is Perplexity's most advanced agentic search system. It is designed for deeper reasoning and analysis. Pricing is based..."
    },
    {
        "id": "perplexity/sonar-reasoning-pro",
        "name": "Perplexity: Sonar Reasoning Pro",
        "provider": "perplexity",
        "architecture": [
            "image"
        ],
        "context": 128000,
        "prompt_price": 2.0,
        "completion_price": 8.0,
        "description": "Note: Sonar Pro pricing includes Perplexity search pricing. See [details here](https://docs.perplexity.ai/guides/pricing#detailed-pricing-breakdown-for-sonar-reasoning-pro-and-sonar-pro) Sonar Reasoning Pro is a premier reasoning model powered by DeepSeek R1 with Chain of Thought (CoT). Designed for..."
    },
    {
        "id": "prime-intellect/intellect-3",
        "name": "Prime Intellect: INTELLECT-3",
        "provider": "prime-intellect",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.19999999999999998,
        "completion_price": 1.1,
        "description": "INTELLECT-3 is a 106B-parameter Mixture-of-Experts model (12B active) post-trained from GLM-4.5-Air-Base using supervised fine-tuning (SFT) followed by large-scale reinforcement learning (RL). It offers state-of-the-art performance for its size across math,..."
    },
    {
        "id": "qwen/qwen-2.5-72b-instruct",
        "name": "Qwen2.5 72B Instruct",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.12,
        "completion_price": 0.39,
        "description": "Qwen2.5 72B is the latest series of Qwen large language models. Qwen2.5 brings the following improvements upon Qwen2: - Significantly more knowledge and has greatly improved capabilities in coding and..."
    },
    {
        "id": "qwen/qwen-2.5-coder-32b-instruct",
        "name": "Qwen2.5 Coder 32B Instruct",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.66,
        "completion_price": 1.0,
        "description": "Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models (formerly known as CodeQwen). Qwen2.5-Coder brings the following improvements upon CodeQwen1.5: - Significant improvements in **code generation**, **code reasoning**..."
    },
    {
        "id": "qwen/qwen-plus-2025-07-28",
        "name": "Qwen: Qwen Plus 0728",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 1000000,
        "prompt_price": 0.26,
        "completion_price": 0.78,
        "description": "Qwen Plus 0728, based on the Qwen3 foundation model, is a 1 million context hybrid reasoning model with a balanced performance, speed, and cost combination."
    },
    {
        "id": "qwen/qwen-plus-2025-07-28:thinking",
        "name": "Qwen: Qwen Plus 0728 (thinking)",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 1000000,
        "prompt_price": 0.26,
        "completion_price": 0.78,
        "description": "Qwen Plus 0728, based on the Qwen3 foundation model, is a 1 million context hybrid reasoning model with a balanced performance, speed, and cost combination."
    },
    {
        "id": "qwen/qwen-vl-max",
        "name": "Qwen: Qwen VL Max",
        "provider": "qwen",
        "architecture": [
            "image"
        ],
        "context": 131072,
        "prompt_price": 0.52,
        "completion_price": 2.08,
        "description": "Qwen VL Max is a visual understanding model with 7500 tokens context length. It excels in delivering optimal performance for a broader spectrum of complex tasks.\n"
    },
    {
        "id": "qwen/qwen-vl-plus",
        "name": "Qwen: Qwen VL Plus",
        "provider": "qwen",
        "architecture": [
            "image"
        ],
        "context": 131072,
        "prompt_price": 0.1365,
        "completion_price": 0.40950000000000003,
        "description": "Qwen's Enhanced Large Visual Language Model. Significantly upgraded for detailed recognition capabilities and text recognition abilities, supporting ultra-high pixel resolutions up to millions of pixels and extreme aspect ratios for..."
    },
    {
        "id": "qwen/qwen-max",
        "name": "Qwen: Qwen-Max",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 1.04,
        "completion_price": 4.16,
        "description": "Qwen-Max, based on Qwen2.5, provides the best inference performance among [Qwen models](/qwen), especially for complex multi-step tasks. It's a large-scale MoE model that has been pretrained on over 20 trillion..."
    },
    {
        "id": "qwen/qwen-plus",
        "name": "Qwen: Qwen-Plus",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 1000000,
        "prompt_price": 0.26,
        "completion_price": 0.78,
        "description": "Qwen-Plus, based on the Qwen2.5 foundation model, is a 131K context model with a balanced performance, speed, and cost combination."
    },
    {
        "id": "qwen/qwen-turbo",
        "name": "Qwen: Qwen-Turbo",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.0325,
        "completion_price": 0.13,
        "description": "Qwen-Turbo, based on Qwen2.5, is a 1M context model that provides fast speed and low cost, suitable for simple tasks."
    },
    {
        "id": "qwen/qwen-2.5-7b-instruct",
        "name": "Qwen: Qwen2.5 7B Instruct",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.04,
        "completion_price": 0.09999999999999999,
        "description": "Qwen2.5 7B is the latest series of Qwen large language models. Qwen2.5 brings the following improvements upon Qwen2: - Significantly more knowledge and has greatly improved capabilities in coding and..."
    },
    {
        "id": "qwen/qwen2.5-vl-32b-instruct",
        "name": "Qwen: Qwen2.5 VL 32B Instruct",
        "provider": "qwen",
        "architecture": [
            "image"
        ],
        "context": 128000,
        "prompt_price": 0.19999999999999998,
        "completion_price": 0.6,
        "description": "Qwen2.5-VL-32B is a multimodal vision-language model fine-tuned through reinforcement learning for enhanced mathematical reasoning, structured outputs, and visual problem-solving capabilities. It excels at visual analysis tasks, including object recognition, textual..."
    },
    {
        "id": "qwen/qwen2.5-vl-72b-instruct",
        "name": "Qwen: Qwen2.5 VL 72B Instruct",
        "provider": "qwen",
        "architecture": [
            "image"
        ],
        "context": 32768,
        "prompt_price": 0.7999999999999999,
        "completion_price": 0.7999999999999999,
        "description": "Qwen2.5-VL is proficient in recognizing common objects such as flowers, birds, fish, and insects. It is also highly capable of analyzing texts, charts, icons, graphics, and layouts within images."
    },
    {
        "id": "qwen/qwen3-14b",
        "name": "Qwen: Qwen3 14B",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 40960,
        "prompt_price": 0.06,
        "completion_price": 0.24,
        "description": "Qwen3-14B is a dense 14.8B parameter causal language model from the Qwen3 series, designed for both complex reasoning and efficient dialogue. It supports seamless switching between a \"thinking\" mode for..."
    },
    {
        "id": "qwen/qwen3-235b-a22b",
        "name": "Qwen: Qwen3 235B A22B",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.45499999999999996,
        "completion_price": 1.8199999999999998,
        "description": "Qwen3-235B-A22B is a 235B parameter mixture-of-experts (MoE) model developed by Qwen, activating 22B parameters per forward pass. It supports seamless switching between a \"thinking\" mode for complex reasoning, math, and..."
    },
    {
        "id": "qwen/qwen3-235b-a22b-2507",
        "name": "Qwen: Qwen3 235B A22B Instruct 2507",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 262144,
        "prompt_price": 0.071,
        "completion_price": 0.09999999999999999,
        "description": "Qwen3-235B-A22B-Instruct-2507 is a multilingual, instruction-tuned mixture-of-experts language model based on the Qwen3-235B architecture, with 22B active parameters per forward pass. It is optimized for general-purpose text generation, including instruction following,..."
    },
    {
        "id": "qwen/qwen3-235b-a22b-thinking-2507",
        "name": "Qwen: Qwen3 235B A22B Thinking 2507",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.14950000000000002,
        "completion_price": 1.495,
        "description": "Qwen3-235B-A22B-Thinking-2507 is a high-performance, open-weight Mixture-of-Experts (MoE) language model optimized for complex reasoning tasks. It activates 22B of its 235B parameters per forward pass and natively supports up to 262,144..."
    },
    {
        "id": "qwen/qwen3-30b-a3b",
        "name": "Qwen: Qwen3 30B A3B",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 40960,
        "prompt_price": 0.08,
        "completion_price": 0.28,
        "description": "Qwen3, the latest generation in the Qwen large language model series, features both dense and mixture-of-experts (MoE) architectures to excel in reasoning, multilingual support, and advanced agent tasks. Its unique..."
    },
    {
        "id": "qwen/qwen3-30b-a3b-instruct-2507",
        "name": "Qwen: Qwen3 30B A3B Instruct 2507",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 262144,
        "prompt_price": 0.09,
        "completion_price": 0.3,
        "description": "Qwen3-30B-A3B-Instruct-2507 is a 30.5B-parameter mixture-of-experts language model from Qwen, with 3.3B active parameters per inference. It operates in non-thinking mode and is designed for high-quality instruction following, multilingual understanding, and..."
    },
    {
        "id": "qwen/qwen3-30b-a3b-thinking-2507",
        "name": "Qwen: Qwen3 30B A3B Thinking 2507",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.08,
        "completion_price": 0.39999999999999997,
        "description": "Qwen3-30B-A3B-Thinking-2507 is a 30B parameter Mixture-of-Experts reasoning model optimized for complex tasks requiring extended multi-step thinking. The model is designed specifically for \u201cthinking mode,\u201d where internal reasoning traces are separated..."
    },
    {
        "id": "qwen/qwen3-32b",
        "name": "Qwen: Qwen3 32B",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 40960,
        "prompt_price": 0.08,
        "completion_price": 0.24,
        "description": "Qwen3-32B is a dense 32.8B parameter causal language model from the Qwen3 series, optimized for both complex reasoning and efficient dialogue. It supports seamless switching between a \"thinking\" mode for..."
    },
    {
        "id": "qwen/qwen3-8b",
        "name": "Qwen: Qwen3 8B",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 40960,
        "prompt_price": 0.049999999999999996,
        "completion_price": 0.39999999999999997,
        "description": "Qwen3-8B is a dense 8.2B parameter causal language model from the Qwen3 series, designed for both reasoning-heavy tasks and efficient dialogue. It supports seamless switching between \"thinking\" mode for math,..."
    },
    {
        "id": "qwen/qwen3-coder-30b-a3b-instruct",
        "name": "Qwen: Qwen3 Coder 30B A3B Instruct",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 160000,
        "prompt_price": 0.07,
        "completion_price": 0.27,
        "description": "Qwen3-Coder-30B-A3B-Instruct is a 30.5B parameter Mixture-of-Experts (MoE) model with 128 experts (8 active per forward pass), designed for advanced code generation, repository-scale understanding, and agentic tool use. Built on the..."
    },
    {
        "id": "qwen/qwen3-coder",
        "name": "Qwen: Qwen3 Coder 480B A35B",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 262144,
        "prompt_price": 0.22,
        "completion_price": 1.0,
        "description": "Qwen3-Coder-480B-A35B-Instruct is a Mixture-of-Experts (MoE) code generation model developed by the Qwen team. It is optimized for agentic coding tasks such as function calling, tool use, and long-context reasoning over..."
    },
    {
        "id": "qwen/qwen3-coder:free",
        "name": "Qwen: Qwen3 Coder 480B A35B (free)",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 262000,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "Qwen3-Coder-480B-A35B-Instruct is a Mixture-of-Experts (MoE) code generation model developed by the Qwen team. It is optimized for agentic coding tasks such as function calling, tool use, and long-context reasoning over..."
    },
    {
        "id": "qwen/qwen3-coder-flash",
        "name": "Qwen: Qwen3 Coder Flash",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 1000000,
        "prompt_price": 0.195,
        "completion_price": 0.975,
        "description": "Qwen3 Coder Flash is Alibaba's fast and cost efficient version of their proprietary Qwen3 Coder Plus. It is a powerful coding agent model specializing in autonomous programming via tool calling..."
    },
    {
        "id": "qwen/qwen3-coder-next",
        "name": "Qwen: Qwen3 Coder Next",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 262144,
        "prompt_price": 0.15,
        "completion_price": 0.7999999999999999,
        "description": "Qwen3-Coder-Next is an open-weight causal language model optimized for coding agents and local development workflows. It uses a sparse MoE design with 80B total parameters and only 3B activated per..."
    },
    {
        "id": "qwen/qwen3-coder-plus",
        "name": "Qwen: Qwen3 Coder Plus",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 1000000,
        "prompt_price": 0.65,
        "completion_price": 3.25,
        "description": "Qwen3 Coder Plus is Alibaba's proprietary version of the Open Source Qwen3 Coder 480B A35B. It is a powerful coding agent model specializing in autonomous programming via tool calling and..."
    },
    {
        "id": "qwen/qwen3-max",
        "name": "Qwen: Qwen3 Max",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 262144,
        "prompt_price": 0.78,
        "completion_price": 3.9,
        "description": "Qwen3-Max is an updated release built on the Qwen3 series, offering major improvements in reasoning, instruction following, multilingual support, and long-tail knowledge coverage compared to the January 2025 version. It..."
    },
    {
        "id": "qwen/qwen3-max-thinking",
        "name": "Qwen: Qwen3 Max Thinking",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 262144,
        "prompt_price": 0.78,
        "completion_price": 3.9,
        "description": "Qwen3-Max-Thinking is the flagship reasoning model in the Qwen3 series, designed for high-stakes cognitive tasks that require deep, multi-step reasoning. By significantly scaling model capacity and reinforcement learning compute, it..."
    },
    {
        "id": "qwen/qwen3-next-80b-a3b-instruct",
        "name": "Qwen: Qwen3 Next 80B A3B Instruct",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 262144,
        "prompt_price": 0.09,
        "completion_price": 1.1,
        "description": "Qwen3-Next-80B-A3B-Instruct is an instruction-tuned chat model in the Qwen3-Next series optimized for fast, stable responses without \u201cthinking\u201d traces. It targets complex tasks across reasoning, code generation, knowledge QA, and multilingual..."
    },
    {
        "id": "qwen/qwen3-next-80b-a3b-instruct:free",
        "name": "Qwen: Qwen3 Next 80B A3B Instruct (free)",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 262144,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "Qwen3-Next-80B-A3B-Instruct is an instruction-tuned chat model in the Qwen3-Next series optimized for fast, stable responses without \u201cthinking\u201d traces. It targets complex tasks across reasoning, code generation, knowledge QA, and multilingual..."
    },
    {
        "id": "qwen/qwen3-next-80b-a3b-thinking",
        "name": "Qwen: Qwen3 Next 80B A3B Thinking",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.0975,
        "completion_price": 0.78,
        "description": "Qwen3-Next-80B-A3B-Thinking is a reasoning-first chat model in the Qwen3-Next line that outputs structured \u201cthinking\u201d traces by default. It\u2019s designed for hard multi-step problems; math proofs, code synthesis/debugging, logic, and agentic..."
    },
    {
        "id": "qwen/qwen3-vl-235b-a22b-instruct",
        "name": "Qwen: Qwen3 VL 235B A22B Instruct",
        "provider": "qwen",
        "architecture": [
            "image"
        ],
        "context": 262144,
        "prompt_price": 0.19999999999999998,
        "completion_price": 0.88,
        "description": "Qwen3-VL-235B-A22B Instruct is an open-weight multimodal model that unifies strong text generation with visual understanding across images and video. The Instruct model targets general vision-language use (VQA, document parsing, chart/table..."
    },
    {
        "id": "qwen/qwen3-vl-235b-a22b-thinking",
        "name": "Qwen: Qwen3 VL 235B A22B Thinking",
        "provider": "qwen",
        "architecture": [
            "image"
        ],
        "context": 131072,
        "prompt_price": 0.26,
        "completion_price": 2.6,
        "description": "Qwen3-VL-235B-A22B Thinking is a multimodal model that unifies strong text generation with visual understanding across images and video. The Thinking model is optimized for multimodal reasoning in STEM and math...."
    },
    {
        "id": "qwen/qwen3-vl-30b-a3b-instruct",
        "name": "Qwen: Qwen3 VL 30B A3B Instruct",
        "provider": "qwen",
        "architecture": [
            "image"
        ],
        "context": 131072,
        "prompt_price": 0.13,
        "completion_price": 0.52,
        "description": "Qwen3-VL-30B-A3B-Instruct is a multimodal model that unifies strong text generation with visual understanding for images and videos. Its Instruct variant optimizes instruction-following for general multimodal tasks. It excels in perception..."
    },
    {
        "id": "qwen/qwen3-vl-30b-a3b-thinking",
        "name": "Qwen: Qwen3 VL 30B A3B Thinking",
        "provider": "qwen",
        "architecture": [
            "image"
        ],
        "context": 131072,
        "prompt_price": 0.13,
        "completion_price": 1.56,
        "description": "Qwen3-VL-30B-A3B-Thinking is a multimodal model that unifies strong text generation with visual understanding for images and videos. Its Thinking variant enhances reasoning in STEM, math, and complex tasks. It excels..."
    },
    {
        "id": "qwen/qwen3-vl-32b-instruct",
        "name": "Qwen: Qwen3 VL 32B Instruct",
        "provider": "qwen",
        "architecture": [
            "image"
        ],
        "context": 131072,
        "prompt_price": 0.10400000000000001,
        "completion_price": 0.41600000000000004,
        "description": "Qwen3-VL-32B-Instruct is a large-scale multimodal vision-language model designed for high-precision understanding and reasoning across text, images, and video. With 32 billion parameters, it combines deep visual perception with advanced text..."
    },
    {
        "id": "qwen/qwen3-vl-8b-instruct",
        "name": "Qwen: Qwen3 VL 8B Instruct",
        "provider": "qwen",
        "architecture": [
            "image"
        ],
        "context": 131072,
        "prompt_price": 0.08,
        "completion_price": 0.5,
        "description": "Qwen3-VL-8B-Instruct is a multimodal vision-language model from the Qwen3-VL series, built for high-fidelity understanding and reasoning across text, images, and video. It features improved multimodal fusion with Interleaved-MRoPE for long-horizon..."
    },
    {
        "id": "qwen/qwen3-vl-8b-thinking",
        "name": "Qwen: Qwen3 VL 8B Thinking",
        "provider": "qwen",
        "architecture": [
            "image"
        ],
        "context": 131072,
        "prompt_price": 0.117,
        "completion_price": 1.365,
        "description": "Qwen3-VL-8B-Thinking is the reasoning-optimized variant of the Qwen3-VL-8B multimodal model, designed for advanced visual and textual reasoning across complex scenes, documents, and temporal sequences. It integrates enhanced multimodal alignment and..."
    },
    {
        "id": "qwen/qwen3.5-397b-a17b",
        "name": "Qwen: Qwen3.5 397B A17B",
        "provider": "qwen",
        "architecture": [
            "image",
            "video"
        ],
        "context": 262144,
        "prompt_price": 0.39,
        "completion_price": 2.34,
        "description": "The Qwen3.5 series 397B-A17B native vision-language model is built on a hybrid architecture that integrates a linear attention mechanism with a sparse mixture-of-experts model, achieving higher inference efficiency. It delivers..."
    },
    {
        "id": "qwen/qwen3.5-plus-02-15",
        "name": "Qwen: Qwen3.5 Plus 2026-02-15",
        "provider": "qwen",
        "architecture": [
            "image",
            "video"
        ],
        "context": 1000000,
        "prompt_price": 0.26,
        "completion_price": 1.56,
        "description": "The Qwen3.5 native vision-language series Plus models are built on a hybrid architecture that integrates linear attention mechanisms with sparse mixture-of-experts models, achieving higher inference efficiency. In a variety of..."
    },
    {
        "id": "qwen/qwen3.5-122b-a10b",
        "name": "Qwen: Qwen3.5-122B-A10B",
        "provider": "qwen",
        "architecture": [
            "image",
            "video"
        ],
        "context": 262144,
        "prompt_price": 0.26,
        "completion_price": 2.08,
        "description": "The Qwen3.5 122B-A10B native vision-language model is built on a hybrid architecture that integrates a linear attention mechanism with a sparse mixture-of-experts model, achieving higher inference efficiency. In terms of..."
    },
    {
        "id": "qwen/qwen3.5-27b",
        "name": "Qwen: Qwen3.5-27B",
        "provider": "qwen",
        "architecture": [
            "image",
            "video"
        ],
        "context": 262144,
        "prompt_price": 0.195,
        "completion_price": 1.56,
        "description": "The Qwen3.5 27B native vision-language Dense model incorporates a linear attention mechanism, delivering fast response times while balancing inference speed and performance. Its overall capabilities are comparable to those of..."
    },
    {
        "id": "qwen/qwen3.5-35b-a3b",
        "name": "Qwen: Qwen3.5-35B-A3B",
        "provider": "qwen",
        "architecture": [
            "image",
            "video"
        ],
        "context": 262144,
        "prompt_price": 0.1625,
        "completion_price": 1.3,
        "description": "The Qwen3.5 Series 35B-A3B is a native vision-language model designed with a hybrid architecture that integrates linear attention mechanisms and a sparse mixture-of-experts model, achieving higher inference efficiency. Its overall..."
    },
    {
        "id": "qwen/qwen3.5-9b",
        "name": "Qwen: Qwen3.5-9B",
        "provider": "qwen",
        "architecture": [
            "image",
            "video"
        ],
        "context": 256000,
        "prompt_price": 0.049999999999999996,
        "completion_price": 0.15,
        "description": "Qwen3.5-9B is a multimodal foundation model from the Qwen3.5 family, designed to deliver strong reasoning, coding, and visual understanding in an efficient 9B-parameter architecture. It uses a unified vision-language design..."
    },
    {
        "id": "qwen/qwen3.5-flash-02-23",
        "name": "Qwen: Qwen3.5-Flash",
        "provider": "qwen",
        "architecture": [
            "image",
            "video"
        ],
        "context": 1000000,
        "prompt_price": 0.065,
        "completion_price": 0.26,
        "description": "The Qwen3.5 native vision-language Flash models are built on a hybrid architecture that integrates a linear attention mechanism with a sparse mixture-of-experts model, achieving higher inference efficiency. Compared to the..."
    },
    {
        "id": "qwen/qwen3.6-plus",
        "name": "Qwen: Qwen3.6 Plus",
        "provider": "qwen",
        "architecture": [
            "image",
            "video"
        ],
        "context": 1000000,
        "prompt_price": 0.325,
        "completion_price": 1.95,
        "description": "Qwen 3.6 Plus builds on a hybrid architecture that combines efficient linear attention with sparse mixture-of-experts routing, enabling strong scalability and high-performance inference. Compared to the 3.5 series, it delivers..."
    },
    {
        "id": "qwen/qwq-32b",
        "name": "Qwen: QwQ 32B",
        "provider": "qwen",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.15,
        "completion_price": 0.58,
        "description": "QwQ is the reasoning model of the Qwen series. Compared with conventional instruction-tuned models, QwQ, which is capable of thinking and reasoning, can achieve significantly enhanced performance in downstream tasks,..."
    },
    {
        "id": "rekaai/reka-edge",
        "name": "Reka Edge",
        "provider": "rekaai",
        "architecture": [
            "image",
            "video"
        ],
        "context": 16384,
        "prompt_price": 0.09999999999999999,
        "completion_price": 0.09999999999999999,
        "description": "Reka Edge is an extremely efficient 7B multimodal vision-language model that accepts image/video+text inputs and generates text outputs. This model is optimized specifically to deliver industry-leading performance in image understanding,..."
    },
    {
        "id": "rekaai/reka-flash-3",
        "name": "Reka Flash 3",
        "provider": "rekaai",
        "architecture": [
            "text"
        ],
        "context": 65536,
        "prompt_price": 0.09999999999999999,
        "completion_price": 0.19999999999999998,
        "description": "Reka Flash 3 is a general-purpose, instruction-tuned large language model with 21 billion parameters, developed by Reka. It excels at general chat, coding tasks, instruction-following, and function calling. Featuring a..."
    },
    {
        "id": "relace/relace-apply-3",
        "name": "Relace: Relace Apply 3",
        "provider": "relace",
        "architecture": [
            "text"
        ],
        "context": 256000,
        "prompt_price": 0.85,
        "completion_price": 1.25,
        "description": "Relace Apply 3 is a specialized code-patching LLM that merges AI-suggested edits straight into your source files. It can apply updates from GPT-4o, Claude, and others into your files at..."
    },
    {
        "id": "relace/relace-search",
        "name": "Relace: Relace Search",
        "provider": "relace",
        "architecture": [
            "text"
        ],
        "context": 256000,
        "prompt_price": 1.0,
        "completion_price": 3.0,
        "description": "The relace-search model uses 4-12 `view_file` and `grep` tools in parallel to explore a codebase and return relevant files to the user request. In contrast to RAG, relace-search performs agentic..."
    },
    {
        "id": "sao10k/l3-lunaris-8b",
        "name": "Sao10K: Llama 3 8B Lunaris",
        "provider": "sao10k",
        "architecture": [
            "text"
        ],
        "context": 8192,
        "prompt_price": 0.04,
        "completion_price": 0.049999999999999996,
        "description": "Lunaris 8B is a versatile generalist and roleplaying model based on Llama 3. It's a strategic merge of multiple models, designed to balance creativity with improved logic and general knowledge...."
    },
    {
        "id": "sao10k/l3-euryale-70b",
        "name": "Sao10k: Llama 3 Euryale 70B v2.1",
        "provider": "sao10k",
        "architecture": [
            "text"
        ],
        "context": 8192,
        "prompt_price": 1.48,
        "completion_price": 1.48,
        "description": "Euryale 70B v2.1 is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k). - Better prompt adherence. - Better anatomy / spatial awareness. - Adapts much better to unique and custom..."
    },
    {
        "id": "sao10k/l3.1-70b-hanami-x1",
        "name": "Sao10K: Llama 3.1 70B Hanami x1",
        "provider": "sao10k",
        "architecture": [
            "text"
        ],
        "context": 16000,
        "prompt_price": 3.0,
        "completion_price": 3.0,
        "description": "This is [Sao10K](/sao10k)'s experiment over [Euryale v2.2](/sao10k/l3.1-euryale-70b)."
    },
    {
        "id": "sao10k/l3.1-euryale-70b",
        "name": "Sao10K: Llama 3.1 Euryale 70B v2.2",
        "provider": "sao10k",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.85,
        "completion_price": 0.85,
        "description": "Euryale L3.1 70B v2.2 is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k). It is the successor of [Euryale L3 70B v2.1](/models/sao10k/l3-euryale-70b)."
    },
    {
        "id": "sao10k/l3.3-euryale-70b",
        "name": "Sao10K: Llama 3.3 Euryale 70B",
        "provider": "sao10k",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.65,
        "completion_price": 0.75,
        "description": "Euryale L3.3 70B is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k). It is the successor of [Euryale L3 70B v2.2](/models/sao10k/l3-euryale-70b)."
    },
    {
        "id": "stepfun/step-3.5-flash",
        "name": "StepFun: Step 3.5 Flash",
        "provider": "stepfun",
        "architecture": [
            "text"
        ],
        "context": 262144,
        "prompt_price": 0.09999999999999999,
        "completion_price": 0.3,
        "description": "Step 3.5 Flash is StepFun's most capable open-source foundation model. Built on a sparse Mixture of Experts (MoE) architecture, it selectively activates only 11B of its 196B parameters per token...."
    },
    {
        "id": "switchpoint/router",
        "name": "Switchpoint Router",
        "provider": "switchpoint",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.85,
        "completion_price": 3.4,
        "description": "Switchpoint AI's router instantly analyzes your request and directs it to the optimal AI from an ever-evolving library. As the world of LLMs advances, our router gets smarter, ensuring you..."
    },
    {
        "id": "tencent/hunyuan-a13b-instruct",
        "name": "Tencent: Hunyuan A13B Instruct",
        "provider": "tencent",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.14,
        "completion_price": 0.5700000000000001,
        "description": "Hunyuan-A13B is a 13B active parameter Mixture-of-Experts (MoE) language model developed by Tencent, with a total parameter count of 80B and support for reasoning via Chain-of-Thought. It offers competitive benchmark..."
    },
    {
        "id": "thedrummer/cydonia-24b-v4.1",
        "name": "TheDrummer: Cydonia 24B V4.1",
        "provider": "thedrummer",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.3,
        "completion_price": 0.5,
        "description": "Uncensored and creative writing model based on Mistral Small 3.2 24B with good recall, prompt adherence, and intelligence."
    },
    {
        "id": "thedrummer/rocinante-12b",
        "name": "TheDrummer: Rocinante 12B",
        "provider": "thedrummer",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.16999999999999998,
        "completion_price": 0.43,
        "description": "Rocinante 12B is designed for engaging storytelling and rich prose. Early testers have reported: - Expanded vocabulary with unique and expressive word choices - Enhanced creativity for vivid narratives -..."
    },
    {
        "id": "thedrummer/skyfall-36b-v2",
        "name": "TheDrummer: Skyfall 36B V2",
        "provider": "thedrummer",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.55,
        "completion_price": 0.7999999999999999,
        "description": "Skyfall 36B v2 is an enhanced iteration of Mistral Small 2501, specifically fine-tuned for improved creativity, nuanced writing, role-playing, and coherent storytelling."
    },
    {
        "id": "thedrummer/unslopnemo-12b",
        "name": "TheDrummer: UnslopNemo 12B",
        "provider": "thedrummer",
        "architecture": [
            "text"
        ],
        "context": 32768,
        "prompt_price": 0.39999999999999997,
        "completion_price": 0.39999999999999997,
        "description": "UnslopNemo v4.1 is the latest addition from the creator of Rocinante, designed for adventure writing and role-play scenarios."
    },
    {
        "id": "tngtech/deepseek-r1t2-chimera",
        "name": "TNG: DeepSeek R1T2 Chimera",
        "provider": "tngtech",
        "architecture": [
            "text"
        ],
        "context": 163840,
        "prompt_price": 0.3,
        "completion_price": 1.1,
        "description": "DeepSeek-TNG-R1T2-Chimera is the second-generation Chimera model from TNG Tech. It is a 671 B-parameter mixture-of-experts text-generation model assembled from DeepSeek-AI\u2019s R1-0528, R1, and V3-0324 checkpoints with an Assembly-of-Experts merge. The..."
    },
    {
        "id": "undi95/remm-slerp-l2-13b",
        "name": "ReMM SLERP 13B",
        "provider": "undi95",
        "architecture": [
            "text"
        ],
        "context": 6144,
        "prompt_price": 0.44999999999999996,
        "completion_price": 0.65,
        "description": "A recreation trial of the original MythoMax-L2-B13 but with updated models. #merge"
    },
    {
        "id": "upstage/solar-pro-3",
        "name": "Upstage: Solar Pro 3",
        "provider": "upstage",
        "architecture": [
            "text"
        ],
        "context": 128000,
        "prompt_price": 0.15,
        "completion_price": 0.6,
        "description": "Solar Pro 3 is Upstage's powerful Mixture-of-Experts (MoE) language model. With 102B total parameters and 12B active parameters per forward pass, it delivers exceptional performance while maintaining computational efficiency. Optimized..."
    },
    {
        "id": "writer/palmyra-x5",
        "name": "Writer: Palmyra X5",
        "provider": "writer",
        "architecture": [
            "text"
        ],
        "context": 1040000,
        "prompt_price": 0.6,
        "completion_price": 6.0,
        "description": "Palmyra X5 is Writer's most advanced model, purpose-built for building and scaling AI agents across the enterprise. It delivers industry-leading speed and efficiency on context windows up to 1 million..."
    },
    {
        "id": "x-ai/grok-3",
        "name": "xAI: Grok 3",
        "provider": "x-ai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 3.0,
        "completion_price": 15.0,
        "description": "Grok 3 is the latest model from xAI. It's their flagship model that excels at enterprise use cases like data extraction, coding, and text summarization. Possesses deep domain knowledge in..."
    },
    {
        "id": "x-ai/grok-3-beta",
        "name": "xAI: Grok 3 Beta",
        "provider": "x-ai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 3.0,
        "completion_price": 15.0,
        "description": "Grok 3 is the latest model from xAI. It's their flagship model that excels at enterprise use cases like data extraction, coding, and text summarization. Possesses deep domain knowledge in..."
    },
    {
        "id": "x-ai/grok-3-mini",
        "name": "xAI: Grok 3 Mini",
        "provider": "x-ai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.3,
        "completion_price": 0.5,
        "description": "A lightweight model that thinks before responding. Fast, smart, and great for logic-based tasks that do not require deep domain knowledge. The raw thinking traces are accessible."
    },
    {
        "id": "x-ai/grok-3-mini-beta",
        "name": "xAI: Grok 3 Mini Beta",
        "provider": "x-ai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.3,
        "completion_price": 0.5,
        "description": "Grok 3 Mini is a lightweight, smaller thinking model. Unlike traditional models that generate answers immediately, Grok 3 Mini thinks before responding. It\u2019s ideal for reasoning-heavy tasks that don\u2019t demand..."
    },
    {
        "id": "x-ai/grok-4",
        "name": "xAI: Grok 4",
        "provider": "x-ai",
        "architecture": [
            "image"
        ],
        "context": 256000,
        "prompt_price": 3.0,
        "completion_price": 15.0,
        "description": "Grok 4 is xAI's latest reasoning model with a 256k context window. It supports parallel tool calling, structured outputs, and both image and text inputs. Note that reasoning is not..."
    },
    {
        "id": "x-ai/grok-4-fast",
        "name": "xAI: Grok 4 Fast",
        "provider": "x-ai",
        "architecture": [
            "image"
        ],
        "context": 2000000,
        "prompt_price": 0.19999999999999998,
        "completion_price": 0.5,
        "description": "Grok 4 Fast is xAI's latest multimodal model with SOTA cost-efficiency and a 2M token context window. It comes in two flavors: non-reasoning and reasoning. Read more about the model..."
    },
    {
        "id": "x-ai/grok-4.1-fast",
        "name": "xAI: Grok 4.1 Fast",
        "provider": "x-ai",
        "architecture": [
            "image"
        ],
        "context": 2000000,
        "prompt_price": 0.19999999999999998,
        "completion_price": 0.5,
        "description": "Grok 4.1 Fast is xAI's best agentic tool calling model that shines in real-world use cases like customer support and deep research. 2M context window. Reasoning can be enabled/disabled using..."
    },
    {
        "id": "x-ai/grok-4.20",
        "name": "xAI: Grok 4.20",
        "provider": "x-ai",
        "architecture": [
            "image"
        ],
        "context": 2000000,
        "prompt_price": 2.0,
        "completion_price": 6.0,
        "description": "Grok 4.20 is xAI's newest flagship model with industry-leading speed and agentic tool calling capabilities. It combines the lowest hallucination rate on the market with strict prompt adherance, delivering consistently..."
    },
    {
        "id": "x-ai/grok-4.20-multi-agent",
        "name": "xAI: Grok 4.20 Multi-Agent",
        "provider": "x-ai",
        "architecture": [
            "image"
        ],
        "context": 2000000,
        "prompt_price": 2.0,
        "completion_price": 6.0,
        "description": "Grok 4.20 Multi-Agent is a variant of xAI\u2019s Grok 4.20 designed for collaborative, agent-based workflows. Multiple agents operate in parallel to conduct deep research, coordinate tool use, and synthesize information..."
    },
    {
        "id": "x-ai/grok-code-fast-1",
        "name": "xAI: Grok Code Fast 1",
        "provider": "x-ai",
        "architecture": [
            "text"
        ],
        "context": 256000,
        "prompt_price": 0.19999999999999998,
        "completion_price": 1.5,
        "description": "Grok Code Fast 1 is a speedy and economical reasoning model that excels at agentic coding. With reasoning traces visible in the response, developers can steer Grok Code for high-quality..."
    },
    {
        "id": "xiaomi/mimo-v2-flash",
        "name": "Xiaomi: MiMo-V2-Flash",
        "provider": "xiaomi",
        "architecture": [
            "text"
        ],
        "context": 262144,
        "prompt_price": 0.09,
        "completion_price": 0.29,
        "description": "MiMo-V2-Flash is an open-source foundation language model developed by Xiaomi. It is a Mixture-of-Experts model with 309B total parameters and 15B active parameters, adopting hybrid attention architecture. MiMo-V2-Flash supports a..."
    },
    {
        "id": "xiaomi/mimo-v2-omni",
        "name": "Xiaomi: MiMo-V2-Omni",
        "provider": "xiaomi",
        "architecture": [
            "image",
            "audio",
            "video"
        ],
        "context": 262144,
        "prompt_price": 0.39999999999999997,
        "completion_price": 2.0,
        "description": "MiMo-V2-Omni is a frontier omni-modal model that natively processes image, video, and audio inputs within a unified architecture. It combines strong multimodal perception with agentic capability - visual grounding, multi-step..."
    },
    {
        "id": "xiaomi/mimo-v2-pro",
        "name": "Xiaomi: MiMo-V2-Pro",
        "provider": "xiaomi",
        "architecture": [
            "text"
        ],
        "context": 1048576,
        "prompt_price": 1.0,
        "completion_price": 3.0,
        "description": "MiMo-V2-Pro is Xiaomi's flagship foundation model, featuring over 1T total parameters and a 1M context length, deeply optimized for agentic scenarios. It is highly adaptable to general agent frameworks like..."
    },
    {
        "id": "z-ai/glm-4-32b",
        "name": "Z.ai: GLM 4 32B ",
        "provider": "z-ai",
        "architecture": [
            "text"
        ],
        "context": 128000,
        "prompt_price": 0.09999999999999999,
        "completion_price": 0.09999999999999999,
        "description": "GLM 4 32B is a cost-effective foundation language model. It can efficiently perform complex tasks and has significantly enhanced capabilities in tool use, online search, and code-related intelligent tasks. It..."
    },
    {
        "id": "z-ai/glm-4.5",
        "name": "Z.ai: GLM 4.5",
        "provider": "z-ai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.6,
        "completion_price": 2.2,
        "description": "GLM-4.5 is our latest flagship foundation model, purpose-built for agent-based applications. It leverages a Mixture-of-Experts (MoE) architecture and supports a context length of up to 128k tokens. GLM-4.5 delivers significantly..."
    },
    {
        "id": "z-ai/glm-4.5-air",
        "name": "Z.ai: GLM 4.5 Air",
        "provider": "z-ai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.13,
        "completion_price": 0.85,
        "description": "GLM-4.5-Air is the lightweight variant of our latest flagship model family, also purpose-built for agent-centric applications. Like GLM-4.5, it adopts the Mixture-of-Experts (MoE) architecture but with a more compact parameter..."
    },
    {
        "id": "z-ai/glm-4.5-air:free",
        "name": "Z.ai: GLM 4.5 Air (free)",
        "provider": "z-ai",
        "architecture": [
            "text"
        ],
        "context": 131072,
        "prompt_price": 0.0,
        "completion_price": 0.0,
        "description": "GLM-4.5-Air is the lightweight variant of our latest flagship model family, also purpose-built for agent-centric applications. Like GLM-4.5, it adopts the Mixture-of-Experts (MoE) architecture but with a more compact parameter..."
    },
    {
        "id": "z-ai/glm-4.5v",
        "name": "Z.ai: GLM 4.5V",
        "provider": "z-ai",
        "architecture": [
            "image"
        ],
        "context": 65536,
        "prompt_price": 0.6,
        "completion_price": 1.7999999999999998,
        "description": "GLM-4.5V is a vision-language foundation model for multimodal agent applications. Built on a Mixture-of-Experts (MoE) architecture with 106B parameters and 12B activated parameters, it achieves state-of-the-art results in video understanding,..."
    },
    {
        "id": "z-ai/glm-4.6",
        "name": "Z.ai: GLM 4.6",
        "provider": "z-ai",
        "architecture": [
            "text"
        ],
        "context": 204800,
        "prompt_price": 0.39,
        "completion_price": 1.9,
        "description": "Compared with GLM-4.5, this generation brings several key improvements: Longer context window: The context window has been expanded from 128K to 200K tokens, enabling the model to handle more complex..."
    },
    {
        "id": "z-ai/glm-4.6v",
        "name": "Z.ai: GLM 4.6V",
        "provider": "z-ai",
        "architecture": [
            "image",
            "video"
        ],
        "context": 131072,
        "prompt_price": 0.3,
        "completion_price": 0.8999999999999999,
        "description": "GLM-4.6V is a large multimodal model designed for high-fidelity visual understanding and long-context reasoning across images, documents, and mixed media. It supports up to 128K tokens, processes complex page layouts..."
    },
    {
        "id": "z-ai/glm-4.7",
        "name": "Z.ai: GLM 4.7",
        "provider": "z-ai",
        "architecture": [
            "text"
        ],
        "context": 202752,
        "prompt_price": 0.39,
        "completion_price": 1.75,
        "description": "GLM-4.7 is Z.ai\u2019s latest flagship model, featuring upgrades in two key areas: enhanced programming capabilities and more stable multi-step reasoning/execution. It demonstrates significant improvements in executing complex agent tasks while..."
    },
    {
        "id": "z-ai/glm-4.7-flash",
        "name": "Z.ai: GLM 4.7 Flash",
        "provider": "z-ai",
        "architecture": [
            "text"
        ],
        "context": 202752,
        "prompt_price": 0.06,
        "completion_price": 0.39999999999999997,
        "description": "As a 30B-class SOTA model, GLM-4.7-Flash offers a new option that balances performance and efficiency. It is further optimized for agentic coding use cases, strengthening coding capabilities, long-horizon task planning,..."
    },
    {
        "id": "z-ai/glm-5",
        "name": "Z.ai: GLM 5",
        "provider": "z-ai",
        "architecture": [
            "text"
        ],
        "context": 80000,
        "prompt_price": 0.72,
        "completion_price": 2.3,
        "description": "GLM-5 is Z.ai\u2019s flagship open-source foundation model engineered for complex systems design and long-horizon agent workflows. Built for expert developers, it delivers production-grade performance on large-scale programming tasks, rivaling leading..."
    },
    {
        "id": "z-ai/glm-5-turbo",
        "name": "Z.ai: GLM 5 Turbo",
        "provider": "z-ai",
        "architecture": [
            "text"
        ],
        "context": 202752,
        "prompt_price": 1.2,
        "completion_price": 4.0,
        "description": "GLM-5 Turbo is a new model from Z.ai designed for fast inference and strong performance in agent-driven environments such as OpenClaw scenarios. It is deeply optimized for real-world agent workflows..."
    },
    {
        "id": "z-ai/glm-5.1",
        "name": "Z.ai: GLM 5.1",
        "provider": "z-ai",
        "architecture": [
            "text"
        ],
        "context": 202752,
        "prompt_price": 0.95,
        "completion_price": 3.15,
        "description": "GLM-5.1 delivers a major leap in coding capability, with particularly significant gains in handling long-horizon tasks. Unlike previous models built around minute-level interactions, GLM-5.1 can work independently and continuously on..."
    },
    {
        "id": "z-ai/glm-5v-turbo",
        "name": "Z.ai: GLM 5V Turbo",
        "provider": "z-ai",
        "architecture": [
            "image",
            "video"
        ],
        "context": 202752,
        "prompt_price": 1.2,
        "completion_price": 4.0,
        "description": "GLM-5V-Turbo is Z.ai\u2019s first native multimodal agent foundation model, built for vision-based coding and agent-driven tasks. It natively handles image, video, and text inputs, excels at long-horizon planning, complex coding,..."
    }
]