From 9a836d8771eb545e2ae6953739dab604fac2038b Mon Sep 17 00:00:00 2001
From: Jacky
Date: Thu, 29 Jan 2026 18:32:39 +0800
Subject: [PATCH 1/2] feat: add Volcengine and BytePlus providers

Add two new providers for ByteDance's AI coding service:
- Volcengine: China region (ark.cn-beijing.volces.com)
- BytePlus: International region (ark.ap-southeast.bytepluses.com)

Both providers support the ark-code-latest model with:
- 256K context window
- 32K max output tokens
- Reasoning/thinking capability
- Image input support
- Tool calling support

Co-Authored-By: Claude Opus 4.5
---
 providers/byteplus/logo.svg                    |  5 ++++
 .../byteplus/models/ark-code-latest.toml       | 25 ++++++++++++++++++
 providers/byteplus/provider.toml               |  5 ++++
 providers/volcengine/logo.svg                  | 26 +++++++++++++++++++
 .../volcengine/models/ark-code-latest.toml     | 25 ++++++++++++++++++
 providers/volcengine/provider.toml             |  5 ++++
 6 files changed, 91 insertions(+)
 create mode 100644 providers/byteplus/logo.svg
 create mode 100644 providers/byteplus/models/ark-code-latest.toml
 create mode 100644 providers/byteplus/provider.toml
 create mode 100644 providers/volcengine/logo.svg
 create mode 100644 providers/volcengine/models/ark-code-latest.toml
 create mode 100644 providers/volcengine/provider.toml

diff --git a/providers/byteplus/logo.svg b/providers/byteplus/logo.svg
new file mode 100644
index 000000000..2f3a122bb
--- /dev/null
+++ b/providers/byteplus/logo.svg
@@ -0,0 +1,5 @@
+
+
+
+
+
diff --git a/providers/byteplus/models/ark-code-latest.toml b/providers/byteplus/models/ark-code-latest.toml
new file mode 100644
index 000000000..a5958e449
--- /dev/null
+++ b/providers/byteplus/models/ark-code-latest.toml
@@ -0,0 +1,25 @@
+name = "Ark Code Latest"
+release_date = "2025-01"
+last_updated = "2025-01"
+attachment = true
+reasoning = true
+temperature = true
+tool_call = true
+open_weights = false
+
+[interleaved]
+field = "reasoning_content"
+
+[cost]
+input = 0
+output = 0
+cache_read = 0
+cache_write = 0
+
+[limit]
+context = 256_000
+output = 32_000
+
+[modalities]
+input = ["text", "image"]
+output = ["text"]
diff --git a/providers/byteplus/provider.toml b/providers/byteplus/provider.toml
new file mode 100644
index 000000000..afa21fad5
--- /dev/null
+++ b/providers/byteplus/provider.toml
@@ -0,0 +1,5 @@
+name = "BytePlus"
+env = ["ARK_API_KEY"]
+npm = "@ai-sdk/openai-compatible"
+api = "https://ark.ap-southeast.bytepluses.com/api/coding/v3"
+doc = "https://www.byteplus.com/en/activity/codingplan"
diff --git a/providers/volcengine/logo.svg b/providers/volcengine/logo.svg
new file mode 100644
index 000000000..0f8529e76
--- /dev/null
+++ b/providers/volcengine/logo.svg
@@ -0,0 +1,26 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/providers/volcengine/models/ark-code-latest.toml b/providers/volcengine/models/ark-code-latest.toml
new file mode 100644
index 000000000..a5958e449
--- /dev/null
+++ b/providers/volcengine/models/ark-code-latest.toml
@@ -0,0 +1,25 @@
+name = "Ark Code Latest"
+release_date = "2025-01"
+last_updated = "2025-01"
+attachment = true
+reasoning = true
+temperature = true
+tool_call = true
+open_weights = false
+
+[interleaved]
+field = "reasoning_content"
+
+[cost]
+input = 0
+output = 0
+cache_read = 0
+cache_write = 0
+
+[limit]
+context = 256_000
+output = 32_000
+
+[modalities]
+input = ["text", "image"]
+output = ["text"]
diff --git a/providers/volcengine/provider.toml b/providers/volcengine/provider.toml
new file mode 100644
index 000000000..979aada30
--- /dev/null
+++ b/providers/volcengine/provider.toml
@@ -0,0 +1,5 @@
+name = "Volcengine"
+env = ["ARK_API_KEY"]
+npm = "@ai-sdk/openai-compatible"
+api = "https://ark.cn-beijing.volces.com/api/coding/v3"
+doc = "https://www.volcengine.com/activity/codingplan"

From a93cae926f4bfc15f9561974fe5e39340f6332c3 Mon Sep 17 00:00:00 2001
From: Jacky
Date: Tue, 3 Feb 2026 13:33:03 +0800
Subject: [PATCH 2/2] feat: add BytePlus and Volcengine coding plan providers
 with associated models and logos

---
 .../logo.svg                                   |  0
 .../models/ark-code-latest.toml                |  0
 .../models/doubao-seed-code.toml               | 25 +++++++++++++++++++
 .../byteplus-coding-plan/models/glm-4.7.toml   | 25 +++++++++++++++++++
 .../models/kimi-k2-thinking.toml               | 25 +++++++++++++++++++
 .../models/kimi-k2.5.toml                      | 25 +++++++++++++++++++
 .../provider.toml                              |  2 +-
 .../logo.svg                                   |  0
 .../models/ark-code-latest.toml                |  0
 .../models/deepseek-v3.2.toml                  | 25 +++++++++++++++++++
 .../models/doubao-seed-code.toml               | 25 +++++++++++++++++++
 .../models/glm-4.7.toml                        | 25 +++++++++++++++++++
 .../models/kimi-k2-thinking.toml               | 25 +++++++++++++++++++
 .../models/kimi-k2.5.toml                      | 25 +++++++++++++++++++
 .../provider.toml                              |  2 +-
 15 files changed, 227 insertions(+), 2 deletions(-)
 rename providers/{byteplus => byteplus-coding-plan}/logo.svg (100%)
 rename providers/{byteplus => byteplus-coding-plan}/models/ark-code-latest.toml (100%)
 create mode 100644 providers/byteplus-coding-plan/models/doubao-seed-code.toml
 create mode 100644 providers/byteplus-coding-plan/models/glm-4.7.toml
 create mode 100644 providers/byteplus-coding-plan/models/kimi-k2-thinking.toml
 create mode 100644 providers/byteplus-coding-plan/models/kimi-k2.5.toml
 rename providers/{byteplus => byteplus-coding-plan}/provider.toml (85%)
 rename providers/{volcengine => volcengine-coding-plan}/logo.svg (100%)
 rename providers/{volcengine => volcengine-coding-plan}/models/ark-code-latest.toml (100%)
 create mode 100644 providers/volcengine-coding-plan/models/deepseek-v3.2.toml
 create mode 100644 providers/volcengine-coding-plan/models/doubao-seed-code.toml
 create mode 100644 providers/volcengine-coding-plan/models/glm-4.7.toml
 create mode 100644 providers/volcengine-coding-plan/models/kimi-k2-thinking.toml
 create mode 100644 providers/volcengine-coding-plan/models/kimi-k2.5.toml
 rename providers/{volcengine => volcengine-coding-plan}/provider.toml (83%)

diff --git a/providers/byteplus/logo.svg b/providers/byteplus-coding-plan/logo.svg
similarity index 100%
rename from providers/byteplus/logo.svg
rename to providers/byteplus-coding-plan/logo.svg
diff --git a/providers/byteplus/models/ark-code-latest.toml b/providers/byteplus-coding-plan/models/ark-code-latest.toml
similarity index 100%
rename from providers/byteplus/models/ark-code-latest.toml
rename to providers/byteplus-coding-plan/models/ark-code-latest.toml
diff --git a/providers/byteplus-coding-plan/models/doubao-seed-code.toml b/providers/byteplus-coding-plan/models/doubao-seed-code.toml
new file mode 100644
index 000000000..badcc800b
--- /dev/null
+++ b/providers/byteplus-coding-plan/models/doubao-seed-code.toml
@@ -0,0 +1,25 @@
+name = "Doubao Seed Code"
+release_date = "2025-01"
+last_updated = "2025-01"
+attachment = true
+reasoning = true
+temperature = true
+tool_call = true
+open_weights = true
+
+[interleaved]
+field = "reasoning_content"
+
+[cost]
+input = 0
+output = 0
+cache_read = 0
+cache_write = 0
+
+[limit]
+context = 256_000
+output = 32_000
+
+[modalities]
+input = ["text", "image"]
+output = ["text"]
diff --git a/providers/byteplus-coding-plan/models/glm-4.7.toml b/providers/byteplus-coding-plan/models/glm-4.7.toml
new file mode 100644
index 000000000..5946a282d
--- /dev/null
+++ b/providers/byteplus-coding-plan/models/glm-4.7.toml
@@ -0,0 +1,25 @@
+name = "GLM-4.7"
+release_date = "2025-01"
+last_updated = "2025-01"
+attachment = false
+reasoning = true
+temperature = true
+tool_call = true
+open_weights = true
+
+[interleaved]
+field = "reasoning_content"
+
+[cost]
+input = 0
+output = 0
+cache_read = 0
+cache_write = 0
+
+[limit]
+context = 200_000
+output = 128_000
+
+[modalities]
+input = ["text"]
+output = ["text"]
diff --git a/providers/byteplus-coding-plan/models/kimi-k2-thinking.toml b/providers/byteplus-coding-plan/models/kimi-k2-thinking.toml
new file mode 100644
index 000000000..7aebc5514
--- /dev/null
+++ b/providers/byteplus-coding-plan/models/kimi-k2-thinking.toml
@@ -0,0 +1,25 @@
+name = "Kimi K2 Thinking"
+release_date = "2025-01"
+last_updated = "2025-01"
+attachment = false
+reasoning = true
+temperature = true
+tool_call = true
+open_weights = true
+
+[interleaved]
+field = "reasoning_content"
+
+[cost]
+input = 0
+output = 0
+cache_read = 0
+cache_write = 0
+
+[limit]
+context = 200_000
+output = 32_000
+
+[modalities]
+input = ["text"]
+output = ["text"]
diff --git a/providers/byteplus-coding-plan/models/kimi-k2.5.toml b/providers/byteplus-coding-plan/models/kimi-k2.5.toml
new file mode 100644
index 000000000..036f9bc2e
--- /dev/null
+++ b/providers/byteplus-coding-plan/models/kimi-k2.5.toml
@@ -0,0 +1,25 @@
+name = "Kimi K2.5"
+release_date = "2025-01"
+last_updated = "2025-01"
+attachment = false
+reasoning = true
+temperature = true
+tool_call = true
+open_weights = true
+
+[interleaved]
+field = "reasoning_content"
+
+[cost]
+input = 0
+output = 0
+cache_read = 0
+cache_write = 0
+
+[limit]
+context = 256_000
+output = 32_000
+
+[modalities]
+input = ["text"]
+output = ["text"]
diff --git a/providers/byteplus/provider.toml b/providers/byteplus-coding-plan/provider.toml
similarity index 85%
rename from providers/byteplus/provider.toml
rename to providers/byteplus-coding-plan/provider.toml
index afa21fad5..c3df90c02 100644
--- a/providers/byteplus/provider.toml
+++ b/providers/byteplus-coding-plan/provider.toml
@@ -1,4 +1,4 @@
-name = "BytePlus"
+name = "BytePlus Coding Plan"
 env = ["ARK_API_KEY"]
 npm = "@ai-sdk/openai-compatible"
 api = "https://ark.ap-southeast.bytepluses.com/api/coding/v3"
diff --git a/providers/volcengine/logo.svg b/providers/volcengine-coding-plan/logo.svg
similarity index 100%
rename from providers/volcengine/logo.svg
rename to providers/volcengine-coding-plan/logo.svg
diff --git a/providers/volcengine/models/ark-code-latest.toml b/providers/volcengine-coding-plan/models/ark-code-latest.toml
similarity index 100%
rename from providers/volcengine/models/ark-code-latest.toml
rename to providers/volcengine-coding-plan/models/ark-code-latest.toml
diff --git a/providers/volcengine-coding-plan/models/deepseek-v3.2.toml b/providers/volcengine-coding-plan/models/deepseek-v3.2.toml
new file mode 100644
index 000000000..fe9370be6
--- /dev/null
+++ b/providers/volcengine-coding-plan/models/deepseek-v3.2.toml
@@ -0,0 +1,25 @@
+name = "DeepSeek V3.2"
+release_date = "2025-01"
+last_updated = "2025-01"
+attachment = false
+reasoning = true
+temperature = true
+tool_call = true
+open_weights = true
+
+[interleaved]
+field = "reasoning_content"
+
+[cost]
+input = 0
+output = 0
+cache_read = 0
+cache_write = 0
+
+[limit]
+context = 128_000
+output = 32_000
+
+[modalities]
+input = ["text"]
+output = ["text"]
diff --git a/providers/volcengine-coding-plan/models/doubao-seed-code.toml b/providers/volcengine-coding-plan/models/doubao-seed-code.toml
new file mode 100644
index 000000000..2c52f88af
--- /dev/null
+++ b/providers/volcengine-coding-plan/models/doubao-seed-code.toml
@@ -0,0 +1,25 @@
+name = "Doubao Seed Code"
+release_date = "2025-01"
+last_updated = "2025-01"
+attachment = true
+reasoning = true
+temperature = true
+tool_call = true
+open_weights = false
+
+[interleaved]
+field = "reasoning_content"
+
+[cost]
+input = 0
+output = 0
+cache_read = 0
+cache_write = 0
+
+[limit]
+context = 256_000
+output = 32_000
+
+[modalities]
+input = ["text", "image"]
+output = ["text"]
diff --git a/providers/volcengine-coding-plan/models/glm-4.7.toml b/providers/volcengine-coding-plan/models/glm-4.7.toml
new file mode 100644
index 000000000..5946a282d
--- /dev/null
+++ b/providers/volcengine-coding-plan/models/glm-4.7.toml
@@ -0,0 +1,25 @@
+name = "GLM-4.7"
+release_date = "2025-01"
+last_updated = "2025-01"
+attachment = false
+reasoning = true
+temperature = true
+tool_call = true
+open_weights = true
+
+[interleaved]
+field = "reasoning_content"
+
+[cost]
+input = 0
+output = 0
+cache_read = 0
+cache_write = 0
+
+[limit]
+context = 200_000
+output = 128_000
+
+[modalities]
+input = ["text"]
+output = ["text"]
diff --git a/providers/volcengine-coding-plan/models/kimi-k2-thinking.toml b/providers/volcengine-coding-plan/models/kimi-k2-thinking.toml
new file mode 100644
index 000000000..7aebc5514
--- /dev/null
+++ b/providers/volcengine-coding-plan/models/kimi-k2-thinking.toml
@@ -0,0 +1,25 @@
+name = "Kimi K2 Thinking"
+release_date = "2025-01"
+last_updated = "2025-01"
+attachment = false
+reasoning = true
+temperature = true
+tool_call = true
+open_weights = true
+
+[interleaved]
+field = "reasoning_content"
+
+[cost]
+input = 0
+output = 0
+cache_read = 0
+cache_write = 0
+
+[limit]
+context = 200_000
+output = 32_000
+
+[modalities]
+input = ["text"]
+output = ["text"]
diff --git a/providers/volcengine-coding-plan/models/kimi-k2.5.toml b/providers/volcengine-coding-plan/models/kimi-k2.5.toml
new file mode 100644
index 000000000..036f9bc2e
--- /dev/null
+++ b/providers/volcengine-coding-plan/models/kimi-k2.5.toml
@@ -0,0 +1,25 @@
+name = "Kimi K2.5"
+release_date = "2025-01"
+last_updated = "2025-01"
+attachment = false
+reasoning = true
+temperature = true
+tool_call = true
+open_weights = true
+
+[interleaved]
+field = "reasoning_content"
+
+[cost]
+input = 0
+output = 0
+cache_read = 0
+cache_write = 0
+
+[limit]
+context = 256_000
+output = 32_000
+
+[modalities]
+input = ["text"]
+output = ["text"]
diff --git a/providers/volcengine/provider.toml b/providers/volcengine-coding-plan/provider.toml
similarity index 83%
rename from providers/volcengine/provider.toml
rename to providers/volcengine-coding-plan/provider.toml
index 979aada30..2d4548060 100644
--- a/providers/volcengine/provider.toml
+++ b/providers/volcengine-coding-plan/provider.toml
@@ -1,4 +1,4 @@
-name = "Volcengine"
+name = "Volcengine Coding Plan"
 env = ["ARK_API_KEY"]
 npm = "@ai-sdk/openai-compatible"
 api = "https://ark.cn-beijing.volces.com/api/coding/v3"
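
Usage note (not part of either patch): the provider.toml entries above point an OpenAI-compatible client at the Ark coding endpoint via the @ai-sdk/openai-compatible package, so consuming one of them might look roughly like the sketch below. The baseURL and the ARK_API_KEY environment variable are taken from the Volcengine provider.toml; the model ID string ("ark-code-latest") and the call shape are assumptions for illustration, not defined by this diff.

// Illustrative TypeScript sketch only; the model ID is assumed to match the
// TOML file name and should be verified against the provider's documentation.
import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
import { generateText } from "ai";

const volcengine = createOpenAICompatible({
  name: "volcengine-coding-plan",
  baseURL: "https://ark.cn-beijing.volces.com/api/coding/v3", // from provider.toml
  apiKey: process.env.ARK_API_KEY, // env var listed in provider.toml
});

const { text } = await generateText({
  model: volcengine("ark-code-latest"), // assumed model ID
  prompt: "Write a function that reverses a linked list.",
});

console.log(text);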