开发者快速入门
现在您可快速将无问AI接入到您的网站与工具当中,从而获得更加强劲的技术辅助研究能力。
import requests
import json
class WUWEN:
    """Minimal client for the WuWen native chat endpoint (SSE streaming)."""

    def __init__(self, api_key, chat_url):
        # Stored verbatim; no validation is performed here.
        self.api_key = api_key
        self.chat_url = chat_url

    def send_text(self, query, model="g1flash", fmt="off", format_keyword=None, stream=True):
        """Send a single user message and return the concatenated reply text.

        On HTTP/server errors an error string is returned instead of a reply.
        """
        body = {
            "messages": [{"role": "user", "content": query}],
            "img_fids": [],
            "API_KEY": self.api_key,
            "model": model,
            "format": fmt,
        }
        if format_keyword is not None:
            body["format_keyword"] = format_keyword
        reply_parts = []
        try:
            with requests.post(self.chat_url, json=body, stream=stream, timeout=120) as response:
                if response.status_code != 200:
                    print("Response body:", response.text)
                    return f"Error: HTTP {response.status_code}"
                content_type = response.headers.get("Content-Type", "")
                if "text/event-stream" not in content_type:
                    # A non-SSE body signals an unexpected (likely error) response.
                    print("Unexpected Content-Type:", content_type)
                    print("Response:", response.text)
                    return response.text
                for raw in response.iter_lines(decode_unicode=True):
                    if not raw or not raw.startswith("data: "):
                        continue
                    try:
                        event = json.loads(raw[6:])
                        if event.get("code", 0) != 0:
                            msg = event.get("msg", "Unknown error")
                            print(f"\n❌ Server error: {msg}")
                            return msg
                        piece = event['choices'][0]['delta'].get('content', '')
                        if piece:
                            reply_parts.append(piece)
                        if event['choices'][0].get('finish_reason') == "stop":
                            break
                    except Exception as e:
                        print(f"\n⚠️ Parse error on line: {raw}, error: {e}")
                        continue
                if stream:
                    print()
        except Exception as e:
            print(f"\n❌ Exception: {e}")
            return f"Request Failed: {e}"
        return "".join(reply_parts)
if __name__ == "__main__":
    client = WUWEN(
        api_key="",  # fill in your API key here
        chat_url="https://www.wwlib.cn/index.php/modelApi"
    )
    # Try a plain (non-formatted) conversation
    response = client.send_text(query="你好呀", fmt="off")
    print(response)
可用模型
查看定价 →
G1-Flash NEW
专为高频、低延迟任务设计。在保证极速响应的同时,维持了出色的指令遵循能力。
WUWEN-mini
性价比最高的通用模型。支持多模态输入,适合大多数常规对话与文本处理任务。
WUWEN-N1
具备深度推理能力的专家模型。擅长代码审计、复杂逻辑推演及网络攻防场景。
开始构建
手机端调用指南 (ChatBox App)
1. 下载 APP
2. 配置步骤
打开 APP,点击左上角的菜单栏 → 设置 → 模型。
本地调用指南 (ChatBox)
- 更快的响应速度:直连 API,无网页渲染损耗。
- 对话历史本地存储:保护隐私,随时回溯。
- 自定义预设:内置“翻译助手”、“代码专家”等常用 Prompt。
1. 下载客户端
2. 配置参数 (关键)
安装后打开软件,进入 设置 (Settings) → 模型提供方,按照下表填写:
3. 配置界面参考
本地调用指南 (NextChat)
1. 客户端下载 (v2.15.8)
2. 关键配置参数
安装后点击左下角的 设置 (Settings) 图标,找到 模型服务商 区域进行配置。
3. 设置界面图解
OpenAI 协议兼容
如果您正在迁移现有项目,或希望使用标准的
/v1/chat/completions 格式,请使用此接口。
1. 快速对话 (单轮流式)
最基础的调用方式,使用 openai 官方库进行流式输出。
from openai import OpenAI

# OpenAI-compatible client pointed at the WuWen gateway.
client = OpenAI(
    api_key="YOUR_API_KEY",  # put your API_KEY here
    base_url="https://www.wwlib.cn/index.php/v1"
)

def chat_stream():
    """Run one streamed chat turn and echo each delta to stdout."""
    completion = client.chat.completions.create(
        model="g1flash",
        messages=[{"role": "user", "content": "你好啊"}],
        stream=True,  # enable streaming output
    )
    for chunk in completion:
        # Each chunk carries an incremental piece of the reply.
        delta_text = chunk.choices[0].delta.content
        if delta_text:
            print(delta_text, end="", flush=True)

if __name__ == "__main__":
    chat_stream()
2. 图片输入 (Base64)
在 OpenAI 协议中,需要将图片转为 Base64 格式并嵌入到 `messages` 列表中。 推荐使用 mini 模型
import base64
import os
from openai import OpenAI

# 1. Initialize the client
client = OpenAI(
    api_key="YOUR_API_KEY",
    base_url="https://www.wwlib.cn/index.php/v1"
)

def chat_with_model():
    """Interactive multimodal REPL.

    Images are buffered (Base64) via 'img:<path>' commands and attached to the
    next text message; 'img:clear' drops the buffer and 'exit' quits.
    """
    history_messages = []   # full OpenAI-style conversation history
    pending_images = []     # buffered Base64 payloads awaiting the next send
    print("\n" + "=" * 40)
    print(" AI 多模态对话系统 (n1/g1flash/mini)")
    print("=" * 40)
    print("1. 发图: 输入 'img:路径' (例如 img:C:/1.jpg)")
    print("2. 发送: 输入文字指令后回车,将带上所有已就绪图片")
    print("3. 清空缓存图片: 输入 'img:clear'")
    print("4. 退出: 输入 'exit'")
    print("=" * 40)
    while True:
        # Dynamic prompt showing how many images are queued
        count = len(pending_images)
        prompt = f"\n[待发图:{count}] 用户 > " if count > 0 else "\n用户 > "
        user_input = input(prompt).strip()
        if not user_input: continue
        if user_input.lower() == 'exit': break
        # --- Handle the image-buffering commands ---
        if user_input.lower().startswith("img:"):
            cmd = user_input[4:].strip()
            if cmd.lower() == 'clear':
                pending_images = []
                print(" [已清空] 所有待发送图片已清除。")
                continue
            # Parse paths (comma-separated for multiple images)
            paths = [p.strip().strip('[]') for p in cmd.split(',') if p.strip()]
            for path in paths:
                if os.path.exists(path):
                    with open(path, "rb") as f:
                        b64 = base64.b64encode(f.read()).decode('utf-8')
                    pending_images.append(b64)
                    print(f" [就绪] {os.path.basename(path)}")
                else:
                    print(f" [错误] 找不到路径: {path}")
            continue
        # --- Issue the actual chat request ---
        # Build the spec-compliant content array
        current_content = []
        # Text part first
        current_content.append({"type": "text", "text": user_input})
        # Then the image parts
        for b64_data in pending_images:
            current_content.append({
                "type": "image_url",
                # NOTE(review): MIME type is hard-coded as jpeg even for PNG
                # uploads — presumably the backend tolerates this; confirm.
                "image_url": {"url": f"data:image/jpeg;base64,{b64_data}"}
            })
        history_messages.append({"role": "user", "content": current_content})
        print("AI: ", end="")
        full_reply = ""
        try:
            response = client.chat.completions.create(
                model="mini",
                messages=history_messages,
                stream=True
            )
            for chunk in response:
                txt = chunk.choices[0].delta.content or ""
                print(txt, end="", flush=True)
                full_reply += txt
            history_messages.append({"role": "assistant", "content": full_reply})
            print()
            pending_images = []  # sent successfully: clear the image buffer
        except Exception as e:
            # On failure the user message (with images) stays in history;
            # a retry will therefore duplicate it — known limitation.
            print(f"\n[请求失败] {e}")

if __name__ == "__main__":
    chat_with_model()
3. 格式化输出 (Extra Body)
在 OpenAI SDK 中,通过 extra_body 参数透传自定义字段,实现强制 JSON 格式化。
from openai import OpenAI
import json

client = OpenAI(
    api_key="YOUR_API_KEY",
    base_url="https://www.wwlib.cn/index.php/v1"
)

# 1. Describe the desired JSON fields as a dict (easy to read and maintain)
format_dict = {
    "vuln": "是否是攻击行为(true/false)",
    "risk_level": "风险等级(High/Medium/Low/None)",
    "analysis": "简短的分析结果"
}

# 2. Serialize the dict into the backend's "key:value|key:value" string form
kw_parts = []
for field, description in format_dict.items():
    kw_parts.append(f"{field}:{description}")
kw_string = "|".join(kw_parts)

# 3. Issue the request
response = client.chat.completions.create(
    model="mini",
    messages=[{"role": "user", "content": "分析这段日志:SELECT * FROM users WHERE id='1' OR '1'='1'"}],
    stream=True,
    # Key point: pass the custom fields through via extra_body
    extra_body={
        "format": "on",            # triggers the backend formatting node
        "format_keyword": kw_string  # the serialized keyword string
    }
)

# Print the result
for chunk in response:
    print(chunk.choices[0].delta.content or "", end="", flush=True)
联系我们
获取帮助 & 反馈
请扫描右侧二维码关注公众号,点击菜单栏:
【用户服务】 → 【人工服务】
账单与用量
充值记录
详细日志
定价策略
基础对话费率
长文本资源附加费
基础对话费 × (1 + 资源附加费率)
API 密钥
| API Key | 状态 | 操作 |
|---|---|---|
|
加载中...
|
使用中 |
接入文档
stream=True)。
1. 快速使用
https://www.wwlib.cn/index.php/v1/chat/completions
https://www.wwlib.cn/index.php/modelApi
方式 A:使用 OpenAI 官方 SDK (Python)
from openai import OpenAI

# 1. Configure the client
client = OpenAI(
    api_key="YOUR_API_KEY",
    base_url="https://www.wwlib.cn/index.php/v1"
)

# 2. Start a single-turn conversation
response = client.chat.completions.create(
    model="g1flash",  # options: g1flash / mini / n1
    messages=[{"role": "user", "content": "你好呀"}],
    stream=True
)

# 3. Consume the streamed output
print("AI: ", end="")
for chunk in response:
    delta = chunk.choices[0].delta.content or ""
    print(delta, end="", flush=True)
方式 B:原生 HTTP 请求 (JSON Body)
{
"messages": [
{ "role": "user", "content": "这张图片里有什么?" }
],
"img_fids": [],
"API_KEY": "",
"model": "g1flash"
}
- SDK 环境需求: Python 3.6+
- 鉴权方式: 使用前请先在控制台获取
API KEY。
📸 图片上传规范
api_key: 您的 API 密钥
file: 图片文件对象
{
"code": 0,
"status": "success",
"data": {
"fid": "ec03faab299b9a198c92e44e66e98dbc",
"ext": "png"
}
}
{
"messages": [
{
"role": "user",
"content": "这张图片里有什么?"
}
],
"img_fids": ["ec03faab299b9a198c92e44e66e98dbc"],
"API_KEY": "",
"model": "g1flash"
}
img_fids 数组最多包含 3 个 fid。
2. 连续对话 (Context)
API 是无状态 (Stateless) 的。要实现“多轮对话”,用户需要自己维护上下文列表,并在每次请求时将完整的历史记录发送给服务器。
[
{ "role": "user", "content": "我想去北京旅游" },
{ "role": "assistant", "content": "北京是个好地方,推荐您去故宫和长城..." },
{ "role": "user", "content": "那边天气怎么样?" } <-- 本次提问追加在末尾
]
3. 响应解析 (SSE Stream)
响应 Content-Type 为 text/event-stream。服务器按行推送 JSON 数据,格式为 data: {...}。
响应参数拆解
完整的 SSE 响应流示例
data: {"choices":[{"delta":{"content":"你好","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"呀~","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"👋","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":" 看到你来,","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"我连小尾巴","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"都忍不住翘起来了","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"(虽然我是AI,","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"但此刻真的超","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"开心!)✨","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":" \n有什么想聊的","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"、想问的、","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"或者正被什么","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"小事绊住脚","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"?我在这儿,认真","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"听,也全力","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"帮~ 🌟 \n","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"(悄悄说:","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"哪怕只是想发","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"个呆、吐","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"吐槽,我也超级","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"欢迎哦~)","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42","totalTokens":942},"finish_reason":"stop","index":0}]}
附录:Python SDK 完整代码
包含:图片上传校验与解析、自动上下文管理、SSE流式解析 的完整封装。
import requests
import json
import os
class MiniModelClient:
    """Client for the WuWen native chat API: image upload, SSE streaming,
    structured output, and automatic multi-turn context management."""

    def __init__(self, api_key, chat_url, upload_url):
        self.api_key = api_key        # key used for both chat and upload auth
        self.chat_url = chat_url      # SSE chat endpoint
        self.upload_url = upload_url  # multipart image-upload endpoint
        self.history = []             # alternating user/assistant messages
        self.turn_count = 0           # number of send() calls so far

    def upload_images_from_str(self, path_str):
        """Upload comma-separated image paths (max 3) and return their FIDs.

        Missing files and failed uploads are reported and skipped, so the
        returned list may be shorter than the input.
        """
        raw_paths = [p.strip() for p in path_str.split(",") if p.strip()]
        if len(raw_paths) > 3:
            # Server-side limit: at most 3 images per message.
            print("⚠️ 最多支持 3 张图片,将处理前 3 张。")
            raw_paths = raw_paths[:3]
        fid_list = []
        for i, path in enumerate(raw_paths):
            if not os.path.isfile(path):
                print(f"❌ 文件不存在: {path}")
                continue
            try:
                print(f"正在上传第 {i + 1} 张图片: {os.path.basename(path)} ...", end=" ", flush=True)
                with open(path, "rb") as f:
                    files = {"file": (os.path.basename(path), f)}
                    data = {"api_key": self.api_key}
                    resp = requests.post(self.upload_url, data=data, files=files, timeout=30)
                result = resp.json()
                if result.get("code") == 0:
                    fid = result["data"]["fid"]
                    fid_list.append(fid)
                    print(f"✅ 成功! FID: {fid}")
                else:
                    print(f"❌ 失败: {result.get('msg')}")
            except Exception as e:
                # Best-effort: report and continue with the next file.
                print(f"⚠️ 异常: {e}")
        return fid_list

    def send(
        self,
        query,
        fid_list=None,
        model="g1flash",
        fmt="off",
        fmt_schema=None,      # BUGFIX: was referenced in the body but missing
                              # from the signature, raising NameError on every
                              # call; position matches the other SDK copies.
        format_keyword=None,
        stream=True
    ):
        """Send one user message (plus optional image FIDs) and return the reply.

        Args:
            query: user message text.
            fid_list: list of uploaded image FIDs to attach, or None.
            fmt: "on" enables the backend's structured-JSON mode.
            fmt_schema: dict of {field: description}; takes precedence over
                format_keyword when both are given. Raises ValueError if not a dict.
            format_keyword: raw keyword spec forwarded to the backend.
            stream: echo deltas to stdout while receiving.

        Returns the accumulated reply text, or an error-description string.
        """
        self.turn_count += 1
        user_msg = {"role": "user", "content": query}
        payload = {
            "messages": self.history + [user_msg],
            "img_fids": fid_list or [],
            "API_KEY": self.api_key,
            "model": model,
            "format": fmt,
        }
        # BUGFIX: format_keyword used to be placed in the payload
        # unconditionally (even as None), which made the elif below dead code
        # and sent a spurious "format_keyword": null field.
        if fmt_schema is not None:
            if not isinstance(fmt_schema, dict):
                raise ValueError("fmt_schema 必须是一个字典,例如 {'field': '描述'}")
            payload["fmt_schema"] = fmt_schema
        elif format_keyword is not None:
            payload["format_keyword"] = format_keyword
        full_reply = ""
        try:
            with requests.post(self.chat_url, json=payload, stream=True, timeout=120) as response:
                if response.status_code != 200:
                    error_msg = f"Error: HTTP {response.status_code} - {response.text[:200]}"
                    print(f"❌ 请求失败: {error_msg}")
                    return error_msg
                content_type = response.headers.get("Content-Type", "")
                if "application/json" in content_type:
                    # A plain-JSON body (instead of SSE) signals a server-side
                    # error; surface its message instead of streaming.
                    try:
                        err_data = response.json()
                        if "msg" in err_data:
                            full_reply = f"服务端错误 (Code {err_data.get('code')}): {err_data['msg']}"
                        else:
                            full_reply = f"服务端返回异常 JSON: {err_data}"
                    except Exception:
                        full_reply = f"服务端返回无法解析的 JSON: {response.text}"
                    print(f"❌ {full_reply}")
                    return full_reply
                if stream:
                    print("", end="", flush=True)
                for line in response.iter_lines(decode_unicode=True):
                    if not line:
                        continue
                    if line.startswith("data: "):
                        try:
                            json_str = line[6:]
                            if not json_str.strip():
                                continue
                            data = json.loads(json_str)
                            # In-stream error frame: {"code": <nonzero>, "msg": ...}
                            if 'code' in data and data['code'] != 0 and 'msg' in data:
                                print(f"\n❌ 流式错误: {data['msg']}")
                                full_reply = data['msg']
                                break
                            content = data['choices'][0]['delta'].get('content', '')
                            if content:
                                full_reply += content
                                if stream:
                                    print(content, end="", flush=True)
                            if data['choices'][0].get('finish_reason') == "stop":
                                break
                        except json.JSONDecodeError:
                            continue  # ignore malformed frames
                        except Exception:
                            continue  # ignore frames with an unexpected shape
                if full_reply and not full_reply.startswith("服务端错误"):
                    # Persist the exchange so the next send() carries context.
                    self.history.append(user_msg)
                    self.history.append({"role": "assistant", "content": full_reply})
                if stream:
                    print("\n")
        except Exception as e:
            full_reply = f"Request Failed: {e}"
            print(f"\n❌ 请求异常: {e}")
        return full_reply

    def clear_history(self):
        """Reset the conversation context and the turn counter."""
        self.history = []
        self.turn_count = 0
        print("✨ 已重置对话上下文和轮次计数")
if __name__ == "__main__":
    client = MiniModelClient(
        api_key="",  # put your API KEY here
        chat_url="https://www.wwlib.cn/index.php/modelApi",
        upload_url="https://www.wwlib.cn/index.php/apifile/upfile"
    )
    pending_fids = []  # uploaded FIDs waiting to be attached to the next message
    print("=" * 60)
    print("1. 输入 'img:路径' 先上传图片(支持逗号分隔多个路径)")
    print("2. 图片就绪后,直接输入文字问题并发送")
    print("3. 输入 'clear' 清空记忆和图片,输入 'exit' 退出")
    print("=" * 60)
    while True:
        # Prompt shows the queued-image count and the upcoming turn number.
        img_status = f"[{len(pending_fids)}图] " if pending_fids else ""
        prompt_str = f"{img_status}第 {client.turn_count + 1} 轮 | 用户 >> "
        user_input = input(prompt_str).strip()
        if not user_input:
            continue
        if user_input.lower() == 'exit':
            break
        if user_input.lower() == 'clear':
            client.clear_history()
            pending_fids = []
            continue
        if user_input.lower().startswith("img:"):
            # Upload now; attach the FIDs with the next text message.
            paths_part = user_input[4:].strip()
            new_fids = client.upload_images_from_str(paths_part)
            pending_fids.extend(new_fids)
            print(f"💡 图片已就绪,当前共计 {len(pending_fids)} 张图片。请继续输入您的问题。")
            continue
        # NOTE(review): unlike the other example drivers, this call does not
        # pass fid_list=pending_fids, so uploaded images are never actually
        # attached to the request — confirm whether this is intentional.
        client.send(
            query=user_input,
        )
        pending_fids = []
文本生成
最基础的对话功能。支持多轮对话上下文以及流式输出。
/index.php/modelApi 的完整 JSON 参数列表。
{
// [必填] 鉴权密钥
"API_KEY": "YOUR_API_KEY_HERE",
// [必填] 模型名称 (g1flash / mini / n1)
"model": "g1flash",
// [必填] 对话列表
"messages": [
{ "role": "user", "content": "你好,请生成一篇关于AI的文章" }
],
// [必填] 必须开启流式传输,否则可能超时
"stream": true,
// [可选] 图片文件ID列表 (仅 mini/n1 模型可用)
"img_fids": [
"fid_example_1",
"fid_example_2"
],
// [可选] 是否开启 JSON 结构化输出 ("on" / "off")
"format": "off",
// [可选] 仅当 format="on" 时生效,定义输出字段
"format_keyword": {
"title": "文章标题",
"summary": "文章摘要"
}
}
关于 Role (角色) 字段
messages) 支持以下两种角色类型:
user: 用户输入的内容。
assistant: 模型返回的历史内容(用于维持上下文)。
1. 基础单轮对话 (完整代码)
import requests
import json

# Configuration
API_KEY = "API_KEY"
URL = "https://www.wwlib.cn/index.php/modelApi"

def chat_single_turn():
    """Send one user message and stream the SSE reply to stdout.

    Prints an error line and returns None on HTTP failure or exceptions.
    """
    # Build the request body
    payload = {
        "API_KEY": API_KEY,
        "model": "g1flash",  # recommended for pure-text tasks
        "messages": [
            # To set a persona, state it directly in this message
            {"role": "user", "content": "sql注入漏洞的危害有哪些"}
        ],
        "stream": True  # enable streaming output
    }
    print(">", end=" ", flush=True)
    try:
        # Issue the POST request
        with requests.post(URL, json=payload, stream=True, timeout=60) as response:
            if response.status_code != 200:
                print(f"\n请求失败: {response.status_code} - {response.text}")
                return
            # Read the SSE response line by line
            for line in response.iter_lines(decode_unicode=True):
                if line and line.startswith("data: "):
                    try:
                        json_str = line[6:]  # strip the 'data: ' prefix
                        data = json.loads(json_str)
                        # Extract the text delta
                        content = data['choices'][0]['delta'].get('content', '')
                        if content:
                            print(content, end="", flush=True)
                    # BUGFIX: was a bare `except:` that also swallows
                    # KeyboardInterrupt/SystemExit; catch parse/shape errors only.
                    except (json.JSONDecodeError, KeyError, IndexError):
                        continue
    except Exception as e:
        print(f"\n发生异常: {e}")

if __name__ == "__main__":
    chat_single_turn()
2. 多轮对话上下文 (完整代码)
user 和 assistant 的历史列表来实现记忆功能。
import requests
import json

API_KEY = "YOUR_API_KEY"
URL = "https://www.wwlib.cn/index.php/modelApi"

def chat_with_history():
    """Demonstrate multi-turn context: resend the full history each request."""
    # Simulated prior conversation.
    # Note: no system role is used; the persona is folded into the first user message.
    history = [
        {
            "role": "user",
            "content": "你现在是一位资深的 Python 面试官,请对我进行模拟面试。请先问我一个基础问题。"
        },
        {
            "role": "assistant",
            "content": "好的,面试开始。请问:Python 中的列表(List)和元组(Tuple)主要区别是什么?"
        }
    ]
    # The user's new reply for this turn
    new_question = "列表是可变的,元组是不可变的。而且元组通常用于存储异构数据。"
    # Append it to the history list
    history.append({"role": "user", "content": new_question})
    payload = {
        "API_KEY": API_KEY,
        "model": "mini",  # mini is recommended for more complex reasoning
        "messages": history,  # send the complete history
        "stream": True
    }
    print(f"用户: {new_question}")
    print("面试官: ", end="", flush=True)
    try:
        # BUGFIX: added a timeout so a stalled connection cannot hang forever
        # (consistent with the other examples in this document).
        with requests.post(URL, json=payload, stream=True, timeout=120) as response:
            for line in response.iter_lines(decode_unicode=True):
                if line and line.startswith("data: "):
                    try:
                        data = json.loads(line[6:])
                        content = data['choices'][0]['delta'].get('content', '')
                        print(content, end="", flush=True)
                    # BUGFIX: was a bare `except:`; catch parse/shape errors only.
                    except (json.JSONDecodeError, KeyError, IndexError):
                        continue
            print("\n")
    except Exception as e:
        print(f"Error: {e}")

if __name__ == "__main__":
    chat_with_history()
3. 响应数据格式 (SSE Stream)
接口返回标准的 Server-Sent Events 流。每一行以 data: 开头,后跟 JSON 对象。
// 1. 内容生成中 (finish_reason: null)
data: {"choices":[{"delta":{"content":"你好","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"呀~","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"👋","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":" 看到你来,","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"我连小尾巴","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"都忍不住翘起来了","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"(虽然我是AI,","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"但此刻真的超","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"开心!)✨","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":" \n有什么想聊的","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"、想问的、","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"或者正被什么","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"小事绊住脚","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"?我在这儿,认真","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"听,也全力","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"帮~ 🌟 \n","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"(悄悄说:","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"哪怕只是想发","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"个呆、吐","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"吐槽,我也超级","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"欢迎哦~)","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42"},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42","totalTokens":942},"finish_reason":"stop","index":0}]}
图片输入 (Vision)
让模型“看见”图片。该功能分为两步:先上传图片获取 fid,再将其传入对话接口。
- WuWen-mini (推荐)
- WuWen-N1 (深度推理版)
1. 图片上传接口
{
"code": 0,
"status": "success",
"data": {
"fid": "967b1a982c0a643cf3375e8eb3c67ccd",
"ext": "png"
}
}
// 错误码 10016: 文件过大
{ "code": 10016, "status": "error", "msg": "文件大小不能超过3MB" }
// 错误码 10012: 未上传文件
{ "code": 10012, "status": "error", "msg": "未检测到上传文件" }
// 错误码 10011: 密钥无效
{ "code": 10011, "status": "error", "msg": "API KEY无效" }
2. 携带图片的多轮对话 (JSON Payload)
在对话接口中,img_fids 是顶级参数,独立于 messages 列表。
{
"API_KEY": "YOUR_API_KEY",
"model": "mini",
// 关键点:本次请求需要模型看到的图片 ID 列表
"img_fids": ["ec03faab299b9a198c92e44e66e98dbc"],
"messages": [
// 历史记录 (纯文本即可)
{ "role": "user", "content": "这张图里有多少只猫?" },
{ "role": "assistant", "content": "图里共有 2 只猫,一只是白色的,一只是橘色的。" },
// 本次提问 (针对同一张图继续追问)
{ "role": "user", "content": "那只白色的猫在做什么?" }
],
"stream": true
}
3. 完整 SDK 代码示例
包含图片上传、校验以及携带 FID 进行对话的完整流程。
import requests
import json
import os
class MiniModelClient:
    """Client for the WuWen native chat API: image upload, SSE streaming,
    structured output, and automatic multi-turn context management."""

    def __init__(self, api_key, chat_url, upload_url):
        self.api_key = api_key        # key used for both chat and upload auth
        self.chat_url = chat_url      # SSE chat endpoint
        self.upload_url = upload_url  # multipart image-upload endpoint
        self.history = []             # alternating user/assistant messages
        self.turn_count = 0           # number of send() calls so far

    def upload_images_from_str(self, path_str):
        """Upload comma-separated image paths (max 3) and return their FIDs.

        Missing files and failed uploads are reported and skipped, so the
        returned list may be shorter than the input.
        """
        raw_paths = [p.strip() for p in path_str.split(",") if p.strip()]
        if len(raw_paths) > 3:
            # Server-side limit: at most 3 images per message.
            print("⚠️ 最多支持 3 张图片,将处理前 3 张。")
            raw_paths = raw_paths[:3]
        fid_list = []
        for i, path in enumerate(raw_paths):
            if not os.path.isfile(path):
                print(f"❌ 文件不存在: {path}")
                continue
            try:
                print(f"正在上传第 {i + 1} 张图片: {os.path.basename(path)} ...", end=" ", flush=True)
                with open(path, "rb") as f:
                    files = {"file": (os.path.basename(path), f)}
                    data = {"api_key": self.api_key}
                    resp = requests.post(self.upload_url, data=data, files=files, timeout=30)
                result = resp.json()
                if result.get("code") == 0:
                    fid = result["data"]["fid"]
                    fid_list.append(fid)
                    print(f"✅ 成功! FID: {fid}")
                else:
                    print(f"❌ 失败: {result.get('msg')}")
            except Exception as e:
                # Best-effort: report and continue with the next file.
                print(f"⚠️ 异常: {e}")
        return fid_list

    def send(
        self,
        query,
        fid_list=None,
        model="g1flash",
        fmt="off",
        fmt_schema=None,
        format_keyword=None,
        stream=True
    ):
        """Send one user message (plus optional image FIDs) and return the reply.

        Args:
            query: user message text.
            fid_list: list of uploaded image FIDs to attach, or None.
            fmt: "on" enables the backend's structured-JSON mode.
            fmt_schema: dict of {field: description}; takes precedence over
                format_keyword when both are given. Raises ValueError if not a dict.
            format_keyword: raw keyword spec forwarded to the backend.
            stream: echo deltas to stdout while receiving.

        Returns the accumulated reply text, or an error-description string.
        """
        self.turn_count += 1
        user_msg = {"role": "user", "content": query}
        payload = {
            "messages": self.history + [user_msg],
            "img_fids": fid_list or [],
            "API_KEY": self.api_key,
            "model": model,
            "format": fmt,
        }
        # BUGFIX: format_keyword used to be placed in the payload
        # unconditionally (even as None), which made the elif below dead code
        # and sent a spurious "format_keyword": null field.
        if fmt_schema is not None:
            if not isinstance(fmt_schema, dict):
                raise ValueError("fmt_schema 必须是一个字典,例如 {'field': '描述'}")
            payload["fmt_schema"] = fmt_schema
        elif format_keyword is not None:
            payload["format_keyword"] = format_keyword
        full_reply = ""
        try:
            with requests.post(self.chat_url, json=payload, stream=True, timeout=120) as response:
                if response.status_code != 200:
                    error_msg = f"Error: HTTP {response.status_code} - {response.text[:200]}"
                    print(f"❌ 请求失败: {error_msg}")
                    return error_msg
                content_type = response.headers.get("Content-Type", "")
                if "application/json" in content_type:
                    # A plain-JSON body (instead of SSE) signals a server-side
                    # error; surface its message instead of streaming.
                    try:
                        err_data = response.json()
                        if "msg" in err_data:
                            full_reply = f"服务端错误 (Code {err_data.get('code')}): {err_data['msg']}"
                        else:
                            full_reply = f"服务端返回异常 JSON: {err_data}"
                    except Exception:
                        full_reply = f"服务端返回无法解析的 JSON: {response.text}"
                    print(f"❌ {full_reply}")
                    return full_reply
                if stream:
                    print("", end="", flush=True)
                for line in response.iter_lines(decode_unicode=True):
                    if not line:
                        continue
                    if line.startswith("data: "):
                        try:
                            json_str = line[6:]
                            if not json_str.strip():
                                continue
                            data = json.loads(json_str)
                            # In-stream error frame: {"code": <nonzero>, "msg": ...}
                            if 'code' in data and data['code'] != 0 and 'msg' in data:
                                print(f"\n❌ 流式错误: {data['msg']}")
                                full_reply = data['msg']
                                break
                            content = data['choices'][0]['delta'].get('content', '')
                            if content:
                                full_reply += content
                                if stream:
                                    print(content, end="", flush=True)
                            if data['choices'][0].get('finish_reason') == "stop":
                                break
                        except json.JSONDecodeError:
                            continue  # ignore malformed frames
                        except Exception:
                            continue  # ignore frames with an unexpected shape
                if full_reply and not full_reply.startswith("服务端错误"):
                    # Persist the exchange so the next send() carries context.
                    self.history.append(user_msg)
                    self.history.append({"role": "assistant", "content": full_reply})
                if stream:
                    print("\n")
        except Exception as e:
            full_reply = f"Request Failed: {e}"
            print(f"\n❌ 请求异常: {e}")
        return full_reply

    def clear_history(self):
        """Reset the conversation context and the turn counter."""
        self.history = []
        self.turn_count = 0
        print("✨ 已重置对话上下文和轮次计数")
if __name__ == "__main__":
    client = MiniModelClient(
        api_key="",  # put your API KEY here
        chat_url="https://www.wwlib.cn/index.php/modelApi",
        upload_url="https://www.wwlib.cn/index.php/apifile/upfile"
    )
    pending_fids = []  # uploaded FIDs waiting to be attached to the next message
    print("=" * 60)
    print("1. 输入 'img:路径' 先上传图片(支持逗号分隔多个路径)")
    print("2. 图片就绪后,直接输入文字问题并发送")
    print("3. 输入 'clear' 清空记忆和图片,输入 'exit' 退出")
    print("=" * 60)
    while True:
        # Prompt shows the queued-image count and the upcoming turn number.
        img_status = f"[{len(pending_fids)}图] " if pending_fids else ""
        prompt_str = f"{img_status}第 {client.turn_count + 1} 轮 | 用户 >> "
        user_input = input(prompt_str).strip()
        if not user_input:
            continue
        if user_input.lower() == 'exit':
            break
        if user_input.lower() == 'clear':
            client.clear_history()
            pending_fids = []
            continue
        if user_input.lower().startswith("img:"):
            # Upload now; attach the FIDs with the next text message.
            paths_part = user_input[4:].strip()
            new_fids = client.upload_images_from_str(paths_part)
            pending_fids.extend(new_fids)
            print(f"💡 图片已就绪,当前共计 {len(pending_fids)} 张图片。请继续输入您的问题。")
            continue
        client.send(
            query=user_input,
            model='mini',
            fid_list=pending_fids,
        )
        pending_fids = []  # FIDs are attached per message; clear after sending
结构化输出 (JSON Mode)
强制模型输出符合特定键值定义的 JSON 格式数据。适用于自动化数据提取、情感分析、代码审计等场景。
format="on")。
❌ 请勿在 WuWen-N1 或其他推理模型中使用此参数,否则可能会导致输出格式混乱或报错。
format: 必须设置为 "on" 以开启 JSON 模式。
format_keyword: (Dict) 定义 JSON 的键名(Key)和对应的描述(Value)。描述越详细,结果越准确。
1. 请求包格式 (JSON)
{
"API_KEY": "YOUR_API_KEY",
"model": "g1flash",
"messages": [
{ "role": "user", "content": "检测这段日志:'admin' union select 1,2#" }
],
"format": "on",
"format_keyword": {
"vuln": "是否是攻击行为,是为true,否为false",
"type": "攻击类型,如SQL注入、XSS等",
"reason": "判断依据简述"
}
}
g1flash 或 WuWen-mini
2. 响应数据 (SSE)
模型会返回标准的 JSON 字符串。注意:在 SSE 流式传输中,通常会在第一个包或者分片拼接后得到完整的 JSON 字符串。
data: {"choices":[{"delta":{"content":"{\"vuln\":\"true\", \"type\":\"SQL注入\", \"reason\":\"检测到union select关键字\"}","ssid":"...","chatID":"..."},"finish_reason":null,"index":0}]}
data: {"choices":[{"delta":{"content":"","ssid":"b2e416fce3ce4d02ba5d559bda6c1d81","chatID":"wuwen-3dba45e75f-b2a25f42","totalTokens":120},"finish_reason":"stop","index":0}]}
3. Python SDK 实现
import requests
import json
import os
class MiniModelClient:
    """Client for the WuWen native chat API: image upload, SSE streaming,
    structured output, and automatic multi-turn context management."""

    def __init__(self, api_key, chat_url, upload_url):
        self.api_key = api_key        # key used for both chat and upload auth
        self.chat_url = chat_url      # SSE chat endpoint
        self.upload_url = upload_url  # multipart image-upload endpoint
        self.history = []             # alternating user/assistant messages
        self.turn_count = 0           # number of send() calls so far

    def upload_images_from_str(self, path_str):
        """Upload comma-separated image paths (max 3) and return their FIDs.

        Missing files and failed uploads are reported and skipped, so the
        returned list may be shorter than the input.
        """
        raw_paths = [p.strip() for p in path_str.split(",") if p.strip()]
        if len(raw_paths) > 3:
            # Server-side limit: at most 3 images per message.
            print("⚠️ 最多支持 3 张图片,将处理前 3 张。")
            raw_paths = raw_paths[:3]
        fid_list = []
        for i, path in enumerate(raw_paths):
            if not os.path.isfile(path):
                print(f"❌ 文件不存在: {path}")
                continue
            try:
                print(f"正在上传第 {i + 1} 张图片: {os.path.basename(path)} ...", end=" ", flush=True)
                with open(path, "rb") as f:
                    files = {"file": (os.path.basename(path), f)}
                    data = {"api_key": self.api_key}
                    resp = requests.post(self.upload_url, data=data, files=files, timeout=30)
                result = resp.json()
                if result.get("code") == 0:
                    fid = result["data"]["fid"]
                    fid_list.append(fid)
                    print(f"✅ 成功! FID: {fid}")
                else:
                    print(f"❌ 失败: {result.get('msg')}")
            except Exception as e:
                # Best-effort: report and continue with the next file.
                print(f"⚠️ 异常: {e}")
        return fid_list

    def send(
        self,
        query,
        fid_list=None,
        model="g1flash",
        fmt="off",
        fmt_schema=None,
        format_keyword=None,
        stream=True
    ):
        """Send one user message (plus optional image FIDs) and return the reply.

        Args:
            query: user message text.
            fid_list: list of uploaded image FIDs to attach, or None.
            fmt: "on" enables the backend's structured-JSON mode.
            fmt_schema: dict of {field: description}; takes precedence over
                format_keyword when both are given. Raises ValueError if not a dict.
            format_keyword: raw keyword spec forwarded to the backend.
            stream: echo deltas to stdout while receiving.

        Returns the accumulated reply text, or an error-description string.
        """
        self.turn_count += 1
        user_msg = {"role": "user", "content": query}
        payload = {
            "messages": self.history + [user_msg],
            "img_fids": fid_list or [],
            "API_KEY": self.api_key,
            "model": model,
            "format": fmt,
        }
        # BUGFIX: format_keyword used to be placed in the payload
        # unconditionally (even as None), which made the elif below dead code
        # and sent a spurious "format_keyword": null field.
        if fmt_schema is not None:
            if not isinstance(fmt_schema, dict):
                raise ValueError("fmt_schema 必须是一个字典,例如 {'field': '描述'}")
            payload["fmt_schema"] = fmt_schema
        elif format_keyword is not None:
            payload["format_keyword"] = format_keyword
        full_reply = ""
        try:
            with requests.post(self.chat_url, json=payload, stream=True, timeout=120) as response:
                if response.status_code != 200:
                    error_msg = f"Error: HTTP {response.status_code} - {response.text[:200]}"
                    print(f"❌ 请求失败: {error_msg}")
                    return error_msg
                content_type = response.headers.get("Content-Type", "")
                if "application/json" in content_type:
                    # A plain-JSON body (instead of SSE) signals a server-side
                    # error; surface its message instead of streaming.
                    try:
                        err_data = response.json()
                        if "msg" in err_data:
                            full_reply = f"服务端错误 (Code {err_data.get('code')}): {err_data['msg']}"
                        else:
                            full_reply = f"服务端返回异常 JSON: {err_data}"
                    except Exception:
                        full_reply = f"服务端返回无法解析的 JSON: {response.text}"
                    print(f"❌ {full_reply}")
                    return full_reply
                if stream:
                    print("", end="", flush=True)
                for line in response.iter_lines(decode_unicode=True):
                    if not line:
                        continue
                    if line.startswith("data: "):
                        try:
                            json_str = line[6:]
                            if not json_str.strip():
                                continue
                            data = json.loads(json_str)
                            # In-stream error frame: {"code": <nonzero>, "msg": ...}
                            if 'code' in data and data['code'] != 0 and 'msg' in data:
                                print(f"\n❌ 流式错误: {data['msg']}")
                                full_reply = data['msg']
                                break
                            content = data['choices'][0]['delta'].get('content', '')
                            if content:
                                full_reply += content
                                if stream:
                                    print(content, end="", flush=True)
                            if data['choices'][0].get('finish_reason') == "stop":
                                break
                        except json.JSONDecodeError:
                            continue  # ignore malformed frames
                        except Exception:
                            continue  # ignore frames with an unexpected shape
                if full_reply and not full_reply.startswith("服务端错误"):
                    # Persist the exchange so the next send() carries context.
                    self.history.append(user_msg)
                    self.history.append({"role": "assistant", "content": full_reply})
                if stream:
                    print("\n")
        except Exception as e:
            full_reply = f"Request Failed: {e}"
            print(f"\n❌ 请求异常: {e}")
        return full_reply

    def clear_history(self):
        """Reset the conversation context and the turn counter."""
        self.history = []
        self.turn_count = 0
        print("✨ 已重置对话上下文和轮次计数")
if __name__ == "__main__":
    client = MiniModelClient(
        api_key="",  # put your API KEY here
        chat_url="https://www.wwlib.cn/index.php/modelApi",
        upload_url="https://www.wwlib.cn/index.php/apifile/upfile"
    )
    pending_fids = []  # uploaded FIDs waiting to be attached to the next message
    print("=" * 60)
    print("1. 输入 'img:路径' 先上传图片(支持逗号分隔多个路径)")
    print("2. 图片就绪后,直接输入文字问题并发送")
    print("3. 输入 'clear' 清空记忆和图片,输入 'exit' 退出")
    print("=" * 60)
    while True:
        # Prompt shows the queued-image count and the upcoming turn number.
        img_status = f"[{len(pending_fids)}图] " if pending_fids else ""
        prompt_str = f"{img_status}第 {client.turn_count + 1} 轮 | 用户 >> "
        user_input = input(prompt_str).strip()
        if not user_input:
            continue
        if user_input.lower() == 'exit':
            break
        if user_input.lower() == 'clear':
            client.clear_history()
            pending_fids = []
            continue
        if user_input.lower().startswith("img:"):
            # Upload now; attach the FIDs with the next text message.
            paths_part = user_input[4:].strip()
            new_fids = client.upload_images_from_str(paths_part)
            pending_fids.extend(new_fids)
            print(f"💡 图片已就绪,当前共计 {len(pending_fids)} 张图片。请继续输入您的问题。")
            continue
        # Structured-output demo: forces JSON replies with an "attack" field.
        client.send(
            query=user_input,
            model='mini',
            fid_list=pending_fids,
            fmt='on',
            format_keyword={
                "attack": "是否是攻击行为,是攻击则值为true,反之则值为false"
            }
        )
        pending_fids = []  # FIDs are attached per message; clear after sending