Compare commits
2 Commits
340b2a7b6b
...
3c5bc987d9
| Author | SHA1 | Date |
|---|---|---|
|
|
3c5bc987d9 | |
|
|
54cc21806f |
72
README.md
72
README.md
|
|
@@ -554,7 +554,60 @@ art. - 冠词
|
|||
|
||||
---
|
||||
|
||||
### 8. 使用示例
|
||||
### 8. AI助手模块 (`/ai_assist`)
|
||||
|
||||
#### 8.1 词语智能问答
|
||||
|
||||
- **接口**: `POST /ai_assist/exp`
|
||||
- **描述**: 针对指定词语,向AI助手提问相关问题,获取简洁自然的答案,适合初学者。
|
||||
- **需要认证**: 是
|
||||
- **请求体**:
|
||||
|
||||
```json
|
||||
{
|
||||
"word": "string",
|
||||
"question": "string"
|
||||
}
|
||||
```
|
||||
|
||||
- **响应**:
|
||||
|
||||
```json
|
||||
{
|
||||
"word": "string",
|
||||
"answer": "string",
|
||||
"model": "string",
|
||||
"tokens_used": "integer"
|
||||
}
|
||||
```
|
||||
|
||||
- **状态码**:
|
||||
- `200`: 问答成功
|
||||
- `400`: 本月API使用量已超出限额
|
||||
- `500`: AI调用失败
|
||||
|
||||
#### 8.2 清除词语聊天记录
|
||||
|
||||
- **接口**: `POST /ai_assist/clear`
|
||||
- **描述**: 清除指定词语的AI助手聊天记录
|
||||
- **需要认证**: 是
|
||||
- **请求参数**:
|
||||
- `word`: 词语 (string)
|
||||
|
||||
- **响应**:
|
||||
|
||||
```json
|
||||
{
|
||||
"msg": "已清除 <word> 的聊天记录"
|
||||
}
|
||||
```
|
||||
|
||||
- **状态码**:
|
||||
- `200`: 清除成功
|
||||
|
||||
---
|
||||
|
||||
### 9. 使用示例
|
||||
|
||||
#### 完整的API调用流程示例
|
||||
|
||||
|
|
@@ -597,6 +650,23 @@ curl -X POST "http://127.0.0.1:8000/translate" \
|
|||
|
||||
# 5. 测试Redis连接
|
||||
curl -X GET "http://127.0.0.1:8000/ping-redis"
|
||||
|
||||
# 6. 词语智能问答
|
||||
curl -X POST "http://127.0.0.1:8000/ai_assist/exp" \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "Authorization: Bearer <your_token_here>" \
|
||||
-d '{
|
||||
"word": "法语",
|
||||
"question": "什么是法语?"
|
||||
}'
|
||||
|
||||
# 7. 清除词语聊天记录
|
||||
curl -X POST "http://127.0.0.1:8000/ai_assist/clear" \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "Authorization: Bearer <your_token_here>" \
|
||||
-d '{
|
||||
"word": "法语"
|
||||
}'
|
||||
```
|
||||
|
||||
---
|
||||
|
|
|
|||
|
|
@@ -77,7 +77,8 @@ async def search(request: Request, body: SearchRequest, user=Depends(get_current
|
|||
# 修改freq
|
||||
first_word = word_contents[0].word
|
||||
current_freq = first_word.freq
|
||||
await first_word.update(freq=current_freq + 1)
|
||||
first_word.freq = current_freq + 1
|
||||
await first_word.save()
|
||||
|
||||
pos_seen = set()
|
||||
pos_contents = []
|
||||
|
|
@@ -111,7 +112,10 @@ async def search(request: Request, body: SearchRequest, user=Depends(get_current
|
|||
raise HTTPException(status_code=404, detail="Word not found")
|
||||
|
||||
first_def = word_content[0]
|
||||
pos_list = await first_def.pos.all()
|
||||
first_word = first_def.word
|
||||
first_word.freq = first_word.freq + 1
|
||||
await first_word.save()
|
||||
pos_list = await first_def.pos
|
||||
pos_contents = [p.pos_type for p in pos_list]
|
||||
|
||||
contents: List[SearchItemJp] = []
|
||||
|
|
|
|||
|
|
@@ -11,7 +11,7 @@ from app.utils.textnorm import normalize_text
|
|||
from settings import TORTOISE_ORM
|
||||
|
||||
|
||||
async def suggest_autocomplete(query: SearchRequest, limit: int = 10) -> List[str]:
|
||||
async def suggest_autocomplete(query: SearchRequest, limit: int = 10) -> List[Tuple[str, str]]:
|
||||
"""
|
||||
|
||||
:param query: 当前用户输入的内容
|
||||
|
|
@@ -60,10 +60,10 @@ async def suggest_autocomplete(query: SearchRequest, limit: int = 10) -> List[st
|
|||
.get_or_none(
|
||||
text=query.query
|
||||
)
|
||||
.only("text", "freq")
|
||||
.only("text", "hiragana", "freq")
|
||||
)
|
||||
if exact:
|
||||
exact_word = [(exact.text, exact.freq)]
|
||||
exact_word = [(exact.text, exact.hiragana, exact.freq)]
|
||||
else:
|
||||
exact_word = []
|
||||
|
||||
|
|
@@ -71,34 +71,34 @@ async def suggest_autocomplete(query: SearchRequest, limit: int = 10) -> List[st
|
|||
WordlistJp
|
||||
.filter(Q(hiragana__startswith=query_word) | Q(text__startswith=query.query))
|
||||
.exclude(text=query.query)
|
||||
.only("text", "freq")
|
||||
.only("text", "hiragana", "freq")
|
||||
)
|
||||
prefix_objs = await qs_prefix[:limit]
|
||||
prefix: List[Tuple[str, int]] = [(o.text, o.freq) for o in prefix_objs]
|
||||
prefix: List[Tuple[str, str, int]] = [(o.text, o.hiragana, o.freq) for o in prefix_objs]
|
||||
|
||||
need = max(0, limit - len(prefix))
|
||||
contains: List[Tuple[str, int]] = []
|
||||
contains: List[Tuple[str, str, int]] = []
|
||||
|
||||
if need > 0:
|
||||
qs_contain = await (
|
||||
WordlistJp
|
||||
.filter(Q(hiragana__icontains=query_word) | Q(text__icontains=query.query))
|
||||
.exclude(Q(hiragana__startswith=query_word) | Q(text__startswith=query.query) | Q(text=query.query))
|
||||
.only("text", "freq")
|
||||
.only("text", "freq")
|
||||
.only("text", "hiragana", "freq")
|
||||
)
|
||||
contains_objs = qs_contain[:need * 2]
|
||||
contains: List[Tuple[str, int]] = [(o.text, o.freq) for o in contains_objs]
|
||||
contains: List[Tuple[str, str, int]] = [(o.text, o.hiragana, o.freq) for o in contains_objs]
|
||||
|
||||
seen_text, out = set(), []
|
||||
for text, freq in list(exact_word) + list(prefix) + list(contains):
|
||||
if text not in seen_text:
|
||||
seen_text.add(text)
|
||||
out.append((text, freq))
|
||||
for text, hiragana, freq in list(exact_word) + list(prefix) + list(contains):
|
||||
key = (text, hiragana)
|
||||
if key not in seen_text:
|
||||
seen_text.add(key)
|
||||
out.append((text, hiragana, freq))
|
||||
if len(out) >= limit:
|
||||
break
|
||||
out = sorted(out, key=lambda w: (w[1], len(w[0]), w[0]))
|
||||
return [text for text, _ in out]
|
||||
out = sorted(out, key=lambda w: (-w[2], len(w[0]), w[0]))
|
||||
return [(text, hiragana) for text, hiragana, _ in out]
|
||||
|
||||
|
||||
async def __test():
|
||||
|
|
|
|||
Loading…
Reference in New Issue