
optimize comment
ZingLix committed Jan 11, 2024
1 parent 88db396 commit 5af12e5
Showing 2 changed files with 37 additions and 11 deletions.
7 changes: 4 additions & 3 deletions docs/cli.md
@@ -48,7 +48,7 @@ $ qianfan chat [OPTIONS]

* `--model TEXT`: model name [default: ERNIE-Bot-turbo]
* `--endpoint TEXT`: endpoint of the model
-* `--multi-line / --no-multi-line`: multi-line mode, press Enter twice to confirm and submit the message [default: no-multi-line]
+* `--multi-line / --no-multi-line`: multi-line mode, press Esc and then Enter to submit, to avoid conflicting with line breaks in the text [default: no-multi-line]
* `--list-model -l`: print the list of supported model names
* `--help`: show the help message
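
For example, an illustrative invocation that starts a chat with the default model in multi-line mode (all flags are from the list above):

```console
$ qianfan chat --model ERNIE-Bot-turbo --multi-line
```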

@@ -59,19 +59,20 @@ $ qianfan chat [OPTIONS]
**Usage**:

```console
-$ qianfan completion [OPTIONS] MESSAGES...
+$ qianfan completion [OPTIONS] PROMPTS...
```

**Arguments**:

-* `MESSAGES...`: the prompt(s) to complete; multiple prompts can be passed to represent a conversation history, alternating between user and model messages, and the count must be odd [required]
+* `PROMPTS...`: the prompt(s) to complete; multiple prompts can be passed to represent a conversation history, alternating between user and model messages, and the count must be odd. If not provided, you will be prompted to enter one interactively on the command line.

**Options**:

* `--model TEXT`: model name [default: ERNIE-Bot-turbo]
* `--endpoint TEXT`: endpoint of the model
* `--plain / --no-plain`: plain-text mode, without rich-text rendering [default: no-plain]
* `--list-model -l`: print the list of supported model names
+* `--multi-line`: multi-line mode, press Esc and then Enter to submit, to avoid conflicting with line breaks in the text
* `--help`: show the help message
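
As an illustration, a three-prompt history (user, model, user; an odd count, as required) might look like this; the prompt text is invented for the example:

```console
$ qianfan completion "What is the capital of France?" "The capital of France is Paris." "What about Germany?"
```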

### txt2img Text-to-Image
41 changes: 33 additions & 8 deletions src/qianfan/common/client/chat.py
@@ -78,13 +78,22 @@ def __init__(
    def single_model_response(
        self, msg: Tuple[str, bool, Optional[QfResponse]]
    ) -> RenderableType:
+        """
+        Renders response of one model
+        """
        m, done, resp = msg
+        # have not received first token, return a spinner
        if m == "" and not done:
-            return Spinner("dots", text="Thinking...")
+            return Spinner("dots", text="Thinking...", style="status.spinner")
+        # render the received message
        render_list: List[RenderableType] = [Markdown(m)]
+        # if not finished, append a spinner
        if not done:
-            render_list.append(Spinner("dots", text="Generating..."))
+            render_list.append(
+                Spinner("dots", text="Generating...", style="status.spinner")
+            )
        if resp is not None:
+            # add latency info
            stat = resp.statistic
            render_list.append(
                Text.from_markup(
@@ -93,6 +102,7 @@ def single_model_response(
                    f" {stat['total_latency']:.2f}s.[/]"
                )
            )
+            # add token usage when finished
            if done:
                token_usage = resp["usage"]
                render_list.append(
@@ -109,10 +119,13 @@ def single_model_response(
    def render_model_response(
        self, msg_list: List[Tuple[str, bool, Optional[QfResponse]]]
    ) -> RenderableType:
+        """
+        Render responses from multiple models
+        """
        if len(msg_list) == 1:
            return self.single_model_response(msg_list[0])
        table = Table(expand=True)
-        live_list = []
+        render_list = []
        for client, msg in zip(self.clients, msg_list):
            title: str
            if client._model is not None:
@@ -123,15 +136,14 @@ def render_model_response(
                raise InternalError("No model or endpoint specified in ChatCompletion.")

            table.add_column(title, overflow="fold", ratio=1)
-            live_list.append(self.single_model_response(msg))
-        table.add_row(*live_list)
+            render_list.append(self.single_model_response(msg))
+        table.add_row(*render_list)
        return table

-    def chat_in_terminal(self) -> None:
+    def print_hint_msg(self) -> None:
        """
-        Chat in terminal
+        Print hint message at startup
        """
-
        if self.multi_line:
            rprint(
                "[bold]Hint[/bold]: [green bold]Press Esc before Enter[/] to submit"
@@ -146,6 +158,13 @@ def chat_in_terminal(self) -> None:
                "[bold]Hint[/bold]: If you want to submit multiple lines, use the"
                " '--multi-line' option."
            )
+
+    def chat_in_terminal(self) -> None:
+        """
+        Chat in terminal
+        """
+
+        self.print_hint_msg()
        # loop the conversation
        while True:
            # loop the input and check whether the input is valid
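
The hint above tells users to press Esc before Enter to submit in multi-line mode. That behavior matches prompt_toolkit's multiline prompt; as a hedged sketch (the diff does not show the input-handling code, so the library choice here is an assumption, not confirmed by the source):

```python
# Assumption: an Esc-then-Enter submission flow like the one the hints
# describe, built on prompt_toolkit's multiline mode.
from prompt_toolkit import PromptSession

session: PromptSession = PromptSession()
# In multiline mode, Enter inserts a newline; Esc followed by Enter
# (Meta+Enter) submits the whole message.
text = session.prompt("> ", multiline=True)
print(text)
```
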
@@ -169,6 +188,7 @@ def chat_in_terminal(self) -> None:
                rprint("Bye!")
                raise typer.Exit()

+        # List of (received_msg, is_end, response) for each client
        msg_list: List[Tuple[str, bool, Optional[QfResponse]]] = [
            ("", False, None) for _ in range(len(self.clients))
        ]
@@ -182,6 +202,9 @@ def chat_in_terminal(self) -> None:
            def model_response_worker(
                client: qianfan.ChatCompletion, i: int
            ) -> None:
+                """
+                Worker for each client to receive messages
+                """
                try:
                    messages = self.msg_history[i]
                    if messages is None:
@@ -222,9 +245,11 @@ def model_response_worker(
                task_list.append(task)
            wait(task_list)

+            # Exit if there is only one client and it raised an exception
            if len(self.clients) == 1 and self.msg_history[0] is None:
                raise typer.Exit(1)

+            # append response to each chat history
            for i, msg in enumerate(msg_list):
                msg_history = self.msg_history[i]
                if msg_history is not None:
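
To make the rendering flow concrete, here is a minimal, self-contained sketch (not the qianfan source) of the pattern `single_model_response` implements with rich: show a `Spinner` until the first chunk arrives, then re-render the accumulated text as `Markdown` with a trailing spinner until the stream ends. The `fake_stream` generator is a stand-in for the model's streaming response.

```python
import time
from typing import Iterator

from rich.console import Group, RenderableType
from rich.live import Live
from rich.markdown import Markdown
from rich.spinner import Spinner


def fake_stream() -> Iterator[str]:
    """Stand-in for a streaming model response."""
    for chunk in ["Hello", ", ", "**world**", "!"]:
        time.sleep(0.5)
        yield chunk


def render(text: str, done: bool) -> RenderableType:
    # no token received yet: just a spinner
    if text == "" and not done:
        return Spinner("dots", text="Thinking...")
    # finished: plain Markdown
    if done:
        return Markdown(text)
    # still generating: Markdown plus a trailing spinner
    return Group(Markdown(text), Spinner("dots", text="Generating..."))


received = ""
with Live(render(received, done=False), refresh_per_second=10) as live:
    for chunk in fake_stream():
        received += chunk
        live.update(render(received, done=False))
    live.update(render(received, done=True))
```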
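The last hunk's worker pattern, reduced to its essentials: one thread per client streams into its own slot of a shared list, and `wait()` blocks until every stream has finished. This is a sketch of the idea, not the actual qianfan code; the client objects and the stream are stand-ins.

```python
from concurrent.futures import ThreadPoolExecutor, wait

clients = ["model-a", "model-b"]  # stand-ins for ChatCompletion clients
# (received_text, is_done) per client, mirroring msg_list in the diff
msg_list = [("", False) for _ in clients]


def model_response_worker(client: str, i: int) -> None:
    """Receive the (fake) stream for one client into msg_list[i]."""
    # each worker writes only to its own index, so no lock is needed here
    for chunk in ["hi ", "from ", client]:
        text, _ = msg_list[i]
        msg_list[i] = (text + chunk, False)
    msg_list[i] = (msg_list[i][0], True)


with ThreadPoolExecutor() as pool:
    task_list = [
        pool.submit(model_response_worker, c, i) for i, c in enumerate(clients)
    ]
    wait(task_list)  # block until every client's stream has completed

print(msg_list)
```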
