From b84a5e46e488d91a62937a8e8d0f6332c1d3a8eb Mon Sep 17 00:00:00 2001
From: "sweep-ai[bot]" <128439645+sweep-ai[bot]@users.noreply.github.com>
Date: Wed, 3 Jan 2024 18:06:45 +0000
Subject: [PATCH 1/3] feat: Updated tests/test_jieba.py

---
 tests/test_jieba.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/tests/test_jieba.py b/tests/test_jieba.py
index 5c4245c2..c979cff3 100644
--- a/tests/test_jieba.py
+++ b/tests/test_jieba.py
@@ -3,6 +3,10 @@
 
 
 def test_jieba_segmentation():
+    """
+    Tests the segmentation of a Chinese text string using the jieba library.
+    This function does not take any parameters and does not return anything.
+    """
     text = "我爱自然语言处理"
     seg_list = jieba.cut(text, cut_all=False)
     assert list(seg_list) == ['我', '爱', '自然语言处理']

From 873f3d5a495c23ead3b9d53b302f8eae69ac1cdc Mon Sep 17 00:00:00 2001
From: "sweep-ai[bot]" <128439645+sweep-ai[bot]@users.noreply.github.com>
Date: Wed, 3 Jan 2024 18:07:09 +0000
Subject: [PATCH 2/3] feat: Updated tests/test_jieba.py

---
 tests/test_jieba.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/test_jieba.py b/tests/test_jieba.py
index c979cff3..fb9ca598 100644
--- a/tests/test_jieba.py
+++ b/tests/test_jieba.py
@@ -12,6 +12,9 @@ def test_jieba_segmentation():
     assert list(seg_list) == ['我', '爱', '自然语言处理']
 
 def test_jieba_import():
+    """
+    Tests the import of the jieba library. This function does not take any parameters and does not return anything.
+    """
     assert jieba is not None
 
 def test_jieba_tokenization():

From 8ec35f07c0a20a5f434b93aa92f12e3df4d485ae Mon Sep 17 00:00:00 2001
From: "sweep-ai[bot]" <128439645+sweep-ai[bot]@users.noreply.github.com>
Date: Wed, 3 Jan 2024 18:07:40 +0000
Subject: [PATCH 3/3] feat: Updated tests/test_jieba.py

---
 tests/test_jieba.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/tests/test_jieba.py b/tests/test_jieba.py
index fb9ca598..68318cf1 100644
--- a/tests/test_jieba.py
+++ b/tests/test_jieba.py
@@ -18,6 +18,10 @@ def test_jieba_import():
     assert jieba is not None
 
 def test_jieba_tokenization():
+    """
+    Tests the tokenization of a Chinese text string using the jieba library.
+    This function does not take any parameters and does not return anything.
+    """
     text = "我爱自然语言处理"
     tokens = jieba.tokenize(text)
     assert list(tokens) == [('我', 0, 1), ('爱', 1, 2), ('自然语言处理', 2, 8)]
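
Since the three patches only add docstrings, the resulting test module is easy to reconstruct. Below is a sketch of how tests/test_jieba.py should read once all three patches apply, assuming the file contains only these three tests and opens with a plain `import jieba` (nothing outside the hunks appears in the diffs, so the import line and the blank-line layout are assumptions). Both assertions encode the behaviour of jieba's default dictionary: `jieba.cut(text, cut_all=False)` runs accurate mode, and `jieba.tokenize(text)` yields `(word, start, end)` character offsets, so exact results may shift with a different dictionary or jieba version.

import jieba


def test_jieba_segmentation():
    """
    Tests the segmentation of a Chinese text string using the jieba library.
    This function does not take any parameters and does not return anything.
    """
    text = "我爱自然语言处理"
    # cut_all=False selects accurate mode (the default); cut_all=True would
    # also emit overlapping sub-words such as '自然' and '语言'.
    seg_list = jieba.cut(text, cut_all=False)
    assert list(seg_list) == ['我', '爱', '自然语言处理']

def test_jieba_import():
    """
    Tests the import of the jieba library. This function does not take any parameters and does not return anything.
    """
    assert jieba is not None

def test_jieba_tokenization():
    """
    Tests the tokenization of a Chinese text string using the jieba library.
    This function does not take any parameters and does not return anything.
    """
    text = "我爱自然语言处理"
    # tokenize() yields (word, start, end) tuples; the end offset is
    # exclusive, so '自然语言处理' spans characters [2, 8) of the input.
    tokens = jieba.tokenize(text)
    assert list(tokens) == [('我', 0, 1), ('爱', 1, 2), ('自然语言处理', 2, 8)]

The suite runs with `pytest tests/test_jieba.py`.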