"""Demo script for fast_jieba: single-text and batch tokenization APIs.

Runs each public entry point against one sample Chinese sentence and
prints the raw results so the outputs can be compared side by side.
"""
import fast_jieba
from fast_jieba.analyse import extract_tags, textrank

# Sample sentence (Chinese): "Xiao Ming studies in the Physics Department
# of Tsinghua University, Beijing." Kept as a single constant so every
# API call below demonstrably operates on the same input.
SAMPLE_TEXT = "小明就读北京清华大学物理系"


def main() -> None:
    """Exercise fast_jieba's single-text and batch APIs on SAMPLE_TEXT."""
    # Single-text APIs.
    print(fast_jieba.tokenize(SAMPLE_TEXT))
    print(extract_tags(SAMPLE_TEXT))   # keyword extraction (from fast_jieba.analyse)
    print(textrank(SAMPLE_TEXT))       # keyword extraction via TextRank
    print("**************")

    # Batch APIs: the same sentence repeated four times, matching the
    # original demo's input, fed through each batched entry point.
    texts = [SAMPLE_TEXT for _ in range(4)]
    for batch_fn in (
        fast_jieba.batch_tokenize,
        fast_jieba.batch_cut,
        fast_jieba.batch_posseg,
    ):
        print(batch_fn(texts))


if __name__ == "__main__":
    main()
-
Notifications
You must be signed in to change notification settings - Fork 0
franklucky001/fast_jieba
Folders and files
Name | Name | Last commit message | Last commit date | |
---|---|---|---|---|
Repository files navigation
About
fast_jieba for Python, built with PyO3 on top of the Rust crate jieba_rs
Resources
Stars
Watchers
Forks
Packages 0
No packages published