python-3.x - How do I deploy a trained spaCy text-classification model on a file, line by line?
Problem description
There are very few examples of spaCy text classification with textcat. Something like this:
import spacy
import numpy as np
from spacy.util import minibatch, compounding

def load_data(limit=0, split=0.8):
    # `train` is the list of (text, label) pairs prepared earlier
    train_data = train
    np.random.shuffle(train_data)
    train_data = train_data[-limit:]
    texts, labels = zip(*train_data)
    cats = [{'POSITIVE': bool(y)} for y in labels]
    split = int(len(train_data) * split)
    return (texts[:split], cats[:split]), (texts[split:], cats[split:])

def evaluate(tokenizer, textcat, texts, cats):
    docs = (tokenizer(text) for text in texts)
    tp = 1e-8  # True positives
    fp = 1e-8  # False positives
    fn = 1e-8  # False negatives
    tn = 1e-8  # True negatives
    for i, doc in enumerate(textcat.pipe(docs)):
        gold = cats[i]
        for label, score in doc.cats.items():
            if label not in gold:
                continue
            if score >= 0.5 and gold[label] >= 0.5:
                tp += 1.
            elif score >= 0.5 and gold[label] < 0.5:
                fp += 1.
            elif score < 0.5 and gold[label] < 0.5:
                tn += 1.
            elif score < 0.5 and gold[label] >= 0.5:
                fn += 1.
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    f_score = 2 * (precision * recall) / (precision + recall)
    return {'textcat_p': precision, 'textcat_r': recall, 'textcat_f': f_score}
#("Number of texts to train from","t" , int)
n_texts=8000
#You can increase texts count if you have more computational power.
#("Number of training iterations", "n", int))
n_iter=3
nlp = spacy.load('en_core_web_sm') # create english Language class
if 'textcat' not in nlp.pipe_names:
textcat = nlp.create_pipe('textcat')
nlp.add_pipe(textcat, last=True)
# otherwise, get it, so we can add labels to it
else:
textcat = nlp.get_pipe('textcat')
# add label to text classifier
textcat.add_label('POSITIVE')
# load the dataset
print("Loading food reviews data...")
(train_texts, train_cats), (dev_texts, dev_cats) =
load_data(limit=n_texts)
print("Using {} examples ({} training, {} evaluation)"
.format(n_texts, len(train_texts), len(dev_texts)))
train_data = list(zip(train_texts,
[{'cats': cats} for cats in train_cats]))
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'textcat']
with nlp.disable_pipes(*other_pipes): # only train textcat
optimizer = nlp.begin_training()
print("Training the model...")
print('{:^5}\t{:^5}\t{:^5}\t{:^5}'.format('LOSS', 'P', 'R', 'F'))
for i in range(n_iter):
losses = {}
# batch up the examples using spaCy's minibatch
batches = minibatch(train_data, size=compounding(4., 32., 1.001))
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(texts, annotations, sgd=optimizer, drop=0.2,
losses=losses)
with textcat.model.use_params(optimizer.averages):
# evaluate on the dev data split off in load_data()
scores = evaluate(nlp.tokenizer, textcat, dev_texts, dev_cats)
print('{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}' # print a simple table
.format(losses['textcat'], scores['textcat_p'],
scores['textcat_r'], scores['textcat_f']))
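The example above stops after training. If the intent is to deploy the trained model later from a separate script, it usually helps to persist the pipeline to disk once training finishes. Below is a minimal sketch using spaCy v2's to_disk / spacy.load; the directory name textcat_model is just a placeholder, not something from the original question:

    # save the whole pipeline (tokenizer + textcat weights) once training is done;
    # 'textcat_model' is a hypothetical output directory - pick your own
    output_dir = 'textcat_model'
    nlp.to_disk(output_dir)

    # later, in the deployment script, reload it instead of retraining
    import spacy
    nlp = spacy.load(output_dir)
    print(nlp('Great product, would buy again').cats)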
But all of these examples deploy the trained model on a single piece of text, like this:
test_text1 = ('NEW YORK (Reuters) - Support for U.S. President Donald Trump '
              'increased slightly among Republicans after he lashed out on '
              'Twitter over the weekend')
doc = nlp(test_text1)
test_text1, doc.cats
# output: {'POSITIVE': 0.0011602493468672037}
I don't mind deploying the model on a handful of texts, but I have 300 texts, one per line, in a CSV file, and I'm lazy :) How can I run the model over the CSV file and get doc.cats for every row?
Solution
When you process a large number of texts, use docs = list(nlp.pipe(texts)) on the list of texts instead of doc = nlp(text). Or you can process them as a stream: for doc in nlp.pipe(texts): ...
See the spaCy documentation for more details.
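Putting that together for the CSV case, one possible sketch (not the answerer's exact code) reads the file with pandas and streams every row through nlp.pipe, collecting doc.cats per line. The file name reviews.csv and the column name text are assumptions; adjust them to your data:

    import pandas as pd

    # hypothetical file and column names - adjust to your CSV layout
    df = pd.read_csv('reviews.csv')
    texts = df['text'].astype(str).tolist()

    # nlp.pipe streams the 300 texts through the trained pipeline,
    # which is much faster than calling nlp(text) once per row
    scores = [doc.cats for doc in nlp.pipe(texts)]   # e.g. {'POSITIVE': 0.98}

    # attach the scores back to the dataframe, one value per input line, and save
    df['POSITIVE'] = [cats.get('POSITIVE') for cats in scores]
    df.to_csv('reviews_scored.csv', index=False)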