python - python 使用 file.write 从写入的文件中删除最后一行
问题描述
您好，我已经打开了文件，正在向文件中写入新行，但在某些情况下，我需要删除最后写入的那一行，我该怎么办？
# Open the output file in "w" mode (truncates any previous contents).
ops = open("Ops.txt", "w")
for sepraded in sepradedObject:
    lastWord = ""
    for nameOfObject in sepraded:
        if nameOfObject == "NN":
            nn = lastWord
            # TODO: remove the previously written line before adding this one
            # (this is exactly what the question asks how to do).
            ops.write(f"{nnp};{nn};{predicate};\n")
        if nameOfObject == "NNP":
            nnp = lastWord
            ops.write(f"{nnp};{subject};{predicate};\n")
        lastWord = nameOfObject
这里是我的完整代码,以了解发生了什么以及我现在在做什么(我不关心资源或任何其他事情,我只希望代码有效)所以我该怎么做才能删除最后一行并添加ops文件中的新行?:
# 1- Install nltk , with this command : pip install --user -U nltk And pip install --user -U numpy
# 2- Install nltk data , with the method : nltk.download() | its take over 3H with 8Mbps And 20 min with 40Mbps (over 2.9 GB)
# 3- Read src to get Sentences
# 4- Take loop to get each line of Sentense from the source
# 5- use command to get RDF library : pip install rdflib
# Import the nltk Library that Including above 3Gb Data
from nltk.tokenize import word_tokenize
import nltk
# Import RDF Library
import rdflib
# Just use once nltk.download() to get nlp(nltk) library data.
# nltk.download()
# --- Step 1: POS-tag every sentence from inputs.txt into srcTags.txt ---
# Output format: one sentence per line, each token written as "word->TAG;".
print("--------------Read the inputs from File-------------")
# "with" guarantees both files are closed even on error; the original
# never closed the inputs.txt handle at all.
with open("inputs.txt", "r") as sentences:
    print("--------------Read the inputs from File Completed-------------")
    # "w" mode truncates any previous srcTags.txt content.
    print("--------------Open File to Write The Tags-------------")
    with open("srcTags.txt", "w") as intoFileTags:
        for sentence in sentences:
            tokenizedSentence = word_tokenize(sentence)
            # pos_tag returns a list of (word, tag) tuples.
            taggedSentence = nltk.pos_tag(tokenizedSentence)
            for word, tag in taggedSentence:
                # The original emitted "word->", then "tag", then ";" via an
                # index-toggle hack; one write is equivalent and clearer.
                intoFileTags.write(word + "->" + tag + ";")
            # End of sentence marker: one tagged sentence per line.
            intoFileTags.write("\n")
print("-----------Making The src Tag with word tokenize And split tags with pos tag Completed------")
print("----------------Close srcTag Completed-----------")
# --- Step 2: extract subject/predicate/object triples and write Ops.txt ---
print("-----------Next Step Make RDF From Last Step We have---------------")
# "with" closes both files deterministically (the original leaked tagsFromFile).
with open("srcTags.txt", "r") as tagsFromFile, open("Ops.txt", "w") as ops:
    for line in tagsFromFile:
        sentenceWithTagsAndWord = line.split(";")
        subject = ""
        predicate = ""
        obj = ""
        afterVBZ = False  # becomes True once a verb (VBP/VBZ/VBN) is seen
        for tagAndWord in sentenceWithTagsAndWord:
            if tagAndWord == "\n":
                continue
            tw = tagAndWord.split("->")
            if len(tw) < 2:
                # Malformed token without a "->" separator: skip it instead
                # of raising IndexError (the original would crash here).
                continue
            if tw[1] in ("VBP", "VBZ", "VBN"):
                afterVBZ = True
                subject = tw[0] + " "
                continue
            if afterVBZ and tw[1] != "IN":
                predicate += tw[0] + " "
            # BUG FIX: the original condition was
            #   (tw[1] != "VBZ" or tw[1] != "VBP")
            # which is ALWAYS true; "and" is what was intended (the token is
            # neither VBZ nor VBP).
            if not afterVBZ and tw[1] != "VBZ" and tw[1] != "VBP":
                obj += tw[0] + " "
        # Strip surrounding whitespace and sentence-final punctuation.
        obj = obj.strip()
        subject = subject.strip()
        predicate = predicate.replace(".", "").replace("?", "").replace("!", "").strip()
        # Re-tokenize and re-tag the object phrase so a multi-word object can
        # be split into separate triples.
        sepradedObject = nltk.pos_tag(word_tokenize(obj))
        nnp = ""
        nn = ""
        # FIX for the question itself: buffer the output lines so the most
        # recently written one can be removed before its replacement is added
        # (a plain file handle cannot easily "un-write" its last line).
        lines = []
        # pos_tag yields (word, tag) pairs; unpack them directly instead of
        # iterating the pair elements and tracking "lastWord" (the original
        # hack could also misfire if a word was literally "NN"/"NNP").
        for word, tag in sepradedObject:
            if tag == "NN":
                nn = word
                if lines:
                    lines.pop()  # drop the line buffered for the previous token
                lines.append(nnp + ";" + nn + ";" + predicate + ";")
            if tag == "NNP":
                nnp = word
                lines.append(nnp + ";" + subject + ";" + predicate + ";")
        # Flush the surviving lines for this sentence in one pass.
        for out_line in lines:
            ops.write(out_line + "\n")
print("-------------Write ops file for STEP ONE! Completed----------")
# --- Step 3 (unfinished): build an RDF graph from the extracted triples ---
# create a Graph
graph = rdflib.Graph()
# Create schemas
# we use BNode because we don't know the relation
# NOTE(review): no BNode is actually created yet — presumably planned for
# the code that was to follow; verify once Step 3 is written.
partOfSpeech = rdflib.Namespace("http://example.org/")
# Bind the namespace to a prefix so serialized output can use "partOfSpeech:".
graph.bind("partOfSpeech", partOfSpeech)
# bob is subject , is a predicate , person is object
# Create the node like : bob = partOfSpeech['bob']
解决方案
对，你可以先把这些行收集（累积）到内存中的列表里，然后在一个单独的循环中再把它们写入文件。
nnp = ""
nn = ""
# Collect the output lines in memory first, so the most recently added
# line can be discarded before its replacement goes in.
lines = []
for sepraded in sepradedObject:
    lastWord = ""
    for nameOfObject in sepraded:
        if nameOfObject == "NN":
            nn = lastWord
            if lines:
                lines.pop()  # remove the previously buffered line
            lines.append(f"{nnp};{nn};{predicate};")
        if nameOfObject == "NNP":
            nnp = lastWord
            lines.append(f"{nnp};{subject};{predicate};")
        lastWord = nameOfObject
# Only now write everything out, one line each, in a single pass.
for line in lines:
    ops.write(line + "\n")
推荐阅读
- webview - Xamarin.iOS 无法从 WebView 导航和导航事件控制 iOS 活动指示器
- c++ - 以当前(LocalSystem)用户的默认权限运行进程
- r - 通过 R 从 Bloomberg 检索存储为文本 (YES/NO) 的数据
- date - 根据日期和年份识别当前周
- javascript - 第二条线图,图表js需要不同的标签
- animation - FFMPEG 由于色彩空间不匹配而丢帧
- azure - 如何通过 Ansible 将 Azure Application Insights 部署到生产环境
- rest - 完全保护 REST API
- php - 如何动态设置背景图像大小?
- reactjs - 如何使用 Sheet.Best API 从我的 GatsbyJS 网站将数据发布到 Google 表格?