Daniel Hládek 2023-03-07 10:57:47 +01:00
parent 6724e964e9
commit b69463fa9d


@@ -38,48 +38,6 @@ def calculate_checksums(self, text):
def extract_pages(link_docs):
    content = []
    extracted_links = set()
    for doc in link_docs:
        if doc["status"] != "html_ok":
            continue
        extracted_doc = trafilatura.bare_extraction(doc["content"], extract_links=True)
        links = extracted_doc["links"]
        # sets do not support +=, collect the links with update()
        extracted_links.update(links)
        del extracted_doc["links"]
        content.append(extracted_doc)
    return content, extracted_links
def index_pages(pagedb, pages_list):
    contentcol = pagedb["content"]
    for page in pages_list:
        # get paragraph checksums
        checksums, sizes = get_checksums(page["text"])
        page["paragraph_checksums"] = checksums
        page["paragraph_sizes"] = sizes
    contentcol.insert_many(pages_list)

page_hashes = []
def process_pages(db, fetched_pages):
    content_pages, extracted_links = extract_pages(fetched_pages)
    contentcol = db["content"]
    contentcol.insert_many(content_pages)
    linkcol = db["links"]
    extracted = []
    for link in extracted_links:
        extracted.append({"url": link, "status": "backlink"})
    # should fail if the link already exists
    linkcol.insert_many(extracted)
    html_pages = []
    for page in fetched_pages:
        linkcol.update_one({"url": page["url"]}, {"$set": {"status": "visited"}})
        if "html" in page:
            html_pages.append({"url": page["url"], "html": page["html"], "update_time": datetime.now()})
            del page["html"]
    htmlcol = db["html"]
    htmlcol.insert_many(html_pages)
def filter_links(links, domain):
    out = set()
@@ -119,11 +77,65 @@ def get_new_links(domain):
    print(res)
    return filtered_links
def fetch_links(link_batch):
    htmls = []
    for link in link_batch:
        # fetch the raw HTML for every link in the batch
        rr = trafilatura.fetch_url(link, decode=True)
        htmls.append(rr)
    return htmls
def extract_links(link_batch, htmls):
    out = []
    for link, html in zip(link_batch, htmls):
        doc = None
        if html is not None:
            doc = trafilatura.bare_extraction(html, extract_links=True)
        out.append((link, html, doc))
    return out
def index_pages(db, extracted_pages):
    extracted_links = set()
    linkcol = db["links"]
    htmlcol = db["html"]
    contentcol = db["content"]
    for link, html, doc in extracted_pages:
        state = "good"
        if html is None:
            state = "html_error"
        elif doc is None:
            state = "content_error"
        if html is not None:
            htmlcol.insert_one({"url": link, "html": html})
        if doc is not None:
            checksums, sizes = get_checksums(doc["text"])
            doc["paragraph_checksums"] = checksums
            doc["paragraph_sizes"] = sizes
            contentcol.insert_one(doc)
        # insert_one takes no upsert option, record the crawl state with an upsert update
        linkcol.update_one({"url": link}, {"$set": {"status": state}}, upsert=True)
    # leftover from the removed process_pages flow; fetched_pages and page_list are not defined in this scope
    content_pages, extracted_links = extract_pages(fetched_pages)
    contentcol = db["content"]
    contentcol.insert_many(content_pages)
    extracted = []
    for link in extracted_links:
        extracted.append({"url": link, "status": "backlink"})
    # should fail if the link already exists
    linkcol.insert_many(extracted)
    html_pages = []
    for page in fetched_pages:
        linkcol.update_one({"url": page["url"]}, {"$set": {"status": "visited"}})
        if "html" in page:
            html_pages.append({"url": page["url"], "html": page["html"], "update_time": datetime.now()})
            del page["html"]
    htmlcol = db["html"]
    htmlcol.insert_many(html_pages)
    for page in page_list:
        # get paragraph checksums
        checksums, sizes = get_checksums(page["text"])
        page["paragraph_checksums"] = checksums
        page["paragraph_sizes"] = sizes
    contentcol.insert_many(pages_list)
    page_hashes = []
def index_links(db, link_batch):
    html_docs = []
    link_docs = []
    for link, html in zip(link_batch, html):
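
A minimal usage sketch of the fetch/extract/index pipeline introduced by this commit, assuming the functions above are in scope. The MongoDB connection string and the sample_links batch are placeholders and not part of this commit; in the crawler the batch would come from get_new_links(domain).

import pymongo

# placeholder connection; the real crawler configures its own client
client = pymongo.MongoClient("mongodb://localhost:27017/")
db = client["crawler"]

# placeholder batch; normally produced by get_new_links(domain)
sample_links = ["https://example.com/", "https://example.com/about"]

htmls = fetch_links(sample_links)                      # download raw HTML for each URL
extracted_pages = extract_links(sample_links, htmls)   # (link, html, doc) tuples
index_pages(db, extracted_pages)                       # store HTML, content and link status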