commit cdd3b15121 (parent b69463fa9d)
Daniel Hládek committed 2023-03-07 16:18:32 +01:00


@@ -7,7 +7,7 @@ import sys
 import courlan
 
-def calculate_checksums(self, text):
+def calculate_checksums(text):
     """
     @return fingerprints of paragraphs in the text. Paragraphs are separated by a blank line
     """
@@ -39,7 +39,7 @@ def calculate_checksums(self, text):
-def filter_links(links,domain):
+def filter_links(links):
     out = set()
     for link in links:
         r = courlan.check_url(link)
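
courlan.check_url() validates and cleans a candidate URL, returning a (url, domain) pair on success and None on rejection, which is presumably what the loop above branches on. A hedged sketch of the whole filter under that assumption:

    import courlan

    def filter_links(links):
        out = set()
        for link in links:
            r = courlan.check_url(link)
            # None means the URL was rejected (invalid, unwanted type, etc.)
            if r is None:
                continue
            url, domain = r
            out.add(url)
        return out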
@@ -55,10 +55,6 @@ def filter_links(links,domain):
     return out
 
-def get_visited_links(domain):
-    return []
-
 def generic_visit(domain):
     known_links = set(get_visited_links(domain))
     visit_links = []
@@ -68,6 +64,7 @@ def generic_visit(domain):
     if visit_links is None:
         visit_links = trafilatura.focused_crawler(dommain,known_links=known_links)
 
 def get_new_links(domain):
     known_links = []
     # get seed links
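
Two things stand out in the context lines above: dommain looks like a typo for domain and would raise a NameError if that branch ever runs, and focused_crawler is normally addressed as trafilatura.spider.focused_crawler (as the new simple_visit below does), returning a pair of URL collections, roughly as in this sketch (the homepage URL is illustrative):

    import trafilatura.spider

    # returns (todo, known_links): URLs still to visit and all URLs seen so far
    visit_links, known_links = trafilatura.spider.focused_crawler(
        "https://example.org", known_links=set())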
@@ -79,74 +76,62 @@ def get_new_links(domain):
 def fetch_links(link_batch):
     htmls = []
-    print(link_batch)
-    print("zzzzzzzzzz")
     for link in link_batch:
-        rr = trafilatura.fetch_url(page,decode=True)
+        print(link)
+        rr = trafilatura.fetch_url(link,decode=True)
         htmls.append(rr)
-    return htnls
+    return htmls
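
trafilatura.fetch_url() returns None when a download fails, so the htmls list can contain None entries that the extraction step below has to tolerate; with decode=True the successful results are requested as decoded strings rather than raw bytes. A defensive caller might log failures explicitly, as in this sketch:

    import trafilatura

    def fetch_links(link_batch):
        htmls = []
        for link in link_batch:
            rr = trafilatura.fetch_url(link, decode=True)
            if rr is None:
                # download failed; keep the slot so zip(link_batch, htmls) stays aligned
                print("fetch failed:", link)
            htmls.append(rr)
        return htmls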
 def extract_links(link_batch,htmls):
     out = []
     for link,html in zip(link_batch,htmls):
         doc = None
+        assert link is not None
         if html is not None:
-            doc = trafilatura.bare_extraction(html,extract_links=True)
+            doc = trafilatura.bare_extraction(html,url=link,include_links=True,with_metadata=True,include_formatting=True,target_language="sk")
         out.append((link,html,doc))
     return out
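
bare_extraction() is the variant of trafilatura's extractor that returns a plain Python dict instead of serialized output; the new call passes the source URL, asks for metadata and formatting, keeps links, and restricts extraction to Slovak. The indexing code below expects at least a text field and, when available, a links key, so a consumer might look like this sketch (field availability is an assumption based on how the dict is used in this file):

    # doc is the dict returned by trafilatura.bare_extraction
    if doc is not None:
        text = doc.get("text")        # main content as plain text
        links = doc.get("links", [])  # expected here when include_links=True yields links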
-def index_pages(db,extracted_pages):
+def index_pages(db,extracted_pages,domain):
     extracted_links = set()
     linkcol = db["links"]
     htmlcol = db["html"]
+    contentcol = db["content"]
     for link,html,doc in extracted_pages:
         state = "good"
         if html is None:
             state = "html_error"
         elif doc is None:
             state = "content_error"
-        if htnl is not None:
-            htmlcol.insertOne({"url":link,"html":html})
+        if html is not None:
+            htmlcol.insert_one({"url":link,"html":html})
         if doc is not None:
-            checksums,sizes = get_checksums(doc["text"])
+            print(doc)
+            checksums,sizes = calculate_checksums(doc["text"])
             doc["paragraph_checksums"] = checksums
             doc["paragraph_sizes"] = sizes
-            contentcol.insertOne(doc)
-        linkcol.insertOne({"url":link,"status":state},upsert=True)
+            if "links" in doc:
+                extracted_links.union(doc["links"])
+                del doc["links"]
+            contentcol.insert_one(doc)
+        linkcol.replace_one({"url":link},{"url":link,"status":state},upsert=True)
+    filtered_links = filter_links(extracted_links)
+    for link in filtered_links:
+        linkcol.insert_one({"url":link,"status":"backlink"},upsert=True)
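
Two details worth flagging in the new code above: set.union() returns a new set without modifying extracted_links, so the collected links are silently dropped, and pymongo's insert_one() does not take an upsert option (that belongs to update_one/replace_one), so the backlink write would raise a TypeError. A corrected sketch of both spots:

    if "links" in doc:
        # update() mutates the set in place; union() only returns a new set
        extracted_links.update(doc["links"])
        del doc["links"]

    for link in filtered_links:
        # upsert semantics need replace_one/update_one; insert_one has no upsert option
        linkcol.replace_one({"url": link}, {"url": link, "status": "backlink"}, upsert=True)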
-
-    content_pages, extracted_links = extract_pages(fetched_pages)
-    contentcol = db["content"]
-    contentcol.insertMany(content_pages)
-    extracted = []
-    for link in extracted_links:
-        extracted.append({"url":link,"status":"backlink"})
-    # should fail if link already exists
-    linkcol.insertMany(extracted)
-    html_pages = []
-    for page in fetched_pages:
-        linkcol.updateOne({"url":page["url"]},{"$set":{"status":"visited"}})
-        if "html" in page:
-            html_pages.append({"url":page["url"],"html":page["html"],"update_time":datetime.now()})
-        del page["html"]
-    htmlcol = db["html"]
-    htmlcol.insertMany(html_pages)
-    for page in page_list:
-        # get paragraph checksums
-    x = contentcol.insert_many(pages_list)
-    page_hashes = []
-
-def index_links(db,link_batch):
-    html_docs = []
-    link_docs = []
-    for link,html in zip(link_batch,html):
-        status = "visited"
-        if html is None:
-            status = "html_error"
-        html_docs.append({"url":link,"html":html})
-        link_docs.append({"url":link,"status":status})
-    return docs
+
+def simple_visit(start_link):
+    known_links = []
+    visit_links,known_links = trafilatura.spider.focused_crawler(start_link,known_links=known_links)
+    htmls = fetch_links(visit_links)
+    extracted_links = extract_links(visit_links,htmls)
+    myclient = pymongo.MongoClient("mongodb://root:example@localhost:27017/")
+    db = myclient["mydatabase"]
+    index_pages(db,extracted_links,start_link)
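
A minimal way to exercise this end to end, assuming MongoDB is reachable locally with the root:example credentials hard-coded above (the start URL is illustrative):

    if __name__ == "__main__":
        # crawls one site with trafilatura's spider, then fetches,
        # extracts, and indexes the visited pages into MongoDB
        simple_visit("https://example.org")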
 #visit_links = trafilatura.feeds.find_feed_urls(domain)
 #visit_links = trafilatura.sitemaps.sitemap_search(domain)
 #print(visit_links)