zz

parent b69463fa9d
commit cdd3b15121
@@ -7,7 +7,7 @@ import sys
 import courlan
 
 
-def calculate_checksums(self, text):
+def calculate_checksums(text):
     """
     @return fingerprints of paragraphs in the text. Paragraphs are separated by a blank line.
     """
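The body of calculate_checksums falls outside this hunk, so only the docstring is visible here. Going by that docstring, a minimal sketch of the intended behaviour could look like the following; the md5 fingerprint and the exact splitting rule are assumptions, not taken from the commit:

    import hashlib

    def calculate_checksums(text):
        # paragraphs are separated by a blank line, per the docstring
        checksums, sizes = [], []
        for paragraph in text.split("\n\n"):
            paragraph = paragraph.strip()
            if not paragraph:
                continue
            # md5 is an assumed fingerprint choice; any stable hash would do
            checksums.append(hashlib.md5(paragraph.encode("utf-8")).hexdigest())
            sizes.append(len(paragraph))
        return checksums, sizes

Returning two parallel lists matches the call site later in this diff, where index_pages unpacks checksums,sizes = calculate_checksums(doc["text"]).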
@@ -39,7 +39,7 @@ def calculate_checksums(self, text):
 
 
 
-def filter_links(links,domain):
+def filter_links(links):
     out = set()
     for link in links:
         r = courlan.check_url(link)
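filter_links is also cut off by the hunk boundary. courlan.check_url returns a (cleaned url, domain) tuple for acceptable URLs and None otherwise, so a plausible completion, assuming the function simply keeps every URL that passes the check, would be:

    import courlan

    def filter_links(links):
        out = set()
        for link in links:
            r = courlan.check_url(link)
            # check_url yields (cleaned_url, domain), or None for rejected links
            if r is not None:
                out.add(r[0])
        return out

Note that the commit drops the domain parameter, so any same-domain restriction the old version enforced is gone in this sketch as well.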
@@ -55,10 +55,6 @@ def filter_links(links,domain):
     return out
 
 
-def get_visited_links(domain):
-    return []
-
-
 def generic_visit(domain):
     known_links = set(get_visited_links(domain))
     visit_links = []
@@ -68,6 +64,7 @@ def generic_visit(domain):
     if visit_links is None:
         visit_links = trafilatura.focused_crawler(dommain,known_links=known_links)
 
+
 def get_new_links(domain):
     known_links = []
     # get seed links
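The unchanged line above still calls trafilatura.focused_crawler(dommain, ...), where dommain looks like a pre-existing typo this commit leaves in place. Elsewhere in the file the crawler is reached as trafilatura.spider.focused_crawler, which returns a pair of (URLs still to visit, all URLs known so far). A minimal standalone usage sketch, with a placeholder homepage:

    import trafilatura.spider

    # placeholder URL; known_links seeds the crawler with already-seen pages
    to_visit, known_urls = trafilatura.spider.focused_crawler(
        "https://example.org", known_links=set())
    print(len(to_visit), len(known_urls))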
@@ -79,74 +76,62 @@ def get_new_links(domain):
 
 
 def fetch_links(link_batch):
     htmls = []
+    print(link_batch)
+    print("zzzzzzzzzz")
     for link in link_batch:
-        rr = trafilatura.fetch_url(page,decode=True)
+        print(link)
+        rr = trafilatura.fetch_url(link,decode=True)
         htmls.append(rr)
-    return htnls
+    return htmls
 
 
 def extract_links(link_batch,htmls):
     out = []
     for link,html in zip(link_batch,htmls):
         doc = None
+        assert link is not None
         if html is not None:
-            doc = trafilatura.bare_extraction(html,extract_links=True)
+            doc = trafilatura.bare_extraction(html,url=link,include_links=True,with_metadata=True,include_formatting=True,target_language="sk")
         out.append((link,html,doc))
     return out
 
-def index_pages(db,extracted_pages):
+def index_pages(db,extracted_pages,domain):
+    extracted_links = set()
     linkcol = db["links"]
     htmlcol = db["html"]
     contentcol = db["content"]
     for link,html,doc in extracted_pages:
         state = "good"
         if html is None:
             state = "html_error"
         elif doc is None:
             state = "content_error"
-        if htnl is not None:
-            htmlcol.insertOne({"url":link,"html":html})
+        if html is not None:
+            htmlcol.insert_one({"url":link,"html":html})
         if doc is not None:
-            checksums,sizes = get_checksums(doc["text"])
+            print(doc)
+            checksums,sizes = calculate_checksums(doc["text"])
             doc["paragraph_checksums"] = checksums
             doc["paragraph_sizes"] = sizes
-            contentcol.insertOne(doc)
-        linkcol.insertOne({"url":link,"status":state},upsert=True)
-
-    content_pages, extracted_links = extract_pages(fetched_pages)
-    contentcol = db["content"]
-    contentcol.insertMany(content_pages)
-    extracted = []
-    for link in extracted_links:
-        extracted.append({"url":link,"status":"backlink"})
-    # shuld fail if link already exists
-    linkcol.insertMany(extracted)
-
-    html_pages = []
-    for page in fetched_pages:
-        linkcol.updateOne({"url":page["url"]},{"$set":{"status":"visited"}})
-        if "html" in page:
-            html_pages.append({"url":page["url"],"html":page["html"],"update_time":datetime.now()})
-            del page["html"]
-    htmlcol = db["html"]
-    htmlcol.insertMany(html_pages)
-
-    for page in page_list:
-        # get paragraph checksums
-    x = contentcol.insert_many(pages_list)
-    page_hashes = []
-
-def index_links(db,link_batch):
-    html_docs = []
-    link_docs = []
-    for link,html in zip(link_batch,html):
-        status = "visited"
-        if html is None:
-            status = "html_error"
-        html_docs.append({"url":link,"html":html})
-        link_docs.append({"url":link,"status":status})
-
-    return docs
+            if "links" in doc:
+                extracted_links.union(doc["links"])
+                del doc["links"]
+            contentcol.insert_one(doc)
+        linkcol.replace_one({"url":link},{"url":link,"status":state},upsert=True)
+    filtered_links = filter_links(extracted_links)
+    for link in filtered_links:
+        linkcol.insert_one({"url":link,"status":"backlink"},upsert=True)
 
 
 def simple_visit(start_link):
     known_links = []
     visit_links,known_links = trafilatura.spider.focused_crawler(start_link,known_links=known_links)
     htmls = fetch_links(visit_links)
     extracted_links = extract_links(visit_links,htmls)
     myclient = pymongo.MongoClient("mongodb://root:example@localhost:27017/")
 
     db = myclient["mydatabase"]
     index_pages(db,extracted_links,start_link)
     #visit_links = trafilatura.feeds.find_feed_urls(domain)
     #visit_links = trafilatura.sitemaps.sitemap_search(domain)
     #print(visit_links)
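Two of the added lines look shaky as written. First, set.union returns a new set instead of mutating in place, so extracted_links.union(doc["links"]) discards its result; set.update is the in-place variant. Second, pymongo's insert_one takes no upsert argument, so the backlink insert at the end of index_pages would raise a TypeError; upserts go through update_one or replace_one. A corrected sketch of that tail, under those assumptions:

    # grow the link set in place; union() would return a new set and change nothing
    extracted_links.update(doc["links"])

    # insert_one() has no upsert flag; update_one(..., upsert=True) creates the
    # document only when the URL is not already tracked
    for link in filter_links(extracted_links):
        linkcol.update_one(
            {"url": link},
            {"$setOnInsert": {"url": link, "status": "backlink"}},
            upsert=True,
        )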