Daniel Hládek 2023-03-29 10:17:57 +02:00
parent 964ebb5bfc
commit 1bd088a38a


@@ -67,13 +67,13 @@ def calculate_checksums(text):
 def is_robot_good(link,rules):
     # check robots.txt rules
     if rules is not None and not rules.can_fetch("*", link):
+        print("bad>>>" + link)
         return False
     return True
 
 def is_link_good(link):
     r = courlan.check_url(link,strict=True,language=LANGUAGE)
     if r is None:
-        #print(link)
         return None
     llink,lhostname = r
     #print(llink,lhostname)
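
The gate above delegates entirely to urllib.robotparser; when rules is None (robots.txt could not be fetched) every link passes. A minimal standalone sketch of the same check, with a made-up rule set and URLs:

    import urllib.robotparser

    # hypothetical robots.txt body, parsed from memory rather than fetched
    rules = urllib.robotparser.RobotFileParser()
    rules.parse("User-agent: *\nDisallow: /private/".split("\n"))

    # can_fetch("*", url) is False for paths the wildcard agent may not crawl
    print(rules.can_fetch("*", "https://example.com/private/x"))  # False
    print(rules.can_fetch("*", "https://example.com/public/x"))   # True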
@@ -128,15 +128,16 @@ def fetch_pages(link_batch):
     return htmls
 
 def fetch_robot(base_url):
-    rules = urllib.robotparser.RobotFileParser()
-    rules.set_url("https://" + base_url + '/robots.txt')
-    # exceptions happening here
     try:
-        rules.read()
+        rawrules = trafilatura.fetch_url("https://"+ base_url + "/robots.txt")
+        #print(rawrules)
+        rules = urllib.robotparser.RobotFileParser()
+        rules.parse(rawrules.split("\n"))
         LOGGER.info('got robots')
     except Exception as exc:
         LOGGER.error('cannot read robots.txt: %s', exc)
         rules = None
+    # exceptions happening here
     return rules
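
The rewrite replaces RobotFileParser.read() with a download through trafilatura.fetch_url followed by parse(), so the whole fetch-and-parse path sits inside one try block. Note that fetch_url returns None on failure, in which case rawrules.split("\n") raises AttributeError and the except clause yields rules = None. A condensed sketch of that behaviour (example.com is illustrative):

    import urllib.robotparser
    import trafilatura

    def fetch_robot(base_url):
        try:
            rawrules = trafilatura.fetch_url("https://" + base_url + "/robots.txt")
            rules = urllib.robotparser.RobotFileParser()
            rules.parse(rawrules.split("\n"))  # None.split(...) raises, caught below
        except Exception:
            rules = None  # is_robot_good treats None as "allow everything"
        return rules

    print(fetch_robot("example.com"))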
@@ -146,7 +147,7 @@ def extract_pages(link_batch,responses):
         doc = None
         assert original_link is not None
         if html is not None:
-            doc = trafilatura.bare_extraction(html,url=final_link,with_metadata=True,include_formatting=True,target_language=LANGUAGE)
+            doc = trafilatura.bare_extraction(html,url=final_link,with_metadata=True,include_formatting=False,target_language=LANGUAGE,favor_precision=True)
             if doc is not None:
                 if not "text" in doc or len(doc["text"]) < MINTEXTSIZE:
                     # text too small
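
Switching include_formatting off and favor_precision on trades markup and recall for cleaner plain text. A hedged sketch of the call on a freshly downloaded page (the URL and the "sk" language code stand in for the project's real inputs and LANGUAGE constant):

    import trafilatura

    url = "https://example.com/article"          # illustrative
    html = trafilatura.fetch_url(url)
    if html is not None:
        # bare_extraction returns a dict of metadata and text, or None
        doc = trafilatura.bare_extraction(html, url=url, with_metadata=True,
            include_formatting=False, target_language="sk", favor_precision=True)
        if doc is not None:
            print(doc["title"], len(doc["text"]))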
@@ -177,6 +178,7 @@ def index_pages(db,hostname,extracted_pages):
             doc["text_size"] = len(text)
             doc["paragraph_checksums"] = checksums
             doc["paragraph_sizes"] = sizes
+            doc["paragraph_sizes_sum"] = sum(sizes)
             goodsz = sum(sizes)
             if len(text) < 200 or goodsz/len(text) < 0.4:
                 stat = "trash"
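
The new paragraph_sizes_sum field just materializes sum(sizes) so the per-host aggregation further down can read it. The trash heuristic next to it keeps a page only when it has at least 200 characters and the paragraphs that survived the checksum/size filter cover at least 40% of the raw text. Worked through with illustrative numbers:

    # a 1000-character page whose surviving paragraphs total 350 characters
    text_size = 1000
    paragraph_sizes = [200, 100, 50]
    goodsz = sum(paragraph_sizes)                      # 350
    is_trash = text_size < 200 or goodsz / text_size < 0.4
    print(goodsz, is_trash)                            # 350 True (0.35 < 0.4)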
@@ -197,6 +199,7 @@ def index_pages(db,hostname,extracted_pages):
 def extract_links(link_batch,responses,hostname,rules,default_status="frontlink"):
     links = {}
+    badrobot = 0
     for original_link,(final_link,html) in zip(link_batch,responses):
         status = default_status
         external_links = courlan.extract_links(html,final_link,external_bool=True,language=LANGUAGE)
@@ -205,18 +208,17 @@ def extract_links(link_batch,responses,hostname,rules,default_status="frontlink"):
         internal_links = courlan.extract_links(html,final_link,external_bool=False,language=LANGUAGE)
         #print(extracted_links)
         for link in internal_links:
+            if not is_robot_good(link,rules):
+                badrobot += 1
+                continue
             status = str(default_status)
             if courlan.is_navigation_page(link):
                 status = "navigation"
             #print(link,status)
             links[link] = status
     outlinks = []
-    badrobot = 0
     badlink = 0
     for link,status in links.items():
-        if not is_robot_good(link,rules):
-            badrobot += 1
-            continue
         link = is_link_good(link)
         if link is None:
             badlink += 1
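
Moving the is_robot_good test into the internal-link loop means disallowed URLs never enter the links dict, so the later status loop no longer needs the check; badrobot is now initialized once at the top of the function. A toy run of the new filtering order, with a stand-in rule check:

    internal_links = ["https://example.com/a", "https://example.com/private/b"]

    def is_robot_good(link, rules):        # stand-in for the real robots check
        return "/private/" not in link

    links, badrobot = {}, 0
    for link in internal_links:
        if not is_robot_good(link, None):
            badrobot += 1
            continue
        links[link] = "frontlink"
    print(links, badrobot)    # {'https://example.com/a': 'frontlink'} 1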
@@ -234,14 +236,18 @@ def index_links(db,extracted_links):
         except pymongo.errors.DuplicateKeyError as ex:
             pass
 
-def get_links(db,hostname,status,batch_size=BATCHSIZE):
+def get_links(db,hostname,status,batch_size):
     linkcol = db["links"]
     res = linkcol.find({"status":status,"host":hostname},{"url":1},limit=batch_size)
     links = []
-    for doc in res:
+    for i,doc in enumerate(res):
         #print(">>>>>" + status)
-        #print(doc)
+        #print(doc);
+        print(">>>>links")
+        print(doc)
         links.append(doc["url"])
+        if i >= batch_size:
+            break
     return links
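
batch_size loses its BATCHSIZE default and becomes a required argument. Since find(..., limit=batch_size) already caps the cursor and enumerate counts from 0, the added i >= batch_size break can only fire if the limit were ever lifted; as written it is a belt-and-braces guard. A sketch of the query against a local MongoDB (host, database name, and filter values are illustrative):

    import pymongo

    client = pymongo.MongoClient("mongodb://localhost:27017")
    linkcol = client["crawler"]["links"]       # "crawler" db name is a stand-in

    batch_size = 100
    res = linkcol.find({"status": "frontlink", "host": "example.com"},
                       {"url": 1}, limit=batch_size)
    links = [doc["url"] for doc in res]        # at most batch_size urls
    print(len(links))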
@ -274,7 +280,11 @@ def link_summary(db,hostname):
res = contentcol.aggregate([ res = contentcol.aggregate([
{"$match":{"host":hostname}}, {"$match":{"host":hostname}},
#{"$project": {"textsum":{"$sum":"$text_size"}}} #{"$project": {"textsum":{"$sum":"$text_size"}}}
{"$group":{"_id":None,"text_size_sum":{"$sum":"$text_size"}}}, {"$group":{"_id":None,
"text_size_sum":{"$sum":"$text_size"},
"paragraph_size_sum":{"$sum":"$paragraph_sizes_sum"}
}
},
]) ])
for item in res: for item in res:
print(item) print(item)
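
With _id set to None, the $group stage folds every document matching the host into a single accumulator, so the pipeline now reports both the raw text total and the retained-paragraph total per host. An equivalent standalone sketch (connection details and names are illustrative):

    import pymongo

    client = pymongo.MongoClient("mongodb://localhost:27017")
    contentcol = client["crawler"]["content"]  # "crawler" db name is a stand-in

    res = contentcol.aggregate([
        {"$match": {"host": "example.com"}},
        {"$group": {"_id": None,
            "text_size_sum": {"$sum": "$text_size"},
            "paragraph_size_sum": {"$sum": "$paragraph_sizes_sum"}}},
    ])
    for item in res:
        print(item)  # e.g. {'_id': None, 'text_size_sum': ..., 'paragraph_size_sum': ...}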
@ -294,7 +304,7 @@ def createdb():
contentcol = db["content"] contentcol = db["content"]
contentcol.create_index("url",unique=True) contentcol.create_index("url",unique=True)
#contentcol.create_index({"paragraph_checksums":1}) #contentcol.create_index({"paragraph_checksums":1})
contentcol.create_index({"host":1}) contentcol.create_index("host")
htmlcol = db["html"] htmlcol = db["html"]
htmlcol.create_index("url",unique=True) htmlcol.create_index("url",unique=True)
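
pymongo's create_index expects either a single key name or a list of (key, direction) pairs; the mapping form {"host":1} is not accepted by the pymongo releases current at the time, which is presumably what this change fixes. Both lines below build the same ascending single-key index (connection details are illustrative):

    import pymongo

    client = pymongo.MongoClient("mongodb://localhost:27017")
    contentcol = client["crawler"]["content"]  # "crawler" db name is a stand-in

    contentcol.create_index("host")                         # shorthand
    contentcol.create_index([("host", pymongo.ASCENDING)])  # explicit equivalent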