commit 399f263921 (parent 0accadf633)
Author: Daniel Hládek
Date:   2023-03-17 12:30:53 +01:00


@@ -63,30 +63,17 @@ def is_link_good(link):
     if r is None:
         #print(link)
         return None
-    llink,ldomain = r
-    #print(llink,ldomain)
-    # domain rules
-    if not ldomain.endswith(DOMAIN):
-        LOGGER.debug("bad domain")
+    llink,lhostname = r
+    #print(llink,lhostname)
+    # hostname rules
+    if not lhostname.endswith(DOMAIN):
+        LOGGER.debug("bad hostname")
         return None
     if courlan.is_not_crawlable(llink):
         LOGGER.debug("not crawlable")
         return None
     return llink
 
-def filter_links(links,rules=None):
-    out = set()
-    for link in links:
-        r = is_link_good(link)
-        if r is None:
-            continue
-        # check robots.txt rules
-        if rules is not None and not rules.can_fetch("*", r):
-            continue
-        out.add(llink)
-    return out
 
 def get_link_doc(link,status="frontlink"):
     r = courlan.check_url(link)
     assert r is not None
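
For context, the deleted filter_links combined is_link_good with a robots.txt check, though its out.add(llink) referenced an undefined name (r was evidently intended). A minimal sketch of the equivalent filtering, assuming rules is a urllib.robotparser.RobotFileParser, as the can_fetch("*", ...) call suggests, and a hypothetical host:

    import urllib.robotparser

    rules = urllib.robotparser.RobotFileParser()
    rules.set_url("https://www.example.sk/robots.txt")  # hypothetical host
    rules.read()

    good = set()
    for link in ["https://www.example.sk/a", "https://www.example.sk/login"]:
        r = is_link_good(link)
        if r is not None and rules.can_fetch("*", r):  # honour robots.txt
            good.add(r)  # the deleted code used the undefined name llink here
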
@@ -94,12 +81,12 @@ def get_link_doc(link,status="frontlink"):
     domain = courlan.extract_domain(link)
     return {"url":link,"host":host,"domain":domain,"status":status,"created_at":datetime.utcnow()}
 
-def generic_visit(domain):
-    known_links = set(get_visited_links(domain))
+def generic_visit(hostname):
+    known_links = set(get_visited_links(hostname))
     visit_links = []
-    visit_links = trafilatura.find_feed_urls(domain)
+    visit_links = trafilatura.find_feed_urls(hostname)
     if visit_links is None:
-        visit_links = trafilatura.sitemap_search(domain)
+        visit_links = trafilatura.sitemap_search(hostname)
     if visit_links is None:
         visit_links = trafilatura.focused_crawler(dommain,known_links=known_links)
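
generic_visit falls back from feeds to sitemaps to a focused crawl. Note that the dommain typo in the last context line survives the rename and is now also an undefined name, since the parameter is hostname. A sketch of the intended chain (seed_links is a hypothetical name; the trafilatura helpers are assumed to be callable exactly as the code above calls them):

    def seed_links(hostname, known_links):
        # cheapest first: site feeds usually list fresh article URLs
        visit_links = trafilatura.find_feed_urls(hostname)
        if visit_links is None:
            # next: the sitemap, if the site publishes one
            visit_links = trafilatura.sitemap_search(hostname)
        if visit_links is None:
            # last resort: spider outward from the front page
            visit_links = trafilatura.focused_crawler(hostname, known_links=known_links)
        return visit_links
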
@@ -170,7 +157,7 @@ def extract_pages(link_batch,responses):
     return out
 
-def index_pages(db,domain,extracted_pages):
+def index_pages(db,hostname,extracted_pages):
     linkcol = db["links"]
     htmlcol = db["html"]
     contentcol = db["content"]
@@ -209,7 +196,7 @@ def index_pages(db,hostname,extracted_pages):
     linkcol.update_one({"url":original_link},{"$set":{"status":state}})
 
-def extract_links(link_batch,responses,domain,rules,default_status="frontlink"):
+def extract_links(link_batch,responses,hostname,rules,default_status="frontlink"):
     links = {}
     for original_link,(final_link,html) in zip(link_batch,responses):
         status = default_status
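
extract_links walks the original batch in lockstep with the fetch results, each of which pairs the final (post-redirect) URL with the raw HTML. A sketch of that pairing, with fabricated example data:

    link_batch = ["https://www.example.sk/a"]
    responses = [("https://www.example.sk/a/", "<html>...</html>")]  # made-up response
    for original_link, (final_link, html) in zip(link_batch, responses):
        redirected = final_link != original_link  # differing URLs signal a redirect
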
@@ -249,9 +236,9 @@ def index_links(db,extracted_links):
         pass
 
-def get_links(db,domain,status,batch_size=BATCHSIZE):
+def get_links(db,hostname,status,batch_size=BATCHSIZE):
     linkcol = db["links"]
-    res = linkcol.find({"status":status,"host":domain},{"url":1},limit=batch_size)
+    res = linkcol.find({"status":status,"host":hostname},{"url":1},limit=batch_size)
     links = []
     for doc in res:
         #print(">>>>>" + status)
@@ -261,32 +248,32 @@ def get_links(db,hostname,status,batch_size=BATCHSIZE):
 
-def process_links(db,domain,status,links=[],rules=None,batch_size=BATCHSIZE):
+def process_links(db,hostname,status,links=[],rules=None,batch_size=BATCHSIZE):
     #print(links)
     responses = fetch_pages(links)
     #print(responses)
     extracted_pages = extract_pages(links,responses)
     #print(extracted_pages)
-    extracted_links = extract_links(links,responses,domain,rules,status)
+    extracted_links = extract_links(links,responses,hostname,rules,status)
     #print(extracted_links)
     index_links(db,extracted_links)
-    index_pages(db,domain,extracted_pages)
+    index_pages(db,hostname,extracted_pages)
 
-def link_summary(db,domain):
+def link_summary(db,hostname):
     linkcol = db["links"]
-    #res = linkcol.distinct("domain",{"hostname":domain})
+    #res = linkcol.distinct("hostname",{"hostname":hostname})
     # count links
     res = linkcol.aggregate([
-        {"$match":{"host":domain}},
+        {"$match":{"host":hostname}},
         {"$group":{"_id":"$status","count":{"$sum":1}}},
     ])
     for item in res:
         print(item)
     contentcol = db["content"]
     res = contentcol.aggregate([
-        {"$match":{"hostname":domain}},
+        {"$match":{"hostname":hostname}},
         {"$group":{"_id":None,"text_size_sum":{"$sum":"text_size"}}},
     ])
     for item in res:
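
One detail in link_summary worth flagging: in a MongoDB $group stage, $sum needs a $-prefixed field path, so {"$sum":"text_size"} sums a constant string and always yields 0. A corrected sketch of the second pipeline, reusing the function's own variables:

    res = contentcol.aggregate([
        {"$match": {"hostname": hostname}},
        {"$group": {"_id": None, "text_size_sum": {"$sum": "$text_size"}}},  # note the $
    ])
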
@@ -307,21 +294,21 @@ def createdb():
     contentcol = db["content"]
     contentcol.create_index("url",unique=True)
     #contentcol.create_index({"paragraph_checksums":1})
-    #contentcol.create_index({"domain":1})
+    #contentcol.create_index({"hostname":1})
     htmlcol = db["html"]
     htmlcol.create_index("url",unique=True)
 
 @cli.command()
 @click.argument("start_link")
 def parseurl(start_link):
-    link,domain = courlan.check_url(start_link)
+    link,hostname = courlan.check_url(start_link)
     links = [link]
     responses = fetch_pages(links)
     #pprint.pprint(responses)
     extracted_pages = extract_pages(links,responses)
     for ol,bl,html,doc in extracted_pages:
         pprint.pprint(doc)
-    extracted_links = extract_links(links,responses,domain,None,"backlink")
+    extracted_links = extract_links(links,responses,hostname,None,"backlink")
     pprint.pprint(extracted_links)
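
The unique indexes created in createdb are what let the crawler treat url as a primary key across collections. A sketch of the behaviour they enforce, assuming pymongo and reusing htmlcol from the function above:

    from pymongo.errors import DuplicateKeyError

    htmlcol.create_index("url", unique=True)
    htmlcol.insert_one({"url": "https://www.example.sk/a", "html": "<html/>"})
    try:
        htmlcol.insert_one({"url": "https://www.example.sk/a", "html": "<html/>"})
    except DuplicateKeyError:
        pass  # duplicate URLs are rejected instead of stored twice
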
@@ -330,25 +317,25 @@ def parseurl(start_link):
 def visit(start_link):
     myclient = pymongo.MongoClient(CONNECTION)
     db=myclient[DBNAME]
-    start_link,domain = courlan.check_url(start_link)
-    rules = fetch_robot(domain)
+    start_link,hostname = courlan.check_url(start_link)
+    rules = fetch_robot(hostname)
     print(rules)
     batch_size = BATCHSIZE
-    navigation_links = get_links(db,domain,"navigation",batch_size)
+    navigation_links = get_links(db,hostname,"navigation",batch_size)
     if start_link is not None:
         navigation_links.append(start_link)
     print(f"Navigation links {len(navigation_links)}")
-    process_links(db,domain,"frontlink",navigation_links,rules)
-    links = get_links(db,domain,"frontlink",batch_size)
+    process_links(db,hostname,"frontlink",navigation_links,rules)
+    links = get_links(db,hostname,"frontlink",batch_size)
     bl = len(links) - batch_size
     print(f"Got {len(links)} frontlinks")
     if bl > 0:
         print("Getting backlinks")
-        front_links = get_links(db,domain,"backlink",bl)
+        front_links = get_links(db,hostname,"backlink",bl)
         links += front_links
         print("Processing backlinks")
-        process_links(db,domain,"backlink",links,rules=rules)
-    link_summary(db,domain)
+        process_links(db,hostname,"backlink",links,rules=rules)
+    link_summary(db,hostname)
 
 if __name__ == "__main__":
     cli()
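
A remaining oddity in visit: get_links caps its result at batch_size, so bl = len(links) - batch_size can never be positive and the backlink branch is dead code. batch_size - len(links) looks like the intended arithmetic; a sketch of that top-up, reusing the function's variables:

    links = get_links(db, hostname, "frontlink", batch_size)
    bl = batch_size - len(links)      # free slots left in the batch
    if bl > 0:                        # top the batch up with backlinks
        links += get_links(db, hostname, "backlink", bl)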