"""Simple web crawler: fetches pages with trafilatura, filters links with
courlan and stores pages, extracted text and link metadata in MongoDB."""

import logging
import os
import pprint
import urllib.robotparser
from datetime import datetime

import click
import courlan
import pymongo
import trafilatura
import trafilatura.spider
import trafilatura.utils

LOGGER = logging.getLogger(__name__)

LANGUAGE = os.getenv("SUCKER_LANGUAGE", "sk")
DOMAIN = os.getenv("SUCKER_DOMAIN", "sk")
BATCHSIZE = int(os.getenv("SUCKER_BATCHSIZE", "10"))
CONNECTION = os.getenv("SUCKER_CONNECTION", "mongodb://root:example@localhost:27017/")
DBNAME = os.getenv("SUCKER_DBNAME", "crawler")
MINFILESIZE = 300
MAXFILESIZE = 10000000
MINTEXTSIZE = 200


# Simple MongoDB-backed work queue helpers (not used by the CLI commands below).

def put_queue(db, channel, message):
    queuecol = db["queue"]
    queuecol.insert_one({"channel": channel, "message": message,
                         "created_at": datetime.utcnow(), "started_at": None})


def reserve_queue(db, channel):
    # Atomically take the newest message from the channel.
    queuecol = db["queue"]
    return queuecol.find_one_and_delete({"channel": channel},
                                        sort=[("created_at", -1)])


def delete_queue(db, channel):
    # Drop all pending messages in the channel.
    queuecol = db["queue"]
    queuecol.delete_many({"channel": channel})


def calculate_checksums(text):
    """
    Calculate rolling-hash fingerprints and sizes of the paragraphs in text.
    Paragraphs are separated by a newline; only paragraphs with more than
    100 hashed characters are fingerprinted.

    @return a tuple (checksums, sizes)
    """
    checksums = []
    sizes = []
    hval = 0
    hsz = 0
    sz = 0
    for c in text:
        cv = ord(c)
        sz += 1
        if cv > 64:
            # 31-bit rolling hash over the significant characters.
            hval += (hval << 3) + cv
            zv = hval >> 31
            hval &= 0x7fffffff
            hval += zv
            hsz += 1
        if c == "\n" and hsz > 0:
            if hsz > 100:
                checksums.append(hval)
                sizes.append(sz)
            sz = 0
            hsz = 0
    if hsz > 100:
        checksums.append(hval)
        sizes.append(sz)
    return checksums, sizes


def is_robot_good(link, rules):
    # Check the robots.txt rules.
    if rules is not None and not rules.can_fetch("*", link):
        LOGGER.debug("disallowed by robots.txt: %s", link)
        return False
    return True


def is_link_good(link):
    r = courlan.check_url(link, strict=True, language=LANGUAGE)
    if r is None:
        return None
    llink, lhostname = r
    # Hostname must belong to the configured top-level domain.
    if not lhostname.endswith(DOMAIN):
        LOGGER.debug("bad hostname")
        return None
    if courlan.is_not_crawlable(llink):
        LOGGER.debug("not crawlable")
        return None
    return llink


def get_link_doc(link, status="frontlink"):
    r = courlan.check_url(link)
    assert r is not None
    link, host = r
    domain = courlan.extract_domain(link)
    return {"url": link, "host": host, "domain": domain, "status": status,
            "created_at": datetime.utcnow()}


def fetch_pages(link_batch):
    htmls = []
    for link in link_batch:
        print("fetching::::: " + link)
        final_link = link
        response = trafilatura.fetch_url(link, decode=False)
        html = None
        if response is not None:
            good = True
            if response.status != 200:
                good = False
                LOGGER.error('not a 200 response: %s for URL %s', response.status, link)
            elif response.data is None or len(response.data) < MINFILESIZE:
                good = False  # raise error instead?
                LOGGER.error('too small/incorrect for URL %s', link)
            elif len(response.data) > MAXFILESIZE:
                good = False
                LOGGER.error('too large: length %s for URL %s', len(response.data), link)
            if good:
                html = trafilatura.utils.decode_response(response)
                final_link = response.url
                if html is not None:
                    # Is there a meta-refresh on the page?
                    html, final_link = trafilatura.spider.refresh_detection(html, final_link)
                    if final_link is None:  # malformed or malicious content
                        html = None
        htmls.append((final_link, html))
    return htmls


def fetch_robot(base_url):
    try:
        rawrules = trafilatura.fetch_url("https://" + base_url + "/robots.txt")
        rules = urllib.robotparser.RobotFileParser()
        rules.parse(rawrules.split("\n"))
        LOGGER.info('got robots')
    except Exception as exc:
        # Download or parse failure: crawl without robots.txt rules.
        LOGGER.error('cannot read robots.txt: %s', exc)
        rules = None
    return rules


def extract_pages(link_batch, responses):
    out = []
    for original_link, (final_link, html) in zip(link_batch, responses):
        doc = None
        assert original_link is not None
        if html is not None:
            doc = trafilatura.bare_extraction(html, url=final_link, with_metadata=True,
                                              include_formatting=False,
                                              target_language=LANGUAGE,
                                              favor_precision=True)
            if doc is not None:
                if "text" not in doc or len(doc["text"]) < MINTEXTSIZE:
                    # Text too small.
                    doc = None
        out.append((original_link, final_link, html, doc))
    return out


def index_pages(db, hostname, extracted_pages):
    linkcol = db["links"]
    htmlcol = db["html"]
    contentcol = db["content"]
    for original_link, final_link, html, doc in extracted_pages:
        state = "good"
        link = original_link
        if original_link != final_link:
            linkcol.update_one({"url": original_link}, {"$set": {"status": "redirect"}})
            link = final_link
        if html is None:
            state = "html_error"
        elif doc is None:
            state = "content_error"
        if doc is not None:
            text = doc["text"]
            checksums, sizes = calculate_checksums(text)
            doc["text_size"] = len(text)
            doc["paragraph_checksums"] = checksums
            doc["paragraph_sizes"] = sizes
            goodsz = sum(sizes)
            doc["paragraph_sizes_sum"] = goodsz
            # Discard pages that are too short or dominated by short paragraphs.
            if len(text) < MINTEXTSIZE or goodsz / len(text) < 0.6:
                state = "trash"
        if state == "good":
            htdoc = get_link_doc(link, state)
            htdoc["html"] = html
            htdoc["html_size"] = len(html)
            # Can be revisited - upsert.
            del htdoc["url"]
            htmlcol.update_one({"url": link}, {"$set": htdoc}, upsert=True)
            doc.update(get_link_doc(link, "good"))
            # TODO: extract links
            print(doc)
            del doc["url"]
            contentcol.update_one({"url": link}, {"$set": doc}, upsert=True)
        linkcol.update_one({"url": original_link}, {"$set": {"status": state}})


def extract_links(link_batch, responses, hostname, rules, default_status="frontlink"):
    links = {}
    badrobot = 0
    for original_link, (final_link, html) in zip(link_batch, responses):
        if html is None:
            continue
        # Links to other hosts become frontlinks.
        external_links = courlan.extract_links(html, final_link, external_bool=True,
                                               language=LANGUAGE)
        for link in external_links:
            links[link] = "frontlink"
        internal_links = courlan.extract_links(html, final_link, external_bool=False,
                                               language=LANGUAGE)
        for link in internal_links:
            if not is_robot_good(link, rules):
                badrobot += 1
                continue
            status = str(default_status)
            if courlan.is_navigation_page(link):
                status = "navigation"
            links[link] = status
    outlinks = []
    badlink = 0
    for link, status in links.items():
        link = is_link_good(link)
        if link is None:
            badlink += 1
            continue
        outlinks.append((link, status))
    print(f"{len(links)} total links, {badrobot} badrobot, {badlink} badlinks")
    return outlinks


def index_links(db, extracted_links):
    linkcol = db["links"]
    for link, status in extracted_links:
        doc = get_link_doc(link, status)
        try:
            linkcol.insert_one(doc)
        except pymongo.errors.DuplicateKeyError:
            # Already known link.
            pass


def get_links(db, hostname, status, batch_size):
    linkcol = db["links"]
    res = linkcol.find({"status": status, "host": hostname}, {"url": 1}, limit=batch_size)
    links = []
    for doc in res:
        print(doc)
        links.append(doc["url"])
    return links
def process_links(db, hostname, status, links=None, rules=None, batch_size=BATCHSIZE):
    # Fetch a batch of links, index the extracted content and the outgoing links.
    if links is None:
        links = []
    responses = fetch_pages(links)
    extracted_pages = extract_pages(links, responses)
    extracted_links = extract_links(links, responses, hostname, rules, status)
    index_links(db, extracted_links)
    index_pages(db, hostname, extracted_pages)


def link_summary(db, hostname):
    linkcol = db["links"]
    # Count links per status.
    res = linkcol.aggregate([
        {"$match": {"host": hostname}},
        {"$group": {"_id": "$status", "count": {"$sum": 1}}},
    ])
    for item in res:
        print(item)
    print(">>>Domain Content")
    contentcol = db["content"]
    res = contentcol.aggregate([
        {"$match": {"host": hostname}},
        {"$group": {
            "_id": None,
            "text_size_sum": {"$sum": "$text_size"},
            "paragraph_size_sum": {"$sum": "$paragraph_sizes_sum"},
        }},
    ])
    for item in res:
        print(item)


@click.group()
def cli():
    pass


@cli.command()
def createdb():
    myclient = pymongo.MongoClient(CONNECTION)
    db = myclient[DBNAME]
    linkcol = db["links"]
    linkcol.create_index("url", unique=True)
    linkcol.create_index("host")
    contentcol = db["content"]
    contentcol.create_index("url", unique=True)
    # contentcol.create_index("paragraph_checksums")
    contentcol.create_index("host")
    htmlcol = db["html"]
    htmlcol.create_index("url", unique=True)


@cli.command()
@click.argument("link")
def parseurl(link):
    # Debugging helper: show robots.txt verdicts and the extracted document.
    link, hostname = courlan.check_url(link)
    rawrules = trafilatura.fetch_url("https://" + hostname + "/robots.txt")
    print(rawrules)
    rules = urllib.robotparser.RobotFileParser()
    rules.parse(rawrules.split("\n"))
    print(rules.can_fetch("*", link))
    print(rules.site_maps())
    print(rules.crawl_delay("*"))
    html = trafilatura.fetch_url(link, decode=True)
    doc = trafilatura.bare_extraction(html)
    pprint.pprint(doc)


@cli.command()
@click.argument("link")
def externaldomains(link):
    # List the external domains referenced by the page.
    html = trafilatura.fetch_url(link, decode=True)
    external_links = courlan.extract_links(html, link, external_bool=True,
                                           language=LANGUAGE)
    domains = set()
    for l in external_links:
        r = courlan.check_url(l)
        if r is None:
            continue
        link, domain = r
        domains.add(domain)
    for d in domains:
        print(d)


@cli.command()
@click.argument("start_link")
def visit(start_link):
    myclient = pymongo.MongoClient(CONNECTION)
    db = myclient[DBNAME]
    start_link, hostname = courlan.check_url(start_link)
    rules = fetch_robot(hostname)
    print(rules)
    batch_size = BATCHSIZE
    navigation_links = get_links(db, hostname, "navigation", batch_size)
    if start_link is not None:
        navigation_links.append(start_link)
    print(f"Navigation links {len(navigation_links)}")
    process_links(db, hostname, "frontlink", navigation_links, rules)
    links = get_links(db, hostname, "frontlink", batch_size)
    print(f"Got {len(links)} frontlinks")
    # Top the batch up with backlinks when there are not enough frontlinks.
    bl = batch_size - len(links)
    if bl > 0:
        print("Getting backlinks")
        back_links = get_links(db, hostname, "backlink", bl)
        links += back_links
    print("Processing backlinks")
    process_links(db, hostname, "backlink", links, rules=rules)
    link_summary(db, hostname)


if __name__ == "__main__":
    cli()
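
# Example session (a sketch: it assumes this file is saved as crawler.py,
# that a MongoDB instance is reachable at SUCKER_CONNECTION, and it uses a
# hypothetical start URL):
#
#   python crawler.py createdb
#   python crawler.py parseurl https://example.sk/
#   python crawler.py visit https://example.sk/
#
# "createdb" must be run once to create the unique indexes; "visit" then
# crawls one batch of links on the host of the given start URL and prints
# a per-status summary of that host's links.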