works

parent a1638db2c5
commit 08ccc93977
@@ -6,9 +6,12 @@ import trafilatura.spider
 import trafilatura.utils
 import sys
 import courlan
+import urllib
 
 LANGUAGE="sk"
 BATCHSIZE=10
+MINFILESIZE=300
+MAXFILESIZE=1000000
 
 def calculate_checksums(text):
     """
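A note on the new `import urllib`: in CPython, importing the `urllib` package does not load its submodules, and `fetch_robot` below relies on `urllib.robotparser`. A minimal sketch of the safer explicit form (the host is a made-up example):

```python
# Explicit submodule import; `import urllib` alone leaves
# urllib.robotparser unbound.
import urllib.robotparser

rules = urllib.robotparser.RobotFileParser()
rules.set_url("https://example.sk/robots.txt")  # hypothetical host
```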
@@ -46,8 +49,10 @@ def is_robot_good(link,rules):
     return True
 
 def is_link_good(link):
-    r = courlan.check_url(link,strict=True,language=language)
+    r = courlan.check_url(link,strict=True,language=LANGUAGE)
     if r is None:
+        print("BBBBBBB")
+        print(link)
         return None
     llink,ldomain = r
     print(llink,ldomain)
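The fix here replaces the undefined name `language` with the module constant `LANGUAGE`. For reference, a minimal sketch of what the corrected call yields, assuming courlan's documented contract that `check_url` returns a cleaned `(url, domain)` pair or `None` (the URL is hypothetical):

```python
import courlan

# Strict mode plus a language hint rejects tracking parameters
# and targets that do not look Slovak.
r = courlan.check_url("https://example.sk/spravy/clanok?utm_source=x",
                      strict=True, language="sk")
if r is not None:
    clean_url, domain = r
```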
@@ -58,7 +63,7 @@ def is_link_good(link):
     if courlan.is_not_crawlable(llink):
         print("not crawlable")
         return None
-    return None
+    return llink
 
 def filter_links(links,rules=None):
     out = set()
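This is the functional fix of the commit: `is_link_good` previously returned `None` on every path, so no link ever survived filtering; it now returns the normalized link. A quick hedged sanity check against the patched function (hypothetical URL):

```python
# After the fix, an acceptable link comes back normalized
# instead of always None.
link = is_link_good("https://example.sk/spravy/clanok")
assert link is None or isinstance(link, str)
```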
@@ -72,14 +77,6 @@ def filter_links(links,rules=None):
         out.add(llink)
     return out
 
-def sort_links(links,domain):
-    for llink in filtered_links:
-        doc = get_link_doc(link,"backlink")
-        if courlan.is_external(link,domain):
-            doc["status"]= "frontlink"
-        elif courlan.is_navigation(link):
-            doc["status"] = "navigation"
-        linkcol.insert_one(doc)
 
 def get_link_doc(link,status="frontlink"):
     r = courlan.check_url(link)
@@ -100,44 +97,57 @@ def generic_visit(domain):
 
 def fetch_pages(link_batch):
     htmls = []
-    print(link_batch)
-    print("zzzzzzzzzz")
+    #print(link_batch)
+    #print("zzzzzzzzzz")
     for link in link_batch:
         print("fetching:::::")
         print(link)
+        final_link = link
         response = trafilatura.fetch_url(link,decode=False)
-        htmlstring, homepage = trafilatura.spider.refresh_detection(response.data, link)
-        # is there a meta-refresh on the page?
-        if homepage is None: # malformed or malicious content
-            response = None
-        htmls.append(response)
+        html = None
+        if response is not None :
+            good = True
+            if response.status != 200:
+                good = False
+                #LOGGER.error('not a 200 response: %s for URL %s', response.status, url)
+            elif response.data is None or len(response.data) < MINFILESIZE:
+                #LOGGER.error('too small/incorrect for URL %s', url)
+                good = False
+                # raise error instead?
+            elif len(response.data) > MAXFILESIZE:
+                good = False
+                #LOGGER.error('too large: length %s for URL %s', len(response.data), url)
+            if good:
+                html = trafilatura.utils.decode_response(response)
+                final_link = response.url
+                if html is not None:
+                    html, final_link = trafilatura.spider.refresh_detection(html, final_link)
+                    # is there a meta-refresh on the page?
+                    if final_link is None: # malformed or malicious content
+                        html = None
+        htmls.append((final_link,html))
     return htmls
 
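`fetch_pages` now returns `(final_link, html)` tuples and gates each response by status code and body size before decoding. The new checks, condensed into one standalone predicate as a sketch (it assumes, as the code above does, that trafilatura's `fetch_url(..., decode=False)` result exposes `.status`, `.data` and `.url`):

```python
def response_is_good(response, minsize=300, maxsize=1000000):
    """Mirror of the new gating logic in fetch_pages."""
    if response is None or response.status != 200:
        return False                      # network error or non-200
    if response.data is None or len(response.data) < minsize:
        return False                      # empty or suspiciously small body
    if len(response.data) > maxsize:
        return False                      # oversized download
    return True
```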
-def fetch_rules(base_url):
+def fetch_robot(base_url):
     rules = urllib.robotparser.RobotFileParser()
     rules.set_url(base_url + '/robots.txt')
     # exceptions happening here
     try:
         rules.read()
     except Exception as exc:
-        LOGGER.error('cannot read robots.txt: %s', exc)
+        #LOGGER.error('cannot read robots.txt: %s', exc)
         rules = None
     return rules
 
 
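The renamed `fetch_robot` is a thin wrapper around the standard-library parser. A usage sketch with a hypothetical host:

```python
rules = fetch_robot("https://example.sk")
if rules is not None:
    # RobotFileParser.can_fetch checks a user agent against robots.txt
    print(rules.can_fetch("*", "https://example.sk/spravy"))
```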
 def extract_pages(link_batch,responses):
     out = []
-    extracted_links = set()
-    for link,response in zip(link_batch,responses):
+    for original_link,(final_link,html) in zip(link_batch,responses):
         doc = None
-        assert link is not None
-        html = None
-        response_link = None
-        if response is not None:
-            html = trafilatura.utils.decode_response(response)
+        assert original_link is not None
         if html is not None:
-            doc = trafilatura.bare_extraction(html,url=link,with_metadata=True,include_formatting=True,target_language=LANGUAGE)
-        out.append((link,response_link,html,doc))
+            doc = trafilatura.bare_extraction(html,url=final_link,with_metadata=True,include_formatting=True,target_language=LANGUAGE)
+        out.append((original_link,final_link,html,doc))
     return out
 
 
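`extract_pages` now consumes the `(final_link, html)` tuples directly instead of re-decoding raw responses. A usage sketch of the extraction call, assuming the installed trafilatura version returns a plain dict from `bare_extraction` (newer releases return a Document object instead):

```python
import trafilatura

doc = trafilatura.bare_extraction(html, url=final_link,
                                  with_metadata=True,
                                  include_formatting=True,
                                  target_language="sk")
if doc is not None:
    print(doc.get("title"), doc.get("date"))
```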
@@ -146,12 +156,17 @@ def index_pages(db,domain,extracted_pages):
     linkcol = db["links"]
     htmlcol = db["html"]
     contentcol = db["content"]
+    links = []
     for original_link,final_link,html,doc in extracted_pages:
         state = "good"
         if html is None:
             state = "html_error"
         elif doc is None:
             state = "content_error"
+        if original_link != final_link:
+            linkcol.insert_one(get_link_doc(final_link,state))
+            state = "redirect"
+        linkcol.update_one({"url":original_link},{"$set":{"status":state}})
         if html is not None:
             htmlcol.insert_one({"url":final_link,"html":html})
         if doc is not None:
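`index_pages` now records redirects: the final URL gets its own link document with the real fetch state, while the original URL is flipped to "redirect" so it is not fetched again. A minimal pymongo sketch of that update, with a hypothetical connection string, database name and URL:

```python
import pymongo

client = pymongo.MongoClient("mongodb://localhost:27017")  # assumed local instance
linkcol = client["crawler"]["links"]
linkcol.update_one({"url": "https://example.sk/old"},
                   {"$set": {"status": "redirect"}})
```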
@@ -165,28 +180,20 @@ def index_pages(db,domain,extracted_pages):
 
 def extract_links(link_batch,responses,domain,rules,default_status="frontlink"):
     links = {}
-    for original_link,response in zip(link_batch,resposnes):
-        final_link = response.url
+    for original_link,(final_link,html) in zip(link_batch,responses):
         status = default_status
-        link = original_link
-        if original_link != final_link:
-            links[original_link] = "redirect"
-            link = final_link
-        if courlan.is_external(link,domain):
-            status = "frontlink"
-        elif courlan.is_navigation(link):
-            status = "navigation"
-        links[link] = status
-        extracted_links = courlan.extract_links(response.content)
-        for link in extracted_links
+        extracted_links = courlan.extract_links(html,final_link,False,language=LANGUAGE)
+        #print(extracted_links)
+        for link in extracted_links:
             if courlan.is_external(link,domain):
                 status = "frontlink"
             elif courlan.is_navigation(link):
                 status = "navigation"
+            #print(link,status)
             links[link] = status
     outlinks = []
     for link,status in links.items():
-        if not is_robot_good(rules):
+        if not is_robot_good(link,rules):
             continue
         link = is_link_good(link)
         if link is None:
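`extract_links` now pulls candidates from the fetched HTML via `courlan.extract_links` instead of re-classifying the landing URL itself (the old loop also referenced the misspelled `resposnes` and a `for` missing its colon), and `is_robot_good` finally receives the link it is supposed to check. The per-link classification, condensed as a sketch:

```python
import courlan

def classify(link, domain, default="frontlink"):
    # Same decision order as the loop above.
    if courlan.is_external(link, domain):
        return "frontlink"
    if courlan.is_navigation(link):
        return "navigation"
    return default
```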
@@ -203,34 +210,25 @@ def index_links(db,extracted_links):
 
 def get_links(db,domain,status,batch_size=BATCHSIZE):
     linkcol = db["links"]
-    res = linkcol.find({"status":status,"domain":domain},{"url":1},limit=batch_size)
-    print(res,domain,status)
-    front_links = []
+    res = linkcol.find({"status":status,"host":domain},{"url":1},limit=batch_size)
+    links = []
     for doc in res:
+        print(">>>>>" + status)
         print(doc)
-        front_links.append(doc["url"])
-    return front_links
+        links.append(doc["url"])
+    return links
 
 
-def index_front_links(db,filtered_links):
-    linkcol = db["links"]
-    for link in filtered_links:
-        linkcol.insert_one(get_link_doc(link,"frontlink"))
 
-#visit_links = trafilatura.feeds.find_feed_urls(domain)
-#visit_links = trafilatura.sitemaps.sitemap_search(domain)
-#print(visit_links)
-#for link in visit_links:
-#    content = trafilatura.fetch_url(link,decode=True)
-#    document = trafilatura.bare_extraction(content)
-#    print(content)
-
-def process_links(status,domain,links=[],rules=None):
+def process_links(db,domain,status,links=[],rules=None):
     links += get_links(db,domain,status)
+    #print(links)
     responses = fetch_pages(links)
+    #print(responses)
     extracted_pages = extract_pages(links,responses)
+    #print(extracted_pages)
     extracted_links = extract_links(links,responses,domain,rules,status)
+    print(extracted_links)
     index_links(db,extracted_links)
     index_pages(db,domain,extracted_pages)
 
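`get_links` now filters on the "host" field (matching what `get_link_doc` stores), the dead `index_front_links` and commented-out feed/sitemap scaffolding are gone, and `process_links` takes the database handle explicitly. A usage sketch with hypothetical arguments:

```python
# Fetch up to BATCHSIZE frontlinks for one host, then run a full
# fetch/extract/index pass over them.
links = get_links(db, "www.example.sk", "frontlink")
process_links(db, "www.example.sk", "frontlink", links, rules=rules)
```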
@@ -242,17 +240,7 @@ def simple_visit(start_link):
     navigation_links =[start_link]
     print(navigation_links)
     process_links(db,domain,"navigation",navigation_links,rules)
-    process_links(db,domain,front_links,rules)
-    back_links = get_links(db,domain,"backlink")
-    process_links(db,domain,front_links,rules)
-    #new_front_links = fetch_front_links(navigation_links)
-    print("NEW FRONT LINKS")
-    #print(new_front_links)
-    #index_front_links(db,new_front_links)
-    front_links = get_links(db,domain,"frontlink")
-    print("NEW VISIT LINKS")
-    visit_links = front_links
-    print(visit_links)
-    responses = fetch_pages(visit_links)
+    process_links(db,domain,"frontlink",rules=rules)
+    process_links(db,domain,"backlink",rules=rules)
 
 simple_visit(sys.argv[1])
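With this change, `simple_visit` reduces to three `process_links` passes over the same domain (navigation, then frontlink, then backlink) in place of the hand-rolled fetch loop; the script is still started with a single seed URL as its first command-line argument.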