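"""A simple web crawler: discovers links on a start domain, fetches the
pages with trafilatura, extracts their text, and stores links, raw HTML
and extracted content in MongoDB ("links", "html", "content")."""
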
import sys

import courlan
import pymongo
import trafilatura
import trafilatura.feeds
import trafilatura.sitemaps
import trafilatura.spider


def calculate_checksums(text):
    """
    @return checksums and sizes of paragraphs in the text.
    Paragraphs are separated by newlines; only paragraphs with more than
    100 hashed characters are fingerprinted.
    """
    checksums = []
    sizes = []
    hval = 0   # rolling hash of the current paragraph
    hsz = 0    # number of hashed characters in the current paragraph
    sz = 0     # total size of the current paragraph in characters
    for c in text:
        cv = ord(c)
        sz += 1
        if cv > 64:
            # hash only characters above ASCII 64, skipping spaces,
            # digits and most punctuation
            hval += (hval << 3) + cv
            # fold the overflow back into 31 bits
            zv = hval >> 31
            hval &= 0x7fffffff
            hval += zv
            hsz += 1
        if c == "\n" and hsz > 0:
            if hsz > 100:
                checksums.append(hval)
                sizes.append(sz)
            # reset the per-paragraph state
            hval = 0
            sz = 0
            hsz = 0
    if hsz > 100:
        checksums.append(hval)
        sizes.append(sz)
    return checksums, sizes
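
# Illustrative example (assumed values, not verified output): a text such as
# ("a" * 200) + "\n" + ("b" * 200) + "\n" contains two paragraphs of more
# than 100 letters each, so one checksum and one size per paragraph is
# returned.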


def filter_links(links):
    out = set()
    for link in links:
        r = courlan.check_url(link)
        if r is None:
            continue
        llink, ldomain = r
        print(llink, ldomain)
        # domain rules
        if not ldomain.endswith("sk"):
            print("bad domain")
            continue
        if courlan.is_not_crawlable(llink):
            print("not crawlable")
            continue
        out.add(llink)
    return out
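
# filter_links is applied to freshly crawled URLs as well as to URLs read
# back from the database, so only normalized, crawlable links on .sk
# domains reach the fetching stage.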


def get_link_doc(link, status="frontlink"):
    r = courlan.check_url(link)
    assert r is not None
    link, domain = r
    return {"url": link, "domain": domain, "status": status}


def generic_visit(domain):
    # get_visited_links is not defined in this file; generic_visit is
    # currently unused
    known_links = set(get_visited_links(domain))
    visit_links = trafilatura.feeds.find_feed_urls(domain)
    if not visit_links:
        visit_links = trafilatura.sitemaps.sitemap_search(domain)
    if not visit_links:
        visit_links, known_links = trafilatura.spider.focused_crawler(domain, known_links=known_links)


def fetch_pages(link_batch):
    htmls = []
    print(link_batch)
    for link in link_batch:
        print("fetching:", link)
        rr = trafilatura.fetch_url(link, decode=True)
        htmls.append(rr)
    return htmls
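
# trafilatura.fetch_url returns None on a failed download, so htmls can
# contain None entries; extract_pages and index_pages account for that.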


def fetch_front_links(start_link):
    known_links = []
    visit_links, known_links = trafilatura.spider.focused_crawler(start_link, known_links=known_links)
    filtered_links = filter_links(visit_links)
    return filtered_links


def extract_pages(link_batch, htmls):
    out = []
    for link, html in zip(link_batch, htmls):
        doc = None
        assert link is not None
        if html is not None:
            doc = trafilatura.bare_extraction(html, url=link, include_links=True,
                                              with_metadata=True, include_formatting=True,
                                              target_language="sk")
        out.append((link, html, doc))
    return out
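
# The indexing step below assumes bare_extraction returns a dict with a
# "text" field and, with include_links=True, a "links" field, and that it
# returns None when extraction fails.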


def index_pages(db, domain, extracted_pages):
    extracted_links = set()
    linkcol = db["links"]
    htmlcol = db["html"]
    contentcol = db["content"]
    for link, html, doc in extracted_pages:
        state = "good"
        if html is None:
            state = "html_error"
        elif doc is None:
            state = "content_error"
        if html is not None:
            htmlcol.insert_one({"url": link, "html": html})
        if doc is not None:
            print(doc)
            checksums, sizes = calculate_checksums(doc["text"])
            doc["paragraph_checksums"] = checksums
            doc["paragraph_sizes"] = sizes
            if "links" in doc:
                # update() mutates the set in place; a bare union() call
                # would discard its result
                extracted_links.update(doc["links"])
                del doc["links"]
            contentcol.insert_one(doc)
        doc = get_link_doc(link, state)
        linkcol.replace_one({"url": link}, doc, upsert=True)
    filtered_links = filter_links(extracted_links)
    for llink in filtered_links:
        doc = get_link_doc(llink, "backlink")
        # insert_one takes no upsert option; upsert via replace_one instead
        linkcol.replace_one({"url": llink}, doc, upsert=True)


def get_front_links(db, domain, batch_size=100):
    linkcol = db["links"]
    res = linkcol.find({"status": "frontlink", "domain": domain}, limit=batch_size)
    front_links = []
    for doc in res:
        front_links.append(doc["url"])
    return filter_links(front_links)


def get_back_links(db, domain, batch_size=100):
    linkcol = db["links"]
    res = linkcol.find({"status": "backlink", "domain": domain}, limit=batch_size)
    back_links = []
    for doc in res:
        back_links.append(doc["url"])
    return back_links


def index_front_links(db, filtered_links):
    linkcol = db["links"]
    for link in filtered_links:
        linkcol.insert_one(get_link_doc(link, "frontlink"))
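
# Link lifecycle as implemented above: new URLs are stored as "frontlink",
# links discovered inside fetched pages become "backlink", and visited URLs
# are rewritten to "good", "html_error" or "content_error".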


# earlier experiments:
# visit_links = trafilatura.feeds.find_feed_urls(domain)
# visit_links = trafilatura.sitemaps.sitemap_search(domain)
# print(visit_links)
# for link in visit_links:
#     content = trafilatura.fetch_url(link, decode=True)
#     document = trafilatura.bare_extraction(content)
#     print(content)


def simple_visit(start_link):
    start_link, domain = courlan.check_url(start_link)
    myclient = pymongo.MongoClient("mongodb://root:example@localhost:27017/")
    db = myclient["crawler"]
    new_front_links = fetch_front_links(start_link)
    print("NEW FRONT LINKS")
    print(new_front_links)
    index_front_links(db, new_front_links)
    # materialize as a list so fetch_pages and extract_pages iterate the
    # links in the same order
    visit_links = list(get_front_links(db, domain))
    print("NEW VISIT LINKS")
    print(visit_links)
    htmls = fetch_pages(visit_links)
    extracted_pages = extract_pages(visit_links, htmls)
    index_pages(db, domain, extracted_pages)
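

# Usage (hypothetical file name): python crawler.py https://www.example.sk/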
simple_visit(sys.argv[1])