aboutsummaryrefslogtreecommitdiff
path: root/crawler/nevrax/spiders/update.py
diff options
context:
space:
mode:
authorneodarz <neodarz@neodarz.net>2019-02-06 19:15:36 +0100
committerneodarz <neodarz@neodarz.net>2019-02-06 19:15:36 +0100
commit9a8badd5dffe47813489ab0b355f5db5faa66646 (patch)
treebf53db13612cd7c32f2eadfe905cad83ce50a0d7 /crawler/nevrax/spiders/update.py
parentf84e8fb75b8096dff5a39936ac26c933fdba3059 (diff)
downloadkhanindexer-9a8badd5dffe47813489ab0b355f5db5faa66646.tar.xz
khanindexer-9a8badd5dffe47813489ab0b355f5db5faa66646.zip
Add ability to update URLs that are one week old and whose content was modified
Diffstat (limited to 'crawler/nevrax/spiders/update.py')
-rw-r--r--crawler/nevrax/spiders/update.py50
1 file changed, 50 insertions, 0 deletions
diff --git a/crawler/nevrax/spiders/update.py b/crawler/nevrax/spiders/update.py
new file mode 100644
index 0000000..b3f7aa1
--- /dev/null
+++ b/crawler/nevrax/spiders/update.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+import scrapy
+from scrapy.spiders import CrawlSpider, Rule
+from scrapy.linkextractors import LinkExtractor
+from scrapy import Selector
+
+import config
+import datetime
+
+from database.models import Nevrax
+
+from dateutil.relativedelta import *
+
+import logging
+
class NevraxSpider(CrawlSpider):
    """Re-crawl pages whose stored copy is more than one week old.

    Seeds ``start_urls`` with every URL already stored in the ``Nevrax``
    table, then re-fetches only the stale entries; URLs not found in the
    database are fetched unconditionally.  Scraped items are handed to
    ``NevraxPipeline`` via ``custom_settings``.
    """

    name = "nevrax_updater"
    custom_settings = {
        'ITEM_PIPELINES': {
            'crawler.nevrax.pipelines.NevraxPipeline': 0
        }
    }
    # Bug fix: Scrapy reads ``allowed_domains``; the original
    # ``allow_domains`` was a typo and silently disabled offsite filtering.
    allowed_domains = [config.NEVRAX_URL]

    # Seed the spider with every URL already known to the database.
    start_urls = [row['url'] for row in Nevrax.select(Nevrax.url).dicts()]

    def start_requests(self):
        """Yield a request for each stored URL at least one week old.

        URLs missing from the database (``Nevrax.DoesNotExist``) are
        requested regardless of age.  All requests bypass the dupe filter
        so previously-seen URLs are fetched again.
        """
        # Hoisted loop invariant: the staleness cutoff is the same for
        # every URL in this run.
        cutoff = datetime.datetime.now() + relativedelta(weeks=-1)
        for url in self.start_urls:
            logging.info(url)
            # Keep only the line that can raise inside the try (EAFP).
            try:
                page = Nevrax.get(Nevrax.url == url)
            except Nevrax.DoesNotExist:
                # Unknown page: fetch it unconditionally.
                yield scrapy.Request(url, callback=self.parse_url, dont_filter=True)
                continue
            if page.date_updated < cutoff:
                yield scrapy.Request(url, callback=self.parse_url, dont_filter=True)

    def parse_url(self, response):
        """Extract the indexable fields of a fetched page.

        Yields a dict consumed by ``NevraxPipeline``: the page URL, its
        ``<title>`` text, the visible text of the ``bodya`` div, the raw
        body length in bytes, and a fresh update timestamp.
        """
        sel = Selector(response)
        yield {
            'url': response.url,
            'title': response.css('title::text').extract_first(),
            # ``Selector.xpath`` replaces the long-deprecated ``select``.
            'content': ''.join(sel.xpath("//div[@class='bodya']//text()").extract()).strip(),
            'content_length': len(response.body),
            'date_updated': datetime.datetime.now()
        }