Krrish Ghimire
4 years ago
10 changed files with 252 additions and 0 deletions
@@ -0,0 +1,12 @@
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class RsslinksItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
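The item class above is the stock "scrapy startproject" template, left empty. Given that the spider in this commit extracts a YouTube channel id and builds an RSS feed URL from it, a natural way to fill it in might look like this (a sketch only; these field names are assumptions, not part of the commit):

import scrapy


class RsslinksItem(scrapy.Item):
    # Hypothetical fields for the data this project actually scrapes.
    channel_id = scrapy.Field()  # channel id read from the page's meta tag
    feed_url = scrapy.Field()    # RSS feed URL derived from the channel id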
@@ -0,0 +1,103 @@
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter


class RsslinksSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class RsslinksDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
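Both middleware classes are the unmodified project template; every hook falls through to Scrapy's defaults. As an illustration of where project-specific logic would live, a spider middleware that drops scraped results pointing anywhere other than youtube.com might look like this (hypothetical, not part of this commit, and it would still need to be registered in SPIDER_MIDDLEWARES):

class YoutubeOnlySpiderMiddleware:
    def process_spider_output(self, response, result, spider):
        # Inspect everything the spider yields; dicts carrying an
        # off-site feed_url are silently dropped, all else passes.
        for obj in result:
            feed_url = obj.get('feed_url') if isinstance(obj, dict) else None
            if feed_url is not None and 'youtube.com' not in feed_url:
                continue  # drop off-site feeds
            yield obj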
@@ -0,0 +1,13 @@
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class RsslinksPipeline:
    def process_item(self, item, spider):
        return item
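The pipeline is the stock pass-through and, as its own comment notes, does nothing until it is registered in ITEM_PIPELINES. Enabling it would mean uncommenting the corresponding block in the settings hunk further down, i.e. (shown here for context; the commit itself leaves this disabled):

ITEM_PIPELINES = {
    'rsslinks.pipelines.RsslinksPipeline': 300,
}

The number is the pipeline's order: lower values run earlier, and values in the 0-1000 range are conventional.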
@@ -0,0 +1,7 @@
#!/usr/bin/env python3
import subprocess
import sys

spider_name = 'youtube'
url = sys.argv[1]
subprocess.run(['scrapy', 'crawl', spider_name, '-a', 'url=' + url, '-s', 'LOG_ENABLED=False'], check=True)
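The helper simply shells out to scrapy crawl with logging disabled, so the only output is whatever the spider prints. Passing the arguments as a list keeps the user-supplied URL from ever being interpreted by a shell. Assuming the file is saved as run.py (the diff view does not show the filename), usage would be:

python run.py https://www.youtube.com/user/SomeChannel

where the URL is any YouTube channel page.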
@@ -0,0 +1,88 @@
# Scrapy settings for rsslinks project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'rsslinks'

SPIDER_MODULES = ['rsslinks.spiders']
NEWSPIDER_MODULE = 'rsslinks.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'rsslinks (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'rsslinks.middlewares.RsslinksSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'rsslinks.middlewares.RsslinksDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    'rsslinks.pipelines.RsslinksPipeline': 300,
#}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
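Apart from BOT_NAME, the module paths, and ROBOTSTXT_OBEY = True, every setting here is left at its commented-out default. For a crawler aimed at a single large site like YouTube, a conservative tuning one might uncomment would be (illustrative values, not the commit's choices):

DOWNLOAD_DELAY = 1
AUTOTHROTTLE_ENABLED = True
AUTOTHROTTLE_START_DELAY = 5
AUTOTHROTTLE_MAX_DELAY = 60

With AutoThrottle enabled, Scrapy adjusts the delay from observed response latencies instead of issuing requests at a fixed rate.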
@@ -0,0 +1,4 @@
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
@@ -0,0 +1,13 @@
import scrapy


class YoutubeSpider(scrapy.Spider):
    name = "youtube"

    def __init__(self, url=None, *args, **kwargs):
        super(YoutubeSpider, self).__init__(*args, **kwargs)
        self.start_urls = [url]

    def parse(self, response):
        prefix = "https://www.youtube.com/feeds/videos.xml?channel_id="
        xpath_query = "//meta[@itemprop='channelId']/@content"
        print(prefix + response.xpath(xpath_query).get())
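Two things are worth noting about parse(): the spider prints the feed URL instead of yielding an item, and response.xpath(...).get() returns None when the page carries no channelId meta tag, in which case the string concatenation raises a TypeError. A more defensive variant (a sketch, not the committed code) could be:

    def parse(self, response):
        prefix = "https://www.youtube.com/feeds/videos.xml?channel_id="
        channel_id = response.xpath("//meta[@itemprop='channelId']/@content").get()
        if channel_id is None:
            # The page layout changed, or the URL was not a channel page.
            self.logger.warning("no channelId found on %s", response.url)
            return
        yield {"feed_url": prefix + channel_id}

Either way, the spider is invoked as scrapy crawl youtube -a url=<channel page URL>, which is exactly what the helper script above does.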
@@ -0,0 +1,11 @@
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.io/en/latest/deploy.html

[settings]
default = rsslinks.settings

[deploy]
#url = http://localhost:6800/
project = rsslinks