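"""fetch command: download a single URL using the Scrapy downloader and
print the result to stdout (see "scrapy fetch --help" for all options;
--nolog is one of the standard options added by the base ScrapyCommand).

Example invocations:

    scrapy fetch --nolog https://example.com
    scrapy fetch --headers --no-redirect https://example.com
"""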
import sys

from w3lib.url import is_url

from scrapy.commands import ScrapyCommand
from scrapy.http import Request
from scrapy.exceptions import UsageError
from scrapy.utils.datatypes import SequenceExclude
from scrapy.utils.spider import spidercls_for_request, DefaultSpider


class Command(ScrapyCommand):

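    # no active Scrapy project is required; default settings are used
    # when the command runs standalone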
    requires_project = False

    def syntax(self):
        return "[options] <url>"

    def short_desc(self):
        return "Fetch a URL using the Scrapy downloader"

    def long_desc(self):
        return (
            "Fetch a URL using the Scrapy downloader and print its content"
            " to stdout. You may want to use --nolog to disable logging"
        )

    def add_options(self, parser):
        ScrapyCommand.add_options(self, parser)
        parser.add_option("--spider", dest="spider", help="use this spider")
        parser.add_option("--headers", dest="headers", action="store_true",
                          help="print response HTTP headers instead of body")
        parser.add_option("--no-redirect", dest="no_redirect", action="store_true", default=False,
                          help="do not handle HTTP 3xx status codes and print response as-is")

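    # Scrapy header objects map each key to a list of values; print one
    # "<prefix> <key>: <value>" line per value, curl -v style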
    def _print_headers(self, headers, prefix):
        for key, values in headers.items():
            for value in values:
                self._print_bytes(prefix + b' ' + key + b': ' + value)

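    # with --headers, show the request headers ('>') and the response
    # headers ('<'); otherwise dump the raw response body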
    def _print_response(self, response, opts):
        if opts.headers:
            self._print_headers(response.request.headers, b'>')
            print('>')
            self._print_headers(response.headers, b'<')
        else:
            self._print_bytes(response.body)

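    # write raw bytes through sys.stdout.buffer so the body is emitted
    # verbatim, without any text decoding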
    def _print_bytes(self, bytes_):
        sys.stdout.buffer.write(bytes_ + b'\n')

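    # entry point: validate the URL argument, build the request, resolve a
    # spider class and run a one-request crawl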
    def run(self, args, opts):
        if len(args) != 1 or not is_url(args[0]):
            raise UsageError()
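        # dont_filter bypasses the duplicate filter so the URL is always fetched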
        request = Request(args[0], callback=self._print_response,
                          cb_kwargs={"opts": opts}, dont_filter=True)
        # by default, let the framework handle redirects, i.e. the command
        # handles all status codes except 3xx
        if not opts.no_redirect:
            request.meta['handle_httpstatus_list'] = SequenceExclude(range(300, 400))
        else:
            # with --no-redirect, every status code, 3xx included, reaches
            # the callback and is printed as-is
            request.meta['handle_httpstatus_all'] = True

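        # pick the spider class: an explicit --spider if given, or whichever
        # project spider matches the request URL, falling back to DefaultSpider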
        spidercls = DefaultSpider
        spider_loader = self.crawler_process.spider_loader
        if opts.spider:
            spidercls = spider_loader.load(opts.spider)
        else:
            spidercls = spidercls_for_request(spider_loader, request, spidercls)
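        # override the spider's start_requests so the crawl fetches only this
        # request; start() blocks until the crawl finishes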
        self.crawler_process.crawl(spidercls, start_requests=lambda: [request])
        self.crawler_process.start()