Output of the new DB entries
This commit is contained in: parent bad48e1627, commit cfbbb9ee3d
2399 changed files with 843193 additions and 43 deletions
196 venv/lib/python3.9/site-packages/scrapy/http/response/__init__.py Normal file

@@ -0,0 +1,196 @@
"""
|
||||
This module implements the Response class which is used to represent HTTP
|
||||
responses in Scrapy.
|
||||
|
||||
See documentation in docs/topics/request-response.rst
|
||||
"""
|
||||
from typing import Generator
|
||||
from urllib.parse import urljoin
|
||||
|
||||
from scrapy.exceptions import NotSupported
|
||||
from scrapy.http.common import obsolete_setter
|
||||
from scrapy.http.headers import Headers
|
||||
from scrapy.http.request import Request
|
||||
from scrapy.link import Link
|
||||
from scrapy.utils.trackref import object_ref
|
||||
|
||||
|
||||
class Response(object_ref):
|
||||
|
||||
def __init__(self, url, status=200, headers=None, body=b'', flags=None,
|
||||
request=None, certificate=None, ip_address=None):
|
||||
self.headers = Headers(headers or {})
|
||||
self.status = int(status)
|
||||
self._set_body(body)
|
||||
self._set_url(url)
|
||||
self.request = request
|
||||
self.flags = [] if flags is None else list(flags)
|
||||
self.certificate = certificate
|
||||
self.ip_address = ip_address
|
||||
|
||||
@property
|
||||
def cb_kwargs(self):
|
||||
try:
|
||||
return self.request.cb_kwargs
|
||||
except AttributeError:
|
||||
raise AttributeError(
|
||||
"Response.cb_kwargs not available, this response "
|
||||
"is not tied to any request"
|
||||
)
|
||||
|
||||
@property
|
||||
def meta(self):
|
||||
try:
|
||||
return self.request.meta
|
||||
except AttributeError:
|
||||
raise AttributeError(
|
||||
"Response.meta not available, this response "
|
||||
"is not tied to any request"
|
||||
)
|
||||
|
||||
def _get_url(self):
|
||||
return self._url
|
||||
|
||||
def _set_url(self, url):
|
||||
if isinstance(url, str):
|
||||
self._url = url
|
||||
else:
|
||||
raise TypeError(f'{type(self).__name__} url must be str, '
|
||||
f'got {type(url).__name__}')
|
||||
|
||||
url = property(_get_url, obsolete_setter(_set_url, 'url'))
|
||||
|
||||
def _get_body(self):
|
||||
return self._body
|
||||
|
||||
def _set_body(self, body):
|
||||
if body is None:
|
||||
self._body = b''
|
||||
elif not isinstance(body, bytes):
|
||||
raise TypeError(
|
||||
"Response body must be bytes. "
|
||||
"If you want to pass unicode body use TextResponse "
|
||||
"or HtmlResponse.")
|
||||
else:
|
||||
self._body = body
|
||||
|
||||
body = property(_get_body, obsolete_setter(_set_body, 'body'))
|
||||
|
||||
def __str__(self):
|
||||
return f"<{self.status} {self.url}>"
|
||||
|
||||
__repr__ = __str__
|
||||
|
||||
def copy(self):
|
||||
"""Return a copy of this Response"""
|
||||
return self.replace()
|
||||
|
||||
def replace(self, *args, **kwargs):
|
||||
"""Create a new Response with the same attributes except for those
|
||||
given new values.
|
||||
"""
|
||||
for x in ['url', 'status', 'headers', 'body',
|
||||
'request', 'flags', 'certificate', 'ip_address']:
|
||||
kwargs.setdefault(x, getattr(self, x))
|
||||
cls = kwargs.pop('cls', self.__class__)
|
||||
return cls(*args, **kwargs)
|
||||
|
||||
def urljoin(self, url):
|
||||
"""Join this Response's url with a possible relative url to form an
|
||||
absolute interpretation of the latter."""
|
||||
return urljoin(self.url, url)
|
||||
|
||||
@property
|
||||
def text(self):
|
||||
"""For subclasses of TextResponse, this will return the body
|
||||
as str
|
||||
"""
|
||||
raise AttributeError("Response content isn't text")
|
||||
|
||||
def css(self, *a, **kw):
|
||||
"""Shortcut method implemented only by responses whose content
|
||||
is text (subclasses of TextResponse).
|
||||
"""
|
||||
raise NotSupported("Response content isn't text")
|
||||
|
||||
def xpath(self, *a, **kw):
|
||||
"""Shortcut method implemented only by responses whose content
|
||||
is text (subclasses of TextResponse).
|
||||
"""
|
||||
raise NotSupported("Response content isn't text")
|
||||
|
||||
def follow(self, url, callback=None, method='GET', headers=None, body=None,
|
||||
cookies=None, meta=None, encoding='utf-8', priority=0,
|
||||
dont_filter=False, errback=None, cb_kwargs=None, flags=None):
|
||||
# type: (...) -> Request
|
||||
"""
|
||||
Return a :class:`~.Request` instance to follow a link ``url``.
|
||||
It accepts the same arguments as ``Request.__init__`` method,
|
||||
but ``url`` can be a relative URL or a ``scrapy.link.Link`` object,
|
||||
not only an absolute URL.
|
||||
|
||||
:class:`~.TextResponse` provides a :meth:`~.TextResponse.follow`
|
||||
method which supports selectors in addition to absolute/relative URLs
|
||||
and Link objects.
|
||||
|
||||
.. versionadded:: 2.0
|
||||
The *flags* parameter.
|
||||
"""
|
||||
if isinstance(url, Link):
|
||||
url = url.url
|
||||
elif url is None:
|
||||
raise ValueError("url can't be None")
|
||||
url = self.urljoin(url)
|
||||
|
||||
return Request(
|
||||
url=url,
|
||||
callback=callback,
|
||||
method=method,
|
||||
headers=headers,
|
||||
body=body,
|
||||
cookies=cookies,
|
||||
meta=meta,
|
||||
encoding=encoding,
|
||||
priority=priority,
|
||||
dont_filter=dont_filter,
|
||||
errback=errback,
|
||||
cb_kwargs=cb_kwargs,
|
||||
flags=flags,
|
||||
)
|
||||
|
||||
def follow_all(self, urls, callback=None, method='GET', headers=None, body=None,
|
||||
cookies=None, meta=None, encoding='utf-8', priority=0,
|
||||
dont_filter=False, errback=None, cb_kwargs=None, flags=None):
|
||||
# type: (...) -> Generator[Request, None, None]
|
||||
"""
|
||||
.. versionadded:: 2.0
|
||||
|
||||
Return an iterable of :class:`~.Request` instances to follow all links
|
||||
in ``urls``. It accepts the same arguments as ``Request.__init__`` method,
|
||||
but elements of ``urls`` can be relative URLs or :class:`~scrapy.link.Link` objects,
|
||||
not only absolute URLs.
|
||||
|
||||
:class:`~.TextResponse` provides a :meth:`~.TextResponse.follow_all`
|
||||
method which supports selectors in addition to absolute/relative URLs
|
||||
and Link objects.
|
||||
"""
|
||||
if not hasattr(urls, '__iter__'):
|
||||
raise TypeError("'urls' argument must be an iterable")
|
||||
return (
|
||||
self.follow(
|
||||
url=url,
|
||||
callback=callback,
|
||||
method=method,
|
||||
headers=headers,
|
||||
body=body,
|
||||
cookies=cookies,
|
||||
meta=meta,
|
||||
encoding=encoding,
|
||||
priority=priority,
|
||||
dont_filter=dont_filter,
|
||||
errback=errback,
|
||||
cb_kwargs=cb_kwargs,
|
||||
flags=flags,
|
||||
)
|
||||
for url in urls
|
||||
)
|
||||
|
|
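A minimal usage sketch of the base class above, assuming a standard Scrapy install; the URLs and callback are made up for illustration:

from scrapy.http import Request, Response

resp = Response(
    url='https://example.com/catalog/',   # must be str
    status=200,
    headers={'Content-Type': 'text/html'},
    body=b'<html>...</html>',             # must be bytes, or TypeError
)

# replace() keeps every attribute that is not explicitly overridden
not_found = resp.replace(status=404)
print(not_found)                          # <404 https://example.com/catalog/>

# urljoin() resolves a relative URL against response.url
print(resp.urljoin('page/2'))             # https://example.com/catalog/page/2

# follow() combines urljoin() with Request construction
req = resp.follow('page/2', callback=lambda response: None)
assert isinstance(req, Request)

# css()/xpath()/text are stubs here; only TextResponse subclasses support them
try:
    resp.css('a')
except Exception as exc:
    print(exc)                            # Response content isn't text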
12 venv/lib/python3.9/site-packages/scrapy/http/response/html.py Normal file

@@ -0,0 +1,12 @@
"""
|
||||
This module implements the HtmlResponse class which adds encoding
|
||||
discovering through HTML encoding declarations to the TextResponse class.
|
||||
|
||||
See documentation in docs/topics/request-response.rst
|
||||
"""
|
||||
|
||||
from scrapy.http.response.text import TextResponse
|
||||
|
||||
|
||||
class HtmlResponse(TextResponse):
|
||||
pass
|
||||
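HtmlResponse is an empty subclass because the work happens in TextResponse (next file): an HTML meta declaration in the body is picked up by _body_declared_encoding() via w3lib. A small sketch of that behavior, with a made-up URL and body:

from scrapy.http import HtmlResponse

body = b'<html><head><meta charset="iso-8859-1"></head><body>caf\xe9</body></html>'
resp = HtmlResponse(url='https://example.com/', body=body)

# No encoding argument and no Content-Type charset: the <meta charset>
# declaration in the body decides how the bytes are decoded.
print(resp.encoding)           # e.g. 'cp1252' (w3lib normalizes iso-8859-1)
print('café' in resp.text)     # True: byte 0xe9 decoded as 'é'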
265 venv/lib/python3.9/site-packages/scrapy/http/response/text.py Normal file

@@ -0,0 +1,265 @@
"""
|
||||
This module implements the TextResponse class which adds encoding handling and
|
||||
discovering (through HTTP headers) to base Response class.
|
||||
|
||||
See documentation in docs/topics/request-response.rst
|
||||
"""
|
||||
|
||||
import json
|
||||
import warnings
|
||||
from contextlib import suppress
|
||||
from typing import Generator
|
||||
from urllib.parse import urljoin
|
||||
|
||||
import parsel
|
||||
from w3lib.encoding import (html_body_declared_encoding, html_to_unicode,
|
||||
http_content_type_encoding, resolve_encoding)
|
||||
from w3lib.html import strip_html5_whitespace
|
||||
|
||||
from scrapy.exceptions import ScrapyDeprecationWarning
|
||||
from scrapy.http import Request
|
||||
from scrapy.http.response import Response
|
||||
from scrapy.utils.python import memoizemethod_noargs, to_unicode
|
||||
from scrapy.utils.response import get_base_url
|
||||
|
||||
_NONE = object()
|
||||
|
||||
|
||||
class TextResponse(Response):
|
||||
|
||||
_DEFAULT_ENCODING = 'ascii'
|
||||
_cached_decoded_json = _NONE
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self._encoding = kwargs.pop('encoding', None)
|
||||
self._cached_benc = None
|
||||
self._cached_ubody = None
|
||||
self._cached_selector = None
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
def _set_url(self, url):
|
||||
if isinstance(url, str):
|
||||
self._url = to_unicode(url, self.encoding)
|
||||
else:
|
||||
super()._set_url(url)
|
||||
|
||||
def _set_body(self, body):
|
||||
self._body = b'' # used by encoding detection
|
||||
if isinstance(body, str):
|
||||
if self._encoding is None:
|
||||
raise TypeError('Cannot convert unicode body - '
|
||||
f'{type(self).__name__} has no encoding')
|
||||
self._body = body.encode(self._encoding)
|
||||
else:
|
||||
super()._set_body(body)
|
||||
|
||||
def replace(self, *args, **kwargs):
|
||||
kwargs.setdefault('encoding', self.encoding)
|
||||
return Response.replace(self, *args, **kwargs)
|
||||
|
||||
@property
|
||||
def encoding(self):
|
||||
return self._declared_encoding() or self._body_inferred_encoding()
|
||||
|
||||
def _declared_encoding(self):
|
||||
return (
|
||||
self._encoding
|
||||
or self._headers_encoding()
|
||||
or self._body_declared_encoding()
|
||||
)
|
||||
|
||||
def body_as_unicode(self):
|
||||
"""Return body as unicode"""
|
||||
warnings.warn('Response.body_as_unicode() is deprecated, '
|
||||
'please use Response.text instead.',
|
||||
ScrapyDeprecationWarning, stacklevel=2)
|
||||
return self.text
|
||||
|
||||
def json(self):
|
||||
"""
|
||||
.. versionadded:: 2.2
|
||||
|
||||
Deserialize a JSON document to a Python object.
|
||||
"""
|
||||
if self._cached_decoded_json is _NONE:
|
||||
self._cached_decoded_json = json.loads(self.text)
|
||||
return self._cached_decoded_json
|
||||
|
||||
@property
|
||||
def text(self):
|
||||
""" Body as unicode """
|
||||
# access self.encoding before _cached_ubody to make sure
|
||||
# _body_inferred_encoding is called
|
||||
benc = self.encoding
|
||||
if self._cached_ubody is None:
|
||||
charset = f'charset={benc}'
|
||||
self._cached_ubody = html_to_unicode(charset, self.body)[1]
|
||||
return self._cached_ubody
|
||||
|
||||
def urljoin(self, url):
|
||||
"""Join this Response's url with a possible relative url to form an
|
||||
absolute interpretation of the latter."""
|
||||
return urljoin(get_base_url(self), url)
|
||||
|
||||
@memoizemethod_noargs
|
||||
def _headers_encoding(self):
|
||||
content_type = self.headers.get(b'Content-Type', b'')
|
||||
return http_content_type_encoding(to_unicode(content_type))
|
||||
|
||||
def _body_inferred_encoding(self):
|
||||
if self._cached_benc is None:
|
||||
content_type = to_unicode(self.headers.get(b'Content-Type', b''))
|
||||
benc, ubody = html_to_unicode(content_type, self.body,
|
||||
auto_detect_fun=self._auto_detect_fun,
|
||||
default_encoding=self._DEFAULT_ENCODING)
|
||||
self._cached_benc = benc
|
||||
self._cached_ubody = ubody
|
||||
return self._cached_benc
|
||||
|
||||
def _auto_detect_fun(self, text):
|
||||
for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'):
|
||||
try:
|
||||
text.decode(enc)
|
||||
except UnicodeError:
|
||||
continue
|
||||
return resolve_encoding(enc)
|
||||
|
||||
@memoizemethod_noargs
|
||||
def _body_declared_encoding(self):
|
||||
return html_body_declared_encoding(self.body)
|
||||
|
||||
@property
|
||||
def selector(self):
|
||||
from scrapy.selector import Selector
|
||||
if self._cached_selector is None:
|
||||
self._cached_selector = Selector(self)
|
||||
return self._cached_selector
|
||||
|
||||
def xpath(self, query, **kwargs):
|
||||
return self.selector.xpath(query, **kwargs)
|
||||
|
||||
def css(self, query):
|
||||
return self.selector.css(query)
|
||||
|
||||
def follow(self, url, callback=None, method='GET', headers=None, body=None,
|
||||
cookies=None, meta=None, encoding=None, priority=0,
|
||||
dont_filter=False, errback=None, cb_kwargs=None, flags=None):
|
||||
# type: (...) -> Request
|
||||
"""
|
||||
Return a :class:`~.Request` instance to follow a link ``url``.
|
||||
It accepts the same arguments as ``Request.__init__`` method,
|
||||
but ``url`` can be not only an absolute URL, but also
|
||||
|
||||
* a relative URL
|
||||
* a :class:`~scrapy.link.Link` object, e.g. the result of
|
||||
:ref:`topics-link-extractors`
|
||||
* a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.
|
||||
``response.css('a.my_link')[0]``
|
||||
* an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.
|
||||
``response.css('a::attr(href)')[0]`` or
|
||||
``response.xpath('//img/@src')[0]``
|
||||
|
||||
See :ref:`response-follow-example` for usage examples.
|
||||
"""
|
||||
if isinstance(url, parsel.Selector):
|
||||
url = _url_from_selector(url)
|
||||
elif isinstance(url, parsel.SelectorList):
|
||||
raise ValueError("SelectorList is not supported")
|
||||
encoding = self.encoding if encoding is None else encoding
|
||||
return super().follow(
|
||||
url=url,
|
||||
callback=callback,
|
||||
method=method,
|
||||
headers=headers,
|
||||
body=body,
|
||||
cookies=cookies,
|
||||
meta=meta,
|
||||
encoding=encoding,
|
||||
priority=priority,
|
||||
dont_filter=dont_filter,
|
||||
errback=errback,
|
||||
cb_kwargs=cb_kwargs,
|
||||
flags=flags,
|
||||
)
|
||||
|
||||
def follow_all(self, urls=None, callback=None, method='GET', headers=None, body=None,
|
||||
cookies=None, meta=None, encoding=None, priority=0,
|
||||
dont_filter=False, errback=None, cb_kwargs=None, flags=None,
|
||||
css=None, xpath=None):
|
||||
# type: (...) -> Generator[Request, None, None]
|
||||
"""
|
||||
A generator that produces :class:`~.Request` instances to follow all
|
||||
links in ``urls``. It accepts the same arguments as the :class:`~.Request`'s
|
||||
``__init__`` method, except that each ``urls`` element does not need to be
|
||||
an absolute URL, it can be any of the following:
|
||||
|
||||
* a relative URL
|
||||
* a :class:`~scrapy.link.Link` object, e.g. the result of
|
||||
:ref:`topics-link-extractors`
|
||||
* a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.
|
||||
``response.css('a.my_link')[0]``
|
||||
* an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.
|
||||
``response.css('a::attr(href)')[0]`` or
|
||||
``response.xpath('//img/@src')[0]``
|
||||
|
||||
In addition, ``css`` and ``xpath`` arguments are accepted to perform the link extraction
|
||||
within the ``follow_all`` method (only one of ``urls``, ``css`` and ``xpath`` is accepted).
|
||||
|
||||
Note that when passing a ``SelectorList`` as argument for the ``urls`` parameter or
|
||||
using the ``css`` or ``xpath`` parameters, this method will not produce requests for
|
||||
selectors from which links cannot be obtained (for instance, anchor tags without an
|
||||
``href`` attribute)
|
||||
"""
|
||||
arguments = [x for x in (urls, css, xpath) if x is not None]
|
||||
if len(arguments) != 1:
|
||||
raise ValueError(
|
||||
"Please supply exactly one of the following arguments: urls, css, xpath"
|
||||
)
|
||||
if not urls:
|
||||
if css:
|
||||
urls = self.css(css)
|
||||
if xpath:
|
||||
urls = self.xpath(xpath)
|
||||
if isinstance(urls, parsel.SelectorList):
|
||||
selectors = urls
|
||||
urls = []
|
||||
for sel in selectors:
|
||||
with suppress(_InvalidSelector):
|
||||
urls.append(_url_from_selector(sel))
|
||||
return super().follow_all(
|
||||
urls=urls,
|
||||
callback=callback,
|
||||
method=method,
|
||||
headers=headers,
|
||||
body=body,
|
||||
cookies=cookies,
|
||||
meta=meta,
|
||||
encoding=encoding,
|
||||
priority=priority,
|
||||
dont_filter=dont_filter,
|
||||
errback=errback,
|
||||
cb_kwargs=cb_kwargs,
|
||||
flags=flags,
|
||||
)
|
||||
|
||||
|
||||
class _InvalidSelector(ValueError):
|
||||
"""
|
||||
Raised when a URL cannot be obtained from a Selector
|
||||
"""
|
||||
|
||||
|
||||
def _url_from_selector(sel):
|
||||
# type: (parsel.Selector) -> str
|
||||
if isinstance(sel.root, str):
|
||||
# e.g. ::attr(href) result
|
||||
return strip_html5_whitespace(sel.root)
|
||||
if not hasattr(sel.root, 'tag'):
|
||||
raise _InvalidSelector(f"Unsupported selector: {sel}")
|
||||
if sel.root.tag not in ('a', 'link'):
|
||||
raise _InvalidSelector("Only <a> and <link> elements are supported; "
|
||||
f"got <{sel.root.tag}>")
|
||||
href = sel.root.get('href')
|
||||
if href is None:
|
||||
raise _InvalidSelector(f"<{sel.root.tag}> element has no href attribute: {sel}")
|
||||
return strip_html5_whitespace(href)
|
||||
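A hedged end-to-end sketch of the encoding lookup order, the cached json() decode, and follow_all(css=...) skipping anchors without an href. URLs and bodies are invented; it assumes parsel and w3lib are installed as usual with Scrapy:

from scrapy.http import HtmlResponse, TextResponse

# Encoding resolution: constructor arg > Content-Type header > body
# declaration > inferred from the bytes (see encoding/_declared_encoding above).
resp = TextResponse(
    url='https://example.com/data',
    headers={'Content-Type': 'application/json; charset=utf-8'},
    body=b'{"items": [{"href": "/a"}]}',
)
print(resp.encoding)          # 'utf-8', taken from the header

# json() decodes lazily and caches the result on the instance
data = resp.json()
assert resp.json() is data    # second call returns the cached object

# follow_all(css=...) extracts hrefs itself; the second <a> has no href
# and is silently dropped via _InvalidSelector + contextlib.suppress.
page = HtmlResponse(
    url='https://example.com/list',
    body=b'<a class="item" href="/p/1">one</a><a class="item">two</a>',
)
urls = [r.url for r in page.follow_all(css='a.item')]
print(urls)                   # ['https://example.com/p/1']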
12 venv/lib/python3.9/site-packages/scrapy/http/response/xml.py Normal file

@@ -0,0 +1,12 @@
"""
|
||||
This module implements the XmlResponse class which adds encoding
|
||||
discovering through XML encoding declarations to the TextResponse class.
|
||||
|
||||
See documentation in docs/topics/request-response.rst
|
||||
"""
|
||||
|
||||
from scrapy.http.response.text import TextResponse
|
||||
|
||||
|
||||
class XmlResponse(TextResponse):
|
||||
pass
|
||||
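Like HtmlResponse, XmlResponse adds no code of its own: w3lib's html_body_declared_encoding() also matches XML encoding declarations, and the selector becomes an XML-mode parsel selector. A small sketch under the same assumptions (made-up feed URL):

from scrapy.http import XmlResponse

body = b'<?xml version="1.0" encoding="latin-1"?>\n<root><item>caf\xe9</item></root>'
resp = XmlResponse(url='https://example.com/feed.xml', body=body)

print(resp.encoding)                       # e.g. 'cp1252' (latin-1, normalized)
print(resp.xpath('//item/text()').get())   # café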