settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for the xueqiu project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings by consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'xueqiu'

SPIDER_MODULES = ['xueqiu.spiders']
NEWSPIDER_MODULE = 'xueqiu.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#     'Accept-Language': 'en',
# }

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#     'xueqiu.middlewares.XueqiuSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#     'xueqiu.middlewares.XueqiuDownloaderMiddleware': 543,
# }
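
# For reference, a minimal sketch of what a downloader middleware wired in above
# could look like. This is an illustrative assumption only: the project's actual
# XueqiuDownloaderMiddleware lives in xueqiu/middlewares.py and may behave
# differently. The sketch stamps a default header on every outgoing request via
# the process_request hook:
#
# class XueqiuDownloaderMiddleware(object):
#     def process_request(self, request, spider):
#         # Add the header only if the request does not already carry one;
#         # returning None lets Scrapy continue processing the request normally.
#         request.headers.setdefault('Referer', 'https://xueqiu.com/')
#         return None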

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#     'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'xueqiu.pipelines.XueqiuPipeline': 300,
}
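
# For reference, a minimal sketch of the pipeline enabled above. The real
# XueqiuPipeline is defined in xueqiu/pipelines.py and its body is not shown
# here, so the implementation below is only an assumption, e.g. writing each
# scraped item out as one JSON line:
#
# import json
#
# class XueqiuPipeline(object):
#     def open_spider(self, spider):
#         # open the output file once when the spider starts
#         self.file = open('xueqiu_items.jl', 'w', encoding='utf-8')
#
#     def process_item(self, item, spider):
#         # serialize the item and return it so later pipelines can still run
#         self.file.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
#         return item
#
#     def close_spider(self, spider):
#         self.file.close()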

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'