#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Downloads web pages with fillable forms after parsing through a set of links.

Used for collecting web pages with forms. Used as a standalone script.
This script assumes that it's run from within the same directory in which it's
checked into. If this script were to be run elsewhere then the path for
REGISTER_PAGE_DIR needs to be changed.

This script assumes that third party modules are installed:
httplib2, lxml, pycurl.

Usage: webforms_aggregator.py [options] [single url or file containing urls]

Options:
  -l LOG_LEVEL, --log_level LOG_LEVEL
      LOG_LEVEL: debug, info, warning or error [default: error]
  -h, --help  show this help message and exit
"""

import datetime
import errno
import logging
import optparse
import os
import re
# Needed in Linux so that PyCurl does not throw a segmentation fault.
import signal
import sys
import tempfile
import threading
import time
import urlparse

import httplib2
from lxml import html, etree
import pycurl

REGISTER_PAGE_DIR = os.path.join(os.pardir, 'test', 'data', 'autofill',
                                 'heuristics', 'input')
NOT_FOUND_REG_PAGE_SITES_FILENAME = 'notFoundRegPageSites.txt'

FORM_LOCATION_COMMENT = 'Form Location: %s'
HTML_FILE_PREFIX = 'grabber-'

MAX_REDIRECTIONS = 10

# Strings in a webpage that are indicative of a registration link.
LINK_CLUES = ['regist', 'user', 'sign', 'login', 'account']

MAX_SAME_DOMAIN_URLS_NO = 30
MAX_TOTAL_URLS_PER_DOMAIN = 300
MAX_OPEN_FILES_NO = 500

# URLs are selected for downloading with the following rules from the link
# lists, giving more weight to the links that contain a link clue.
CLUE_SECURE_LINKS_NO = MAX_SAME_DOMAIN_URLS_NO * 3/10
CLUE_GENERAL_LINKS_NO = MAX_SAME_DOMAIN_URLS_NO * 3/10
SECURE_LINKS_NO = MAX_SAME_DOMAIN_URLS_NO * 2/10
GENERAL_LINKS_NO = MAX_SAME_DOMAIN_URLS_NO * 2/10

MAX_ALLOWED_THREADS = MAX_OPEN_FILES_NO / MAX_SAME_DOMAIN_URLS_NO + 1
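
# A worked example of the selection quotas above (a sketch; the exact values
# follow from Python 2 integer division):
#   CLUE_SECURE_LINKS_NO  = 30 * 3/10 = 9
#   CLUE_GENERAL_LINKS_NO = 30 * 3/10 = 9
#   SECURE_LINKS_NO       = 30 * 2/10 = 6
#   GENERAL_LINKS_NO      = 30 * 2/10 = 6
# for a total of 30 URLs per batch (MAX_SAME_DOMAIN_URLS_NO), and
#   MAX_ALLOWED_THREADS   = 500 / 30 + 1 = 17
# concurrent crawler threads.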


class Retriever(object):
  """Download, parse, and check if the web page contains a registration form.

  The objects of this class have a one-to-one relation with the web pages. For
  each page that is downloaded and parsed an object of this class is created.
  Each Retriever object creates a curl object. This object is added to the curl
  multi object of the crawler object so that the corresponding page gets
  downloaded.
  """
  logger = logging.getLogger(__name__)

  def __init__(self, url, domain, cookie_file):
    """Initializes a Retriever object.

    Args:
      url: url to download page from.
      domain: only links with this domain will be retrieved.
      cookie_file: the name of a cookie file, needed for pages that use session
        cookies to change their contents.
    """
    self._url = url
    self._domain = domain
    self._html_content = ''
    # Http links without clues from LINK_CLUES.
    self._general_links = []
    # Http links that contain a clue from LINK_CLUES.
    self._clues_general_links = []
    # Https links that do not contain any clues from LINK_CLUES.
    self._secure_links = []
    # Https links that contain a clue from LINK_CLUES.
    self._clues_secure_links = []
    self._cookie_file = cookie_file
    self._curl_object = None

  def __del__(self):
    """Cleans up before this object is destroyed.

    The function closes the corresponding curl object that does the
    downloading.
    """
    if self._curl_object:
      self._curl_object.close()

  def _AddLink(self, link):
    """Adds url |link|, if not already present, to the appropriate list.

    The link only gets added to the single list that is appropriate for it:
    _secure_links, _general_links, _clues_secure_links or _clues_general_links.

    Args:
      link: the url that is inserted to the appropriate links list.
    """
    # Handles sites with unicode URLs.
    if isinstance(link, unicode):
      # Encode in 'utf-8' to avoid the UnicodeEncodeError exception.
      link = httplib2.iri2uri(link).encode('utf-8')
    link_parsed = urlparse.urlparse(link)
    link_lists = [self._clues_secure_links, self._secure_links,
                  self._clues_general_links, self._general_links]
    # Checks that the registration page is within the domain.
    if (self._domain in link_parsed[1] and
        all(link not in x for x in link_lists)):
      for clue in LINK_CLUES:
        if clue in link.lower():
          if link_parsed[0].startswith('https'):
            self._clues_secure_links.append(link)
            return
          else:
            self._clues_general_links.append(link)
            return
      if link_parsed[0].startswith('https'):  # No clues found in the link.
        self._secure_links.append(link)
      else:
        self._general_links.append(link)

  def ParseAndGetLinks(self):
    """Parses the downloaded page and gets its links if it's not a reg page.

    Checks if the current page contains a registration form and, if not, gets
    the url links. If it is a registration page, it saves it in a file as
    'grabber-' + domain + '.html' after it has added the FORM_LOCATION_COMMENT
    and it returns True. Otherwise it returns False.

    Returns:
      True if the current page contains a registration form, and False
      otherwise.

    Raises:
      IOError: When the file cannot be written.
    """
    if not self._domain:
      self.logger.error('Error: self._domain was not set')
      sys.exit(1)
    match_list = re.findall(r'(?P<quote>[\'\"])(?P<link>(?:https?:)?//.*?)\1',
                            self._html_content)
    for group_list in match_list:
      link = group_list[1]
      if link.startswith('//'):
        link = urlparse.urljoin(self._url, link)
      self._AddLink(link)
    try:
      tree = html.fromstring(self._html_content, parser=html.HTMLParser())
    except etree.LxmlError:
      self.logger.info('\t\tSkipping: not valid HTML code in this page <<< %s',
                       self._url)
      return False
    try:
      body = tree.iter('body').next()
    except StopIteration:
      self.logger.info('\t\tSkipping: no "BODY" tag in this page <<< %s',
                       self._url)
      return False

    # Get a list of all input elements with attribute type='password'.
    password_elements = list(body.iterfind('.//input[@type="password"]'))
    # Check for multiple password elements to distinguish between a login form
    # and a registration form (Password field and Confirm Password field).
    if password_elements and len(password_elements) >= 2:
      form_elements = []
      for password_elem in password_elements:
        form_elem = password_elem.xpath('ancestor::form[1]')
        if not form_elem:
          continue
        if not form_elem[0] in form_elements:
          form_elements.append(form_elem[0])
        else:
          # Confirms that the page contains a registration form if two
          # passwords are contained in the same form for form_elem[0].
          if not os.path.isdir(REGISTER_PAGE_DIR):
            os.makedirs(REGISTER_PAGE_DIR)
          # Locate the HTML tag and insert the form location comment after it.
          html_tag = tree.iter('html').next()
          comment = etree.Comment(FORM_LOCATION_COMMENT % self._url)
          html_tag.insert(0, comment)
          # Create a new file and save the HTML registration page code.
          f = open('%s/%s%s.html' % (REGISTER_PAGE_DIR, HTML_FILE_PREFIX,
                                     self._domain), 'w')
          try:
            f.write(html.tostring(tree, pretty_print=True))
          except IOError as e:
            self.logger.error('Error: %s', e)
            raise
          finally:
            f.close()
          return True  # Registration page found.
    # Indicates page is not a registration page and links must be parsed.
    link_elements = list(body.iter('a'))
    for link_elem in link_elements:
      link = link_elem.get('href')
      if not link or '#' == link[0]:
        continue
      link = urlparse.urljoin(self._url, link)
      link_parsed = urlparse.urlparse(link)
      if not link_parsed[0].startswith('http'):
        continue
      self._AddLink(link)
    return False  # Registration page not found.

  def InitRequestHead(self):
    """Initializes the curl object for a HEAD request.

    A HEAD request is initiated so that we can check from the headers if this
    is a valid HTML file. If it is not a valid HTML file, then we do not
    initiate a GET request, which saves an unnecessary download.
    """
    self._curl_object = pycurl.Curl()
    self._curl_object.setopt(pycurl.URL, self._url)
    # The following line fixes the GnuTLS package error that pycurl depends
    # on for getting https pages.
    self._curl_object.setopt(pycurl.SSLVERSION, pycurl.SSLVERSION_SSLv3)
    self._curl_object.setopt(pycurl.FOLLOWLOCATION, True)
    self._curl_object.setopt(pycurl.NOBODY, True)
    self._curl_object.setopt(pycurl.SSL_VERIFYPEER, False)
    self._curl_object.setopt(pycurl.MAXREDIRS, MAX_REDIRECTIONS)
    self._curl_object.setopt(pycurl.FAILONERROR, False)
    self._curl_object.setopt(pycurl.COOKIEFILE, self._cookie_file)
    self._curl_object.setopt(pycurl.COOKIEJAR, self._cookie_file)
    self._curl_object.setopt(pycurl.CONNECTTIMEOUT, 30)
    self._curl_object.setopt(pycurl.TIMEOUT, 300)
    self._curl_object.setopt(pycurl.NOSIGNAL, 1)

  def InitRequestGet(self):
    """Initializes the curl object for a GET request.

    This is called only for valid HTML files. Pycurl then makes a GET request.
    Since not all of the page's data arrives at once, pycurl hands each chunk
    it receives to the write callback, which appends it to the page content
    until everything is downloaded.
    """
    self._curl_object.setopt(pycurl.NOBODY, False)
    self._curl_object.setopt(
        pycurl.WRITEFUNCTION, lambda buff: setattr(
            self, '_html_content', self._html_content + buff))

  def Download(self):
    """Downloads the self._url page.

    It first does a HEAD request and then it proceeds to a GET request.
    It uses a curl object for a single download. This function is called only
    once for the initial url of a site when we still don't have more urls from
    a domain.

    Returns:
      True, if the downloaded page is valid HTML code, or False otherwise.
    """
    self.InitRequestHead()
    try:
      self._curl_object.perform()
    except pycurl.error as e:
      self.logger.error('Error: %s, url: %s', e, self._url)
      return False
    self._url = urlparse.urljoin(
        self._url, self._curl_object.getinfo(pycurl.EFFECTIVE_URL))
    content_type = self._curl_object.getinfo(pycurl.CONTENT_TYPE)
    if content_type and ('text/html' in content_type.lower()):
      self.InitRequestGet()
      try:
        self._curl_object.perform()
      except pycurl.error as e:
        self.logger.error('Error: %s, url: %s', e, self._url)
        return False
      return True
    else:
      self.logger.info('\tSkipping: Not an HTML page <<< %s', self._url)
      return False

  def Run(self):
    """Called only once for the initial url when we do not have more urls.

    Downloads the originally-specified site url, parses it and gets the links.

    Returns:
      True, if a registration page is found, and False otherwise.
    """
    if self.Download():
      if not self._domain:
        url_parsed = urlparse.urlparse(self._url)
        self._domain = url_parsed[1]
        if self._domain.startswith('www'):
          self._domain = '.'.join(self._domain.split('.')[1:])
      if self.ParseAndGetLinks():
        return True
    return False


class Crawler(object):
  """Crawls a site until a registration page is found or the URL limit is hit.

  Creates, uses and destroys Retriever objects. Creates a cookie temp file
  needed for session cookies. It keeps track of 'visited links' and
  'links to visit' of the site. To do this it uses the links discovered from
  each Retriever object. Use Run() to crawl the site.
  """
  # Ignore SIGPIPE so that PyCurl does not kill the process on a broken pipe.
  # signal.SIGPIPE does not exist on all platforms (e.g. Windows), in which
  # case looking it up raises AttributeError.
  try:
    signal.signal(signal.SIGPIPE, signal.SIG_IGN)
  except AttributeError:
    pass
  logger = logging.getLogger(__name__)

  def __init__(self, url, logging_level=None):
    """Init crawler URL, links lists, logger, and creates a cookie temp file.

    The cookie temp file is needed for session cookies.

    Args:
      url: the initial "seed" url of the site.
      logging_level: the desired verbosity level, default is None.
    """
    if logging_level:
      self.logger.setLevel(logging_level)

    self.url_error = False
    url_parsed = urlparse.urlparse(url)
    if not url_parsed[0].startswith('http'):
      self.logger.error(
          'Error: "%s" does not begin with http:// or https://', url)
      self.url_error = True
      return
    # Example: if url is 'http://www.example.com?name=john' then value [1] or
    # network location is 'www.example.com'.
    if not url_parsed[1]:
      self.logger.error('Error: "%s" is not a valid url', url)
      self.url_error = True
      return
    self._url = url
    self._domain = ''
    # Http links that contain a clue from LINK_CLUES.
    self._clues_general_links = []
    # Http links that do not contain any clue from LINK_CLUES.
    self._general_links = []
    # Https links that contain a clue from LINK_CLUES.
    self._clues_secure_links = []
    # Https links that do not contain any clue from LINK_CLUES.
    self._secure_links = []
    # All links downloaded and parsed so far.
    self._links_visited = []
    self._retrievers_list = []
    self._cookie_file = tempfile.NamedTemporaryFile(
        suffix='.cookie', delete=False)
    self._cookie_file.close()
    self._cookie_file = self._cookie_file.name  # Keep only the filename.

  def __del__(self):
    """Deletes cookie file when Crawler instances are destroyed."""
    if hasattr(self, '_cookie_file'):
      self.logger.info('Deleting cookie file %s ...', self._cookie_file)
      os.unlink(self._cookie_file)

  def _MultiPerform(self, curl_multi_object):
    """Performs concurrent downloads using a CurlMulti object.

    Args:
      curl_multi_object: a curl object that downloads multiple pages
        concurrently. The class of this object is |pycurl.CurlMulti|.
    """
    # The following code uses the CurlMulti example
    # at http://pycurl.sourceforge.net/doc/curlmultiobject.html.
    while True:
      ret, no_handles = curl_multi_object.perform()
      if ret != pycurl.E_CALL_MULTI_PERFORM:
        break
    while no_handles:
      curl_multi_object.select(1.0)
      while True:
        ret, no_handles = curl_multi_object.perform()
        if ret != pycurl.E_CALL_MULTI_PERFORM:
          break

  def _GetLinksPages(self, curl_multi_object):
    """Downloads many pages concurrently using a CurlMulti Object.

    Creates many Retriever objects and adds them to a list. The constant
    MAX_SAME_DOMAIN_URLS_NO defines the number of pages that can be downloaded
    concurrently from the same domain using the pycurl multi object. It's
    currently set to 30 URLs. These URLs are taken from the links lists, which
    are csl, cgl, sl, and gl. The rules define how many URLs are taken from
    each list during each iteration.

    Example of the rules:
      3/10 from csl results in 9 URLs
      3/10 from cgl results in 9 URLs
      2/10 from sl results in 6 URLs
      2/10 from gl results in 6 URLs
    Adding up the above URLs gives 30 URLs that can be downloaded concurrently.
    If these lists have fewer items than the defined rules, such as if a site
    does not contain any secure links, then the csl and sl lists will be of 0
    length and only 15 pages would be downloaded concurrently from the same
    domain.

    Since 30 URLs can be handled concurrently, the number of links taken from
    other lists can be increased. This means that we can take 24 links from the
    cgl list so that 24 from cgl + 6 from gl = 30 URLs. If the cgl list has
    fewer than 24 links, e.g. there are only 21 links, then 9 links may be
    taken from gl so that 0 + 21 + 0 + 9 = 30.

    Args:
      curl_multi_object: Each Retriever object has a curl object which is
        added to the CurlMulti Object.
    """
    self._retrievers_list = []

    csl_no = min(CLUE_SECURE_LINKS_NO, len(self._clues_secure_links))
    cgl_no = min(CLUE_GENERAL_LINKS_NO, len(self._clues_general_links))
    sl_no = min(SECURE_LINKS_NO, len(self._secure_links))
    gl_no = min(GENERAL_LINKS_NO, len(self._general_links))

    # If some of the lists have fewer items than needed, the missing links
    # will be taken by the following priority: csl, cgl, sl, gl.
    # c: clues, s: secure, g: general, l: list.
    spare_links = MAX_SAME_DOMAIN_URLS_NO - (csl_no + sl_no + cgl_no + gl_no)
    if spare_links > 0:
      csl_no = min(csl_no + spare_links, len(self._clues_secure_links))
      spare_links = MAX_SAME_DOMAIN_URLS_NO - (csl_no + sl_no + cgl_no + gl_no)
    if spare_links > 0:
      cgl_no = min(cgl_no + spare_links, len(self._clues_general_links))
      spare_links = MAX_SAME_DOMAIN_URLS_NO - (csl_no + sl_no + cgl_no + gl_no)
    if spare_links > 0:
      sl_no = min(sl_no + spare_links, len(self._secure_links))
      spare_links = MAX_SAME_DOMAIN_URLS_NO - (csl_no + sl_no + cgl_no + gl_no)
    if spare_links > 0:
      gl_no = min(gl_no + spare_links, len(self._general_links))

    for no_of_links, links in [
        (csl_no, self._clues_secure_links),
        (sl_no, self._secure_links),
        (cgl_no, self._clues_general_links),
        (gl_no, self._general_links)]:
      for i in xrange(no_of_links):
        if not links:
          break
        url = links.pop(0)
        self._links_visited.append(url)
        r = Retriever(url, self._domain, self._cookie_file)
        r.InitRequestHead()
        curl_multi_object.add_handle(r._curl_object)
        self._retrievers_list.append(r)

    if self._retrievers_list:
      try:
        self._MultiPerform(curl_multi_object)
      except pycurl.error as e:
        self.logger.error('Error: %s, url: %s', e, self._url)
      finally:
        for r in self._retrievers_list:
          curl_multi_object.remove_handle(r._curl_object)
      # |_retrievers_list[:]| is a copy of |_retrievers_list| to avoid removing
      # items from the iterated list.
      for r in self._retrievers_list[:]:
        r._url = urlparse.urljoin(r._url, r._curl_object.getinfo(
            pycurl.EFFECTIVE_URL))
        content_type = r._curl_object.getinfo(pycurl.CONTENT_TYPE)
        if content_type and ('text/html' in content_type.lower()):
          r.InitRequestGet()
          curl_multi_object.add_handle(r._curl_object)
        else:
          self._retrievers_list.remove(r)
          self.logger.info('\tSkipping: Not an HTML page <<< %s', r._url)
      if self._retrievers_list:
        try:
          self._MultiPerform(curl_multi_object)
        except pycurl.error as e:
          self.logger.error('Error: %s, url: %s', e, self._url)
        finally:
          for r in self._retrievers_list:
            curl_multi_object.remove_handle(r._curl_object)
            self.logger.info('Downloaded: %s', r._url)

  def _LogRegPageFound(self, retriever):
    """Displays logging for a found registration page.

    Args:
      retriever: The object that has retrieved the page.
    """
    self.logger.info('\t##############################################')
    self.logger.info('\t### %s ###', retriever._domain)
    self.logger.info('\t##############################################')
    self.logger.info('\t!!!!!!!!! registration page FOUND !!!!!!!!!!!')
    self.logger.info('\t%s', retriever._url)
    self.logger.info('\t##############################################')

  def _GetNewLinks(self, retriever):
    """Appends new links discovered by each retriever to the appropriate lists.

    Links are copied to the links lists of the crawler object, which hold all
    the links found from all retrievers that the crawler object created. The
    Crawler object exists as long as a specific site is examined and the
    Retriever object exists as long as a single page of this site is examined.

    Args:
      retriever: a temporary object that downloads a specific page, parses the
        content and gets the page's href links.
    """
    for link in retriever._clues_secure_links:
      if (not link in self._clues_secure_links and
          not link in self._links_visited):
        self._clues_secure_links.append(link)
    for link in retriever._secure_links:
      if (not link in self._secure_links and
          not link in self._links_visited):
        self._secure_links.append(link)
    for link in retriever._clues_general_links:
      if (not link in self._clues_general_links and
          not link in self._links_visited):
        self._clues_general_links.append(link)
    for link in retriever._general_links:
      if (not link in self._general_links and
          not link in self._links_visited):
        self._general_links.append(link)

  def Run(self):
    """Runs the Crawler.

    Creates a Retriever object and calls its Run() method to get the first
    links, and then uses a CurlMulti object and creates many Retriever objects
    to get the subsequent pages.

    The number of pages (= Retriever objects) created each time is restricted
    by MAX_SAME_DOMAIN_URLS_NO. After this number of Retriever objects download
    and parse their pages, we do the same again. The number of total pages
    visited is kept in urls_visited.
    If no registration page is found, the Crawler object gives up after
    MAX_TOTAL_URLS_PER_DOMAIN URLs have been visited.

    Returns:
      True if a registration page is found, and False otherwise.
    """
    reg_page_found = False
    if self.url_error:
      return False
    r = Retriever(self._url, self._domain, self._cookie_file)
    if r.Run():
      self._LogRegPageFound(r)
      reg_page_found = True
    else:
      self._url = r._url
      self._domain = r._domain
      self.logger.info('url to crawl: %s', self._url)
      self.logger.info('domain: %s', self._domain)
      self._links_visited.append(r._url)
      self._GetNewLinks(r)
      urls_visited = 1
      while True:
        if (not (self._clues_secure_links or self._secure_links or
                 self._clues_general_links or self._general_links) or
            urls_visited >= MAX_TOTAL_URLS_PER_DOMAIN):
          break  # Registration page not found.
        m = pycurl.CurlMulti()
        self._GetLinksPages(m)
        urls_visited += len(self._retrievers_list)
        self.logger.info('\t<----- URLs visited for domain "%s": %d ----->',
                         self._domain, urls_visited)
        for r in self._retrievers_list:
          if r.ParseAndGetLinks():
            self._LogRegPageFound(r)
            reg_page_found = True
            break
          else:
            self.logger.info('parsed: %s', r._url)
            self._GetNewLinks(r)
        m.close()
        if reg_page_found:
          break
        # Drop references to this batch's Retriever objects so they can be
        # reclaimed (Retriever.__del__ closes each curl object).
        while self._retrievers_list:
          r = self._retrievers_list.pop()
    return reg_page_found
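
# Usage sketch (not part of the original script): crawling a single site
# programmatically rather than via the command line. 'http://www.example.com'
# is a placeholder URL.
#
#   crawler = Crawler('http://www.example.com', logging.INFO)
#   if not crawler.url_error:
#     found = crawler.Run()  # True if a registration page was saved.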


class WorkerThread(threading.Thread):
  """Creates a new thread of execution."""

  def __init__(self, url):
    """Creates _url and page_found attributes to populate the not-found file.

    They are used after the thread's termination for the creation of a file
    with a list of the urls for which a registration page wasn't found.

    Args:
      url: will be used as an argument to create a Crawler object later.
    """
    threading.Thread.__init__(self)
    self._url = url
    self.page_found = False

  def run(self):
    """Execution of thread creates a Crawler object and runs it.

    Caution: this function name should not be changed to 'Run' or any other
    name, because it overrides the 'run' method of the 'threading.Thread'
    class. Otherwise it will never be called.
    """
    self.page_found = Crawler(self._url).Run()


class ThreadedCrawler(object):
  """Runs WorkerThread objects, each of which creates and runs a Crawler.

  The Crawler objects run concurrently, examining one site each.
  """
  logger = logging.getLogger(__name__)

  def __init__(self, urls_file, logging_level=None):
    """Creates the list of URLs for the threaded Crawler objects.

    Args:
      urls_file: a text file containing a URL on each line.
      logging_level: verbosity level, default is None.

    Raises:
      IOError: If no URLs are found in the file.
    """
    if logging_level:
      self.logger.setLevel(logging_level)

    self._urls_list = []
    f = open(urls_file)
    try:
      for url in f.readlines():
        url = url.strip()
        if not urlparse.urlparse(url)[0].startswith('http'):
          self.logger.info(
              '%s: skipping this (does not begin with "http://")', url)
          continue
        self._urls_list.append(url)
    except IOError as e:
      self.logger.error('Error: %s', e)
      raise
    finally:
      f.close()
    if not self._urls_list:
      error_msg = 'No URLs were found.'
      self.logger.error('ERROR: %s', error_msg)
      raise IOError(error_msg)

  def Run(self):
    """Runs Crawler objects using python threads.

    The number of concurrent threads is restricted to MAX_ALLOWED_THREADS.

    Returns:
      The number of registration pages found. -1 if no URLs are given.

    Raises:
      OSError: When the output directory cannot be created.
    """
    if self._urls_list:
      allThreads = []
      # originalNumThreads is the number of threads just before the
      # ThreadedCrawler starts creating new threads. As a standalone script it
      # will be 1.
      originalNumThreads = threading.active_count()
      for url in self._urls_list:
        self.logger.info('URL fed to a crawler thread: %s', url)
        t = WorkerThread(url)
        t.start()
        allThreads.append(t)
        while threading.active_count() >= (
            MAX_ALLOWED_THREADS + originalNumThreads):
          time.sleep(.4)
      while threading.active_count() > originalNumThreads:
        time.sleep(.4)
      self.logger.info('----------------')
      self.logger.info('--- FINISHED ---')
      self.logger.info('----------------')
      urls_no = 0
      urls_not_found_no = 0
      not_file_name = os.path.join(
          REGISTER_PAGE_DIR, NOT_FOUND_REG_PAGE_SITES_FILENAME)
      not_file_dir = os.path.dirname(not_file_name)
      try:
        os.makedirs(not_file_dir)
      except OSError as e:
        if e.errno != errno.EEXIST:
          raise
      fnot = open(not_file_name, 'wb')
      try:
        for t in sorted(allThreads, key=lambda t: t._url):
          urls_no += 1
          if not t.page_found:
            urls_not_found_no += 1
            fnot.write('%s' % t._url)
            fnot.write(os.linesep)
      except IOError as e:
        self.logger.error('Error: %s', e)
      finally:
        fnot.close()
      self.logger.info('Total number of URLs given: %d\n', urls_no)
      self.logger.info(
          'Registration pages found: %d\n', (urls_no - urls_not_found_no))
      self.logger.info(
          'URLs that did not return a registration page: %d\n',
          urls_not_found_no)
      return urls_no - urls_not_found_no
    else:
      self.logger.error('Error: no URLs were found.')
      return -1
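
# Example urls file for ThreadedCrawler: one seed URL per line (hypothetical
# contents shown). Lines that do not begin with http:// or https:// are
# skipped.
#
#   http://www.example.com
#   https://www.example.org/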


def main():
  usage = 'usage: %prog [options] single_url_or_urls_filename'
  parser = optparse.OptionParser(usage)
  parser.add_option(
      '-l', '--log_level', metavar='LOG_LEVEL', default='error',
      help='LOG_LEVEL: debug, info, warning or error [default: %default]')
  (options, args) = parser.parse_args()

  options.log_level = options.log_level.upper()
  if options.log_level not in ['DEBUG', 'INFO', 'WARNING', 'ERROR']:
    print 'Wrong log_level argument.'
    parser.print_help()
    return 1
  options.log_level = getattr(logging, options.log_level)

  if len(args) != 1:
    parser.error('Wrong number of arguments.')

  logger = logging.getLogger(__name__)
  if options.log_level:
    console = logging.StreamHandler()
    logger.addHandler(console)
    logger.setLevel(options.log_level)

  arg_is_a_file = os.path.isfile(args[0])
  if arg_is_a_file:
    CrawlerClass = ThreadedCrawler
  else:
    CrawlerClass = Crawler
  t0 = datetime.datetime.now()
  c = CrawlerClass(args[0], options.log_level)
  c.Run()
  if not arg_is_a_file and c.url_error:
    logger.error(
        'ERROR: "%s" is neither a valid filename nor a valid URL' % args[0])
  t1 = datetime.datetime.now()
  delta_t = t1 - t0
  logger.info('Started at: %s\n', t0)
  logger.info('Ended at: %s\n', t1)
  logger.info('Total execution time: %s\n', delta_t)
  return 0


if __name__ == "__main__":
  sys.exit(main())