#!/usr/bin/env python3
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import argparse
import colorsys
import difflib
import html
import os
import random
import re
import subprocess
import sys
import tempfile
import textwrap
import webbrowser
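
# Example invocation (illustrative; the path is a placeholder):
#   ./uberblame.py HEAD path/to/file.cc
# Every token of the file is colored by the commit that last modified it;
# clicking a token shows that commit's message and diff.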


class TokenContext(object):
  """Metadata about a token.

  Attributes:
    row: Row index of the token in the data file.
    column: Column index of the token in the data file.
    token: The token string.
    commit: A Commit object that corresponds to the commit that added
        this token.
  """

  def __init__(self, row, column, token, commit=None):
    self.row = row
    self.column = column
    self.token = token
    self.commit = commit


class Commit(object):
  """Commit data.

  Attributes:
    hash: The commit hash.
    author_name: The author's name.
    author_email: The author's email.
    author_date: The date and time the author created this commit.
    message: The commit message.
    diff: The commit diff.
  """

  def __init__(self, hash, author_name, author_email, author_date, message,
               diff):
    self.hash = hash
    self.author_name = author_name
    self.author_email = author_email
    self.author_date = author_date
    self.message = message
    self.diff = diff


def tokenize_data(data, tokenize_by_char, tokenize_whitespace):
  """Tokenizes |data|.

  Args:
    data: String to tokenize.
    tokenize_by_char: If true, individual characters are treated as tokens.
        Otherwise, tokens are either symbols or strings of both alphanumeric
        characters and underscores.
    tokenize_whitespace: Treat non-newline whitespace characters as tokens.

  Returns:
    A list of lists of TokenContexts.  Each list represents a line.
  """
  contexts = []
  in_identifier = False
  identifier_start = 0
  identifier = ''
  row = 0
  column = 0
  line_contexts = []

  for c in data:
    if not tokenize_by_char and (c.isalnum() or c == '_'):
      if in_identifier:
        identifier += c
      else:
        in_identifier = True
        identifier_start = column
        identifier = c
    else:
      if in_identifier:
        line_contexts.append(TokenContext(row, identifier_start, identifier))
      in_identifier = False
      if not c.isspace() or (tokenize_whitespace and c != '\n'):
        line_contexts.append(TokenContext(row, column, c))

    if c == '\n':
      row += 1
      column = 0
      contexts.append(line_contexts)
      line_contexts = []
    else:
      column += 1

  contexts.append(line_contexts)
  return contexts


def compute_unified_diff(old_tokens, new_tokens):
  """Computes the diff between |old_tokens| and |new_tokens|.

  Args:
    old_tokens: Token strings corresponding to the old data.
    new_tokens: Token strings corresponding to the new data.

  Returns:
    The diff, in unified diff format.
  """
  return difflib.unified_diff(old_tokens, new_tokens, n=0, lineterm='')


def parse_chunk_header_file_range(file_range):
  """Parses a chunk header file range.

  Diff chunk headers have the form:
    @@ -<file-range> +<file-range> @@
  File ranges have the form:
    <start line number>,<number of lines changed>

  Args:
    file_range: A chunk header file range.

  Returns:
    A tuple (range_start, range_end).  The endpoints are adjusted such that
    iterating over [range_start, range_end) will give the changed indices.
  """
  if ',' in file_range:
    file_range_parts = file_range.split(',')
    start = int(file_range_parts[0])
    amount = int(file_range_parts[1])
    if amount == 0:
      return (start, start)
    return (start - 1, start + amount - 1)
  else:
    return (int(file_range) - 1, int(file_range))


def compute_changed_token_indices(previous_tokens, current_tokens):
  """Computes changed and added tokens.

  Args:
    previous_tokens: Tokens corresponding to the old file.
    current_tokens: Tokens corresponding to the new file.

  Returns:
    A tuple (added_tokens, changed_tokens).
      added_tokens: A list of indices into |current_tokens|.
      changed_tokens: A map of indices into |current_tokens| to
          indices into |previous_tokens|.
  """
  prev_file_chunk_end = 0
  prev_patched_chunk_end = 0
  added_tokens = []
  changed_tokens = {}
  for line in compute_unified_diff(previous_tokens, current_tokens):
    if line.startswith("@@"):
      parts = line.split(' ')
      removed = parts[1].lstrip('-')
      removed_start, removed_end = parse_chunk_header_file_range(removed)
      added = parts[2].lstrip('+')
      added_start, added_end = parse_chunk_header_file_range(added)
      for i in range(added_start, added_end):
        added_tokens.append(i)
      for i in range(0, removed_start - prev_patched_chunk_end):
        changed_tokens[prev_file_chunk_end + i] = prev_patched_chunk_end + i
      prev_patched_chunk_end = removed_end
      prev_file_chunk_end = added_end
  for i in range(0, len(previous_tokens) - prev_patched_chunk_end):
    changed_tokens[prev_file_chunk_end + i] = prev_patched_chunk_end + i
  return added_tokens, changed_tokens


def flatten_nested_list(l):
  """Flattens a list and provides a mapping from elements in the list back
  into the nested list.

  Args:
    l: A list of lists.

  Returns:
    A tuple (flattened, index_to_position):
      flattened: The flattened list.
      index_to_position: A dict mapping each index i in |flattened| to a
          pair (r, c) such that flattened[i] == l[r][c].
  """
  flattened = []
  index_to_position = {}
  r = 0
  c = 0
  for nested_list in l:
    for element in nested_list:
      index_to_position[len(flattened)] = (r, c)
      flattened.append(element)
      c += 1
    r += 1
    c = 0
  return (flattened, index_to_position)


def compute_changed_token_positions(previous_tokens, current_tokens):
  """Computes changed and added token positions.

  Args:
    previous_tokens: A list of lists of token strings.  Lines in the file
        correspond to the nested lists.
    current_tokens: A list of lists of token strings.  Lines in the file
        correspond to the nested lists.

  Returns:
    A tuple (added_token_positions, changed_token_positions):
      added_token_positions: A list of pairs that index into |current_tokens|.
      changed_token_positions: A map from pairs that index into
          |current_tokens| to pairs that index into |previous_tokens|.
  """
  flat_previous_tokens, previous_index_to_position = flatten_nested_list(
      previous_tokens)
  flat_current_tokens, current_index_to_position = flatten_nested_list(
      current_tokens)
  added_indices, changed_indices = compute_changed_token_indices(
      flat_previous_tokens, flat_current_tokens)
  added_token_positions = [current_index_to_position[i] for i in added_indices]
  changed_token_positions = {
      current_index_to_position[current_i]:
      previous_index_to_position[changed_indices[current_i]]
      for current_i in changed_indices
  }
  return (added_token_positions, changed_token_positions)


def parse_chunks_from_diff(diff):
  """Returns a generator of chunk data from a diff.

  Args:
    diff: A list of strings, with each string being a line from a diff
        in unified diff format.

  Returns:
    A generator of tuples (added_lines_start, added_lines_end, removed_lines).
  """
  it = iter(diff)
  for line in it:
    while not line.startswith('@@'):
      line = next(it)
    parts = line.split(' ')
    previous_start, previous_end = parse_chunk_header_file_range(
        parts[1].lstrip('-'))
    current_start, current_end = parse_chunk_header_file_range(
        parts[2].lstrip('+'))

    in_delta = False
    added_lines_start = None
    added_lines_end = None
    removed_lines = []
    while previous_start < previous_end or current_start < current_end:
      line = next(it)
      firstchar = line[0]
      line = line[1:]
      if not in_delta and (firstchar == '-' or firstchar == '+'):
        in_delta = True
        added_lines_start = current_start
        added_lines_end = current_start
        removed_lines = []

      if firstchar == '-':
        removed_lines.append(line)
        previous_start += 1
      elif firstchar == '+':
        current_start += 1
        added_lines_end = current_start
      elif firstchar == ' ':
        if in_delta:
          in_delta = False
          yield (added_lines_start, added_lines_end, removed_lines)
        previous_start += 1
        current_start += 1

    if in_delta:
      yield (added_lines_start, added_lines_end, removed_lines)


def should_skip_commit(commit):
  """Decides if |commit| should be skipped when computing the blame.

  Commit 5d4451e deleted all files in the repo except for DEPS.  The
  next commit, 1e7896, brought them back.  This is a hack to skip
  those commits (except for the files they modified).  If we did not
  do this, changes would be incorrectly attributed to 1e7896.

  Args:
    commit: A Commit object.

  Returns:
    A boolean indicating if this commit should be skipped.
  """
  banned_commits = [
      '1e78967ed2f1937b3809c19d91e7dd62d756d307',
      '5d4451ebf298d9d71f716cc0135f465cec41fcd0',
  ]
  if commit.hash not in banned_commits:
    return False
  banned_commits_file_exceptions = [
      'DEPS',
      'chrome/browser/ui/views/file_manager_dialog_browsertest.cc',
  ]
  for line in commit.diff:
    if line.startswith('---') or line.startswith('+++'):
      if line.split(' ')[1] in banned_commits_file_exceptions:
        return False
    elif line.startswith('@@'):
      return True
  assert False


def generate_substrings(file):
  """Generates substrings from a file stream, where substrings are
  separated by '\0'.

  For example, the input:
    'a\0bc\0\0\0d\0'
  would produce the output:
    ['a', 'bc', 'd']

  Args:
    file: A readable file.
  """
  BUF_SIZE = 448  # Experimentally found to be pretty fast.
  data = []
  while True:
    buf = file.read(BUF_SIZE)
    parts = buf.split(b'\0')
    data.append(parts[0])
    if len(parts) > 1:
      joined = b''.join(data)
      if joined != b'':
        yield joined.decode()
      for i in range(1, len(parts) - 1):
        if parts[i] != b'':
          yield parts[i].decode()
      data = [parts[-1]]
    if len(buf) < BUF_SIZE:
      joined = b''.join(data)
      if joined != b'':
        yield joined.decode()
      return


def generate_commits(git_log_stdout):
  """Parses git log output into a stream of Commit objects.
  """
  substring_generator = generate_substrings(git_log_stdout)
  try:
    while True:
      hash = next(substring_generator)
      author_name = next(substring_generator)
      author_email = next(substring_generator)
      author_date = next(substring_generator)
      message = next(substring_generator).rstrip('\n')
      diff = next(substring_generator).split('\n')[1:-1]
      yield Commit(hash, author_name, author_email, author_date, message, diff)
  except StopIteration:
    pass


def uberblame_aux(file_name, git_log_stdout, data, tokenization_method):
  """Computes the uberblame of file |file_name|.

  Args:
    file_name: File to uberblame.
    git_log_stdout: A file object that represents the git log output.
    data: A string containing the data of file |file_name|.
    tokenization_method: A function that takes a string and returns a list of
        TokenContexts.

  Returns:
    A tuple (data, blame).
      data: File contents.
      blame: A list of TokenContexts.
  """
  blame = tokenization_method(data)
  blamed_tokens = 0
  uber_blame = (data, blame[:])
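  # How the walk works (comment added for clarity): commits arrive newest
  # first, and |blame| always holds the token contexts for the file as it
  # looked before the commits processed so far.  For every chunk, the lines a
  # commit added are compared token-by-token against the lines it removed;
  # tokens that first appear in the commit are attributed to it, while tokens
  # that merely moved keep their (still unattributed) context objects, which
  # are threaded back into |blame| so an older commit can claim them later.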
  for commit in generate_commits(git_log_stdout):
    if should_skip_commit(commit):
      continue

    offset = 0
    for (added_lines_start, added_lines_end,
         removed_lines) in parse_chunks_from_diff(commit.diff):
      added_lines_start += offset
      added_lines_end += offset
      previous_contexts = [
          token_lines
          for line_previous in removed_lines
          for token_lines in tokenization_method(line_previous)
      ]
      previous_tokens = [[context.token for context in contexts]
                         for contexts in previous_contexts]
      current_contexts = blame[added_lines_start:added_lines_end]
      current_tokens = [[context.token for context in contexts]
                        for contexts in current_contexts]
      added_token_positions, changed_token_positions = (
          compute_changed_token_positions(previous_tokens, current_tokens))
      for r, c in added_token_positions:
        current_contexts[r][c].commit = commit
        blamed_tokens += 1
      for r, c in changed_token_positions:
        pr, pc = changed_token_positions[(r, c)]
        previous_contexts[pr][pc] = current_contexts[r][c]

      assert added_lines_start <= added_lines_end <= len(blame)
      current_blame_size = len(blame)
      blame[added_lines_start:added_lines_end] = previous_contexts
      offset += len(blame) - current_blame_size

  assert blame == [] or blame == [[]]
  return uber_blame


def uberblame(file_name, revision, tokenization_method):
  """Computes the uberblame of file |file_name|.

  Args:
    file_name: File to uberblame.
    revision: The revision to start the uberblame at.
    tokenization_method: A function that takes a string and returns a list of
        TokenContexts.

  Returns:
    A tuple (data, blame).
      data: File contents.
      blame: A list of TokenContexts.
  """
  DIFF_CONTEXT = 3
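  # The %x00 separators in the format string below, combined with -z, break
  # the git log output into NUL-delimited fields; generate_substrings and
  # generate_commits rely on this to recover, per commit, the hash, author
  # name, author email, author date, message and diff.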
  cmd_git_log = [
      'git', 'log', '--minimal', '--no-prefix', '--follow', '-m',
      '--first-parent', '-p',
      '-U%d' % DIFF_CONTEXT, '-z', '--format=%x00%H%x00%an%x00%ae%x00%ad%x00%B',
      revision, '--', file_name
  ]
  git_log = subprocess.Popen(
      cmd_git_log, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  data = subprocess.check_output(
      ['git', 'show', '%s:%s' % (revision, file_name)]).decode()
  data, blame = uberblame_aux(file_name, git_log.stdout, data,
                              tokenization_method)

  stderr = git_log.communicate()[1].decode()
  if git_log.returncode != 0:
    raise subprocess.CalledProcessError(git_log.returncode, cmd_git_log, stderr)
  return data, blame


def generate_pastel_color():
  """Generates a random color from a nice looking pastel palette.

  Returns:
    The color, formatted as hex string.  For example, white is "#FFFFFF".
  """
  (h, l, s) = (random.uniform(0, 1), random.uniform(0.8, 0.9),
               random.uniform(0.5, 1))
  (r, g, b) = colorsys.hls_to_rgb(h, l, s)
  return "#%0.2X%0.2X%0.2X" % (int(r * 255), int(g * 255), int(b * 255))


def colorize_diff(diff):
  """Colorizes a diff for use in an HTML page.

  Args:
    diff: The diff, in unified diff format, as a list of line strings.

  Returns:
    The HTML-formatted diff, as a string.  The diff will already be escaped.
  """
  colorized = []
  for line in diff:
    escaped = html.escape(line.replace('\r', ''), quote=True)
    if line.startswith('+'):
      colorized.append('<span class=\\"addition\\">%s</span>' % escaped)
    elif line.startswith('-'):
      colorized.append('<span class=\\"deletion\\">%s</span>' % escaped)
    elif line.startswith('@@'):
      context_begin = escaped.find('@@', 2)
      assert context_begin != -1
      colorized.append(
          '<span class=\\"chunk_meta\\">%s</span>'
          '<span class=\\"chunk_context\\">%s</span>' %
          (escaped[0:context_begin + 2], escaped[context_begin + 2:]))
    elif line.startswith('diff') or line.startswith('index'):
      colorized.append('<span class=\\"file_header\\">%s</span>' % escaped)
    else:
      colorized.append('<span class=\\"context_line\\">%s</span>' % escaped)
  return '\n'.join(colorized)


def create_visualization(data, blame):
  """Creates a web page to visualize |blame|.

  Args:
    data: The data file as returned by uberblame().
    blame: A list of TokenContexts as returned by uberblame().

  Returns:
    The HTML for the generated page, as a string.
  """
  # Use the same seed for the color generator on each run so that
  # loading the same blame of the same file twice will result in the
  # same generated HTML page.
  random.seed(0x52937865ec62d1ea)
  page = """\
  <html>
    <head>
      <style>
        body {
          font-family: monospace;
        }
        pre {
          display: inline;
        }
        .token {
          outline: 1pt solid #00000030;
          outline-offset: -1pt;
          cursor: pointer;
        }
        .addition {
          color: #080;
        }
        .deletion {
          color: #c00;
        }
        .chunk_meta {
          color: #099;
        }
        .context_line .chunk_context {
          /* Just normal text. */
        }
        .file_header {
          font-weight: bold;
        }
        #linenums {
          text-align: right;
        }
        #file_display {
          position: absolute;
          left: 0;
          top: 0;
          width: 50%%;
          height: 100%%;
          overflow: scroll;
        }
        #commit_display_container {
          position: absolute;
          left: 50%%;
          top: 0;
          width: 50%%;
          height: 100%%;
          overflow: scroll;
        }
      </style>
      <script>
        commit_data = %s;
        function display_commit(hash) {
          var e = document.getElementById("commit_display");
          e.innerHTML = commit_data[hash]
        }
      </script>
    </head>
    <body>
      <div id="file_display">
        <table>
          <tbody>
            <tr>
              <td valign="top" id="linenums">
                <pre>%s</pre>
              </td>
              <td valign="top">
                <pre>%s</pre>
              </td>
            </tr>
          </tbody>
        </table>
      </div>
      <div id="commit_display_container" valign="top">
        <pre id="commit_display" />
      </div>
    </body>
  </html>
  """
  page = textwrap.dedent(page)
  commits = {}
  lines = []
  commit_colors = {}
  blame_index = 0
  blame = [context for contexts in blame for context in contexts]
  row = 0
  lastline = ''
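  # Comment added for clarity: the loop below walks the file character by
  # character in lockstep with the flattened |blame| list.  A new
  # <span class="token"> is opened at the first character of a run of tokens
  # that share a commit, and closed after the last character of that run, so
  # consecutive tokens from the same commit are highlighted as one block.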
  for line in data.split('\n'):
    lastline = line
    column = 0
    for c in line + '\n':
      if blame_index < len(blame):
        token_context = blame[blame_index]
        if (row == token_context.row and
            column == token_context.column + len(token_context.token)):
          if (blame_index + 1 == len(blame) or blame[blame_index].commit.hash !=
              blame[blame_index + 1].commit.hash):
            lines.append('</span>')
          blame_index += 1
      if blame_index < len(blame):
        token_context = blame[blame_index]
        if row == token_context.row and column == token_context.column:
          if (blame_index == 0 or blame[blame_index - 1].commit.hash !=
              blame[blame_index].commit.hash):
            hash = token_context.commit.hash
            commits[hash] = token_context.commit
            if hash not in commit_colors:
              commit_colors[hash] = generate_pastel_color()
            color = commit_colors[hash]
            lines.append(('<span class="token" style="background-color: %s" ' +
                          'onclick="display_commit(&quot;%s&quot;)">') %
                         (color, hash))
      lines.append(html.escape(c))
      column += 1
    row += 1
  commit_data = ['{\n']
  commit_display_format = """\
  commit: {hash}
  Author: {author_name} <{author_email}>
  Date: {author_date}

  {message}
  """
  commit_display_format = textwrap.dedent(commit_display_format)
  links = re.compile(r'(https?:\/\/\S+)')
  for hash in commits:
    commit = commits[hash]
    commit_display = commit_display_format.format(
        hash=hash,
        author_name=commit.author_name,
        author_email=commit.author_email,
        author_date=commit.author_date,
        message=commit.message)
    commit_display = html.escape(commit_display, quote=True)
    commit_display += colorize_diff(commit.diff)
    commit_display = re.sub(links, '<a href=\\"\\1\\">\\1</a>', commit_display)
    commit_display = commit_display.replace('\n', '\\n')
    commit_data.append('"%s": "%s",\n' % (hash, commit_display))
  commit_data.append('}')
  commit_data = ''.join(commit_data)
  line_nums = range(1, row if lastline.strip() == '' else row + 1)
  line_nums = '\n'.join([str(num) for num in line_nums])
  lines = ''.join(lines)
  return page % (commit_data, line_nums, lines)


def show_visualization(page):
  """Displays |page| in a web browser.

  Args:
    page: The contents of the file to display, as a string.
  """
  # Keep the temporary file around so the browser has time to open it.
  # TODO(thomasanderson): spin up a temporary web server to serve this
  # file so we don't have to leak it.
  html_file = tempfile.NamedTemporaryFile(delete=False, suffix='.html')
  html_file.write(page.encode())
  html_file.flush()
  if sys.platform.startswith('linux'):
    # Don't show any messages when starting the browser.
    saved_stdout = os.dup(1)
    saved_stderr = os.dup(2)
    os.close(1)
    os.close(2)
    os.open(os.devnull, os.O_RDWR)
    os.open(os.devnull, os.O_RDWR)
  webbrowser.open('file://' + html_file.name)
  if sys.platform.startswith('linux'):
    os.dup2(saved_stdout, 1)
    os.dup2(saved_stderr, 2)
    os.close(saved_stdout)
    os.close(saved_stderr)


def main(argv):
  parser = argparse.ArgumentParser(
      description='Show what revision last modified each token of a file.')
  parser.add_argument(
      'revision',
      default='HEAD',
      nargs='?',
      help='show only commits starting from a revision')
  parser.add_argument('file', help='the file to uberblame')
  parser.add_argument(
      '--skip-visualization',
      action='store_true',
      help='do not display the blame visualization in a web browser')
  parser.add_argument(
      '--tokenize-by-char',
      action='store_true',
      help='treat individual characters as tokens')
  parser.add_argument(
      '--tokenize-whitespace',
      action='store_true',
      help='also blame non-newline whitespace characters')
  args = parser.parse_args(argv)

  def tokenization_method(data):
    return tokenize_data(data, args.tokenize_by_char, args.tokenize_whitespace)

  data, blame = uberblame(args.file, args.revision, tokenization_method)
  html = create_visualization(data, blame)
  if not args.skip_visualization:
    show_visualization(html)
  return 0


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))