siggen.py

import hashlib
import logging
import os
import re
import tempfile
import pickle
import bb.data
from bb.checksum import FileChecksumCache

logger = logging.getLogger('BitBake.SigGen')

def init(d):
    siggens = [obj for obj in globals().values()
               if type(obj) is type and issubclass(obj, SignatureGenerator)]

    desired = d.getVar("BB_SIGNATURE_HANDLER", True) or "noop"
    for sg in siggens:
        if desired == sg.name:
            return sg(d)
    logger.error("Invalid signature generator '%s', using default 'noop'\n"
                 "Available generators: %s", desired,
                 ', '.join(obj.name for obj in siggens))
    return SignatureGenerator(d)
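
# Example (a sketch, assuming a parsed datastore `d`): setting
#   BB_SIGNATURE_HANDLER = "basichash"
# makes init(d) return a SignatureGeneratorBasicHash instance; an
# unrecognised name is logged and falls back to the no-op generator below.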

class SignatureGenerator(object):
    """
    A "noop" signature generator: every task hashes to "0", so nothing is
    ever considered to have changed.
    """
    name = "noop"

    def __init__(self, data):
        self.taskhash = {}
        self.runtaskdeps = {}
        self.file_checksum_values = {}
        self.taints = {}

    def finalise(self, fn, d, variant):
        return

    def get_taskhash(self, fn, task, deps, dataCache):
        return "0"

    def writeout_file_checksum_cache(self):
        """Write/update the file checksum cache onto disk"""
        return

    def stampfile(self, stampbase, file_name, taskname, extrainfo):
        return ("%s.%s.%s" % (stampbase, taskname, extrainfo)).rstrip('.')

    def stampcleanmask(self, stampbase, file_name, taskname, extrainfo):
        return ("%s.%s.%s" % (stampbase, taskname, extrainfo)).rstrip('.')

    def dump_sigtask(self, fn, task, stampbase, runtime):
        return

    def invalidate_task(self, task, d, fn):
        bb.build.del_stamp(task, d, fn)

    def dump_sigs(self, dataCache, options):
        return

    def get_taskdata(self):
        return (self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints)

    def set_taskdata(self, data):
        self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints = data
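
# get_taskdata()/set_taskdata() exist so the generator's accumulated state
# can be serialised and restored in another process; this is how BitBake's
# worker processes receive the hash data computed by the main cooker.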

class SignatureGeneratorBasic(SignatureGenerator):
    """
    Signature generator that hashes each task's variable dependencies
    (and, transitively, theirs) so that changes can be detected.
    """
    name = "basic"

    def __init__(self, data):
        self.basehash = {}
        self.taskhash = {}
        self.taskdeps = {}
        self.runtaskdeps = {}
        self.file_checksum_values = {}
        self.taints = {}
        self.gendeps = {}
        self.lookupcache = {}
        self.pkgnameextract = re.compile(r"(?P<fn>.*)\..*")
        self.basewhitelist = set((data.getVar("BB_HASHBASE_WHITELIST", True) or "").split())
        self.taskwhitelist = None
        self.init_rundepcheck(data)
        checksum_cache_file = data.getVar("BB_HASH_CHECKSUM_CACHE_FILE", True)
        if checksum_cache_file:
            self.checksum_cache = FileChecksumCache()
            self.checksum_cache.init_cache(data, checksum_cache_file)
        else:
            self.checksum_cache = None

    def init_rundepcheck(self, data):
        self.taskwhitelist = data.getVar("BB_HASHTASK_WHITELIST", True) or None
        if self.taskwhitelist:
            self.twl = re.compile(self.taskwhitelist)
        else:
            self.twl = None
    def _build_data(self, fn, d):
        tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d)

        taskdeps = {}
        basehash = {}

        for task in tasklist:
            data = lookupcache[task]

            if data is None:
                bb.error("Task %s from %s seems to be empty?!" % (task, fn))
                data = ''

            # Compute the transitive closure of the task's variable
            # dependencies, skipping whitelisted variables
            gendeps[task] -= self.basewhitelist
            newdeps = gendeps[task]
            seen = set()
            while newdeps:
                nextdeps = newdeps
                seen |= nextdeps
                newdeps = set()
                for dep in nextdeps:
                    if dep in self.basewhitelist:
                        continue
                    gendeps[dep] -= self.basewhitelist
                    newdeps |= gendeps[dep]
                newdeps -= seen

            alldeps = sorted(seen)
            for dep in alldeps:
                data = data + dep
                var = lookupcache[dep]
                if var is not None:
                    data = data + str(var)
            self.basehash[fn + "." + task] = hashlib.md5(data.encode("utf-8")).hexdigest()
            taskdeps[task] = alldeps

        self.taskdeps[fn] = taskdeps
        self.gendeps[fn] = gendeps
        self.lookupcache[fn] = lookupcache

        return taskdeps
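
    # Illustration (hypothetical variable names/values): if do_compile expands
    # CFLAGS, and CFLAGS in turn references DEBUG_FLAGS, the loop above pulls
    # both into `alldeps`, and the basehash becomes, in effect:
    #
    #   md5(<do_compile body> + "CFLAGS" + str(<CFLAGS value>)
    #       + "DEBUG_FLAGS" + str(<DEBUG_FLAGS value>))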
    def finalise(self, fn, d, variant):

        if variant:
            fn = "virtual:" + variant + ":" + fn

        try:
            taskdeps = self._build_data(fn, d)
        except:
            bb.warn("Error during finalise of %s" % fn)
            raise

        # Slow but can be useful for debugging mismatched basehashes
        #for task in self.taskdeps[fn]:
        #    self.dump_sigtask(fn, task, d.getVar("STAMP", True), False)

        for task in taskdeps:
            d.setVar("BB_BASEHASH_task-%s" % task, self.basehash[fn + "." + task])
    def rundep_check(self, fn, recipename, task, dep, depname, dataCache):
        # Return True if we should keep the dependency, False to drop it
        # We only manipulate the dependencies for packages not in the whitelist
        if self.twl and not self.twl.search(recipename):
            # then process the actual dependencies
            if self.twl.search(depname):
                return False
        return True

    def read_taint(self, fn, task, stampbase):
        taint = None
        try:
            with open(stampbase + '.' + task + '.taint', 'r') as taintf:
                taint = taintf.read()
        except IOError:
            pass
        return taint
    def get_taskhash(self, fn, task, deps, dataCache):
        k = fn + "." + task
        data = dataCache.basetaskhash[k]
        self.runtaskdeps[k] = []
        self.file_checksum_values[k] = []
        recipename = dataCache.pkg_fn[fn]
        for dep in sorted(deps, key=clean_basepath):
            depname = dataCache.pkg_fn[self.pkgnameextract.search(dep).group('fn')]
            if not self.rundep_check(fn, recipename, task, dep, depname, dataCache):
                continue
            if dep not in self.taskhash:
                bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?" % dep)
            data = data + self.taskhash[dep]
            self.runtaskdeps[k].append(dep)

        if task in dataCache.file_checksums[fn]:
            if self.checksum_cache:
                checksums = self.checksum_cache.get_checksums(dataCache.file_checksums[fn][task], recipename)
            else:
                checksums = bb.fetch2.get_file_checksums(dataCache.file_checksums[fn][task], recipename)
            for (f, cs) in checksums:
                self.file_checksum_values[k].append((f, cs))
                if cs:
                    data = data + cs

        taskdep = dataCache.task_deps[fn]
        if 'nostamp' in taskdep and task in taskdep['nostamp']:
            # Nostamp tasks need an implicit taint so that they force any dependent tasks to run
            import uuid
            taint = str(uuid.uuid4())
            data = data + taint
            self.taints[k] = "nostamp:" + taint

        taint = self.read_taint(fn, task, dataCache.stamp[fn])
        if taint:
            data = data + taint
            self.taints[k] = taint
            logger.warning("%s is tainted from a forced run" % k)

        h = hashlib.md5(data.encode("utf-8")).hexdigest()
        self.taskhash[k] = h
        #d.setVar("BB_TASKHASH_task-%s" % task, taskhash[task])
        return h
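
    # In short, a task's hash is md5 over: its basehash, the taskhashes of all
    # non-whitelisted runtime dependencies, any tracked file checksums, and
    # any taint. Changing any one of these yields a new taskhash, which is
    # what forces dependent tasks to re-run.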
    def writeout_file_checksum_cache(self):
        """Write/update the file checksum cache onto disk"""
        if self.checksum_cache:
            self.checksum_cache.save_extras()
            self.checksum_cache.save_merge()
        else:
            bb.fetch2.fetcher_parse_save()
            bb.fetch2.fetcher_parse_done()
    def dump_sigtask(self, fn, task, stampbase, runtime):
        k = fn + "." + task
        referencestamp = stampbase
        if isinstance(runtime, str) and runtime.startswith("customfile"):
            sigfile = stampbase
            referencestamp = runtime[11:]
        elif runtime and k in self.taskhash:
            sigfile = stampbase + "." + task + ".sigdata" + "." + self.taskhash[k]
        else:
            sigfile = stampbase + "." + task + ".sigbasedata" + "." + self.basehash[k]

        bb.utils.mkdirhier(os.path.dirname(sigfile))

        data = {}
        data['task'] = task
        data['basewhitelist'] = self.basewhitelist
        data['taskwhitelist'] = self.taskwhitelist
        data['taskdeps'] = self.taskdeps[fn][task]
        data['basehash'] = self.basehash[k]
        data['gendeps'] = {}
        data['varvals'] = {}
        data['varvals'][task] = self.lookupcache[fn][task]
        for dep in self.taskdeps[fn][task]:
            if dep in self.basewhitelist:
                continue
            data['gendeps'][dep] = self.gendeps[fn][dep]
            data['varvals'][dep] = self.lookupcache[fn][dep]

        if runtime and k in self.taskhash:
            data['runtaskdeps'] = self.runtaskdeps[k]
            data['file_checksum_values'] = [(os.path.basename(f), cs) for f, cs in self.file_checksum_values[k]]
            data['runtaskhashes'] = {}
            for dep in data['runtaskdeps']:
                data['runtaskhashes'][dep] = self.taskhash[dep]
            data['taskhash'] = self.taskhash[k]

        taint = self.read_taint(fn, task, referencestamp)
        if taint:
            data['taint'] = taint

        if runtime and k in self.taints:
            if 'nostamp:' in self.taints[k]:
                data['taint'] = self.taints[k]

        # Write atomically: dump to a temporary file, then rename into place
        fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
        try:
            with os.fdopen(fd, "wb") as stream:
                pickle.dump(data, stream, -1)
                stream.flush()
            os.chmod(tmpfile, 0o664)
            os.rename(tmpfile, sigfile)
        except (OSError, IOError) as err:
            try:
                os.unlink(tmpfile)
            except OSError:
                pass
            raise err

        computed_basehash = calc_basehash(data)
        if computed_basehash != self.basehash[k]:
            bb.error("Basehash mismatch %s versus %s for %s" % (computed_basehash, self.basehash[k], k))
        if runtime and k in self.taskhash:
            computed_taskhash = calc_taskhash(data)
            if computed_taskhash != self.taskhash[k]:
                bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[k], k))
    def dump_sigs(self, dataCache, options):
        for fn in self.taskdeps:
            for task in self.taskdeps[fn]:
                k = fn + "." + task
                if k not in self.taskhash:
                    continue
                if dataCache.basetaskhash[k] != self.basehash[k]:
                    bb.error("Bitbake's cached basehash does not match the one we just generated (%s)!" % k)
                    bb.error("The mismatched hashes were %s and %s" % (dataCache.basetaskhash[k], self.basehash[k]))
                self.dump_sigtask(fn, task, dataCache.stamp[fn], True)

class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
    name = "basichash"

    def stampfile(self, stampbase, fn, taskname, extrainfo, clean=False):
        if taskname != "do_setscene" and taskname.endswith("_setscene"):
            k = fn + "." + taskname[:-9]
        else:
            k = fn + "." + taskname
        if clean:
            h = "*"
        elif k in self.taskhash:
            h = self.taskhash[k]
        else:
            # If k is not in basehash, then error
            h = self.basehash[k]
        return ("%s.%s.%s.%s" % (stampbase, taskname, h, extrainfo)).rstrip('.')
    def stampcleanmask(self, stampbase, fn, taskname, extrainfo):
        return self.stampfile(stampbase, fn, taskname, extrainfo, clean=True)

    def invalidate_task(self, task, d, fn):
        bb.note("Tainting hash to force rebuild of task %s, %s" % (fn, task))
        bb.build.write_taint(task, d, fn)

def dump_this_task(outfile, d):
    import bb.parse
    fn = d.getVar("BB_FILENAME", True)
    task = "do_" + d.getVar("BB_CURRENTTASK", True)
    referencestamp = bb.build.stamp_internal(task, d, None, True)
    bb.parse.siggen.dump_sigtask(fn, task, outfile, "customfile:" + referencestamp)

def clean_basepath(a):
    b = a.rsplit("/", 2)[1] + a.rsplit("/", 2)[2]
    if a.startswith("virtual:"):
        b = b + ":" + a.rsplit(":", 1)[0]
    return b
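
# Worked example (hypothetical path): for
#   "virtual:native:/meta/recipes/foo/foo_1.0.bb.do_configure"
# the rsplit pieces are "foo" and "foo_1.0.bb.do_configure", giving
# "foofoo_1.0.bb.do_configure:virtual:native" - a shortened key that still
# identifies the recipe, task and variant without the full path.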
def clean_basepaths(a):
    b = {}
    for x in a:
        b[clean_basepath(x)] = a[x]
    return b

def clean_basepaths_list(a):
    b = []
    for x in a:
        b.append(clean_basepath(x))
    return b

def compare_sigfiles(a, b, recursecb=None):
    output = []

    with open(a, "rb") as f:
        a_data = pickle.load(f)
    with open(b, "rb") as f:
        b_data = pickle.load(f)

    def dict_diff(a, b, whitelist=set()):
        sa = set(a.keys())
        sb = set(b.keys())
        common = sa & sb
        changed = set()
        for i in common:
            if a[i] != b[i] and i not in whitelist:
                changed.add(i)
        added = sb - sa
        removed = sa - sb
        return changed, added, removed

    def file_checksums_diff(a, b):
        from collections import Counter
        # Handle old siginfo format
        if isinstance(a, dict):
            a = [(os.path.basename(f), cs) for f, cs in a.items()]
        if isinstance(b, dict):
            b = [(os.path.basename(f), cs) for f, cs in b.items()]

        # Compare lists, ensuring we can handle duplicate filenames if they exist
        removedcount = Counter(a)
        removedcount.subtract(b)
        addedcount = Counter(b)
        addedcount.subtract(a)
        added = []
        for x in b:
            if addedcount[x] > 0:
                addedcount[x] -= 1
                added.append(x)
        removed = []
        changed = []
        for x in a:
            if removedcount[x] > 0:
                removedcount[x] -= 1
                for y in added:
                    if y[0] == x[0]:
                        changed.append((x[0], x[1], y[1]))
                        added.remove(y)
                        break
                else:
                    removed.append(x)
        added = [x[0] for x in added]
        removed = [x[0] for x in removed]
        return changed, added, removed
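
    # file_checksums_diff() pairs entries by filename, so e.g. (hypothetical)
    # [("a.patch", "111")] vs [("a.patch", "222"), ("b.patch", "333")] yields
    # changed=[("a.patch", "111", "222")], added=["b.patch"], removed=[].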
    if 'basewhitelist' in a_data and a_data['basewhitelist'] != b_data['basewhitelist']:
        output.append("basewhitelist changed from '%s' to '%s'" % (a_data['basewhitelist'], b_data['basewhitelist']))
        if a_data['basewhitelist'] and b_data['basewhitelist']:
            output.append("changed items: %s" % a_data['basewhitelist'].symmetric_difference(b_data['basewhitelist']))

    if 'taskwhitelist' in a_data and a_data['taskwhitelist'] != b_data['taskwhitelist']:
        output.append("taskwhitelist changed from '%s' to '%s'" % (a_data['taskwhitelist'], b_data['taskwhitelist']))
        if a_data['taskwhitelist'] and b_data['taskwhitelist']:
            output.append("changed items: %s" % a_data['taskwhitelist'].symmetric_difference(b_data['taskwhitelist']))

    if a_data['taskdeps'] != b_data['taskdeps']:
        output.append("Task dependencies changed from:\n%s\nto:\n%s" % (sorted(a_data['taskdeps']), sorted(b_data['taskdeps'])))

    if a_data['basehash'] != b_data['basehash']:
        output.append("basehash changed from %s to %s" % (a_data['basehash'], b_data['basehash']))

    changed, added, removed = dict_diff(a_data['gendeps'], b_data['gendeps'], a_data['basewhitelist'] & b_data['basewhitelist'])
    if changed:
        for dep in changed:
            output.append("List of dependencies for variable %s changed from '%s' to '%s'" % (dep, a_data['gendeps'][dep], b_data['gendeps'][dep]))
            if a_data['gendeps'][dep] and b_data['gendeps'][dep]:
                output.append("changed items: %s" % a_data['gendeps'][dep].symmetric_difference(b_data['gendeps'][dep]))
    if added:
        for dep in added:
            output.append("Dependency on variable %s was added" % (dep))
    if removed:
        for dep in removed:
            output.append("Dependency on variable %s was removed" % (dep))

    changed, added, removed = dict_diff(a_data['varvals'], b_data['varvals'])
    if changed:
        for dep in changed:
            output.append("Variable %s value changed from '%s' to '%s'" % (dep, a_data['varvals'][dep], b_data['varvals'][dep]))

    if 'file_checksum_values' not in a_data:
        a_data['file_checksum_values'] = {}
    if 'file_checksum_values' not in b_data:
        b_data['file_checksum_values'] = {}

    changed, added, removed = file_checksums_diff(a_data['file_checksum_values'], b_data['file_checksum_values'])
    if changed:
        for f, old, new in changed:
            output.append("Checksum for file %s changed from %s to %s" % (f, old, new))
    if added:
        for f in added:
            output.append("Dependency on checksum of file %s was added" % (f))
    if removed:
        for f in removed:
            output.append("Dependency on checksum of file %s was removed" % (f))

    if 'runtaskdeps' not in a_data:
        a_data['runtaskdeps'] = {}
    if 'runtaskdeps' not in b_data:
        b_data['runtaskdeps'] = {}

    if len(a_data['runtaskdeps']) != len(b_data['runtaskdeps']):
        changed = ["Number of task dependencies changed"]
    else:
        changed = []
        for idx, task in enumerate(a_data['runtaskdeps']):
            a = a_data['runtaskdeps'][idx]
            b = b_data['runtaskdeps'][idx]
            if a_data['runtaskhashes'][a] != b_data['runtaskhashes'][b]:
                changed.append("%s with hash %s\n changed to\n%s with hash %s" % (a, a_data['runtaskhashes'][a], b, b_data['runtaskhashes'][b]))

    if changed:
        output.append("runtaskdeps changed from %s to %s" % (clean_basepaths_list(a_data['runtaskdeps']), clean_basepaths_list(b_data['runtaskdeps'])))
        output.append("\n".join(changed))
    if 'runtaskhashes' in a_data and 'runtaskhashes' in b_data:
        a = a_data['runtaskhashes']
        b = b_data['runtaskhashes']
        changed, added, removed = dict_diff(a, b)
        if added:
            for dep in added:
                bdep_found = False
                if removed:
                    for bdep in removed:
                        if b[dep] == a[bdep]:
                            #output.append("Dependency on task %s was replaced by %s with same hash" % (dep, bdep))
                            bdep_found = True
                if not bdep_found:
                    output.append("Dependency on task %s was added with hash %s" % (clean_basepath(dep), b[dep]))
        if removed:
            for dep in removed:
                adep_found = False
                if added:
                    for adep in added:
                        if b[adep] == a[dep]:
                            #output.append("Dependency on task %s was replaced by %s with same hash" % (adep, dep))
                            adep_found = True
                if not adep_found:
                    output.append("Dependency on task %s was removed with hash %s" % (clean_basepath(dep), a[dep]))
        if changed:
            for dep in changed:
                output.append("Hash for dependent task %s changed from %s to %s" % (clean_basepath(dep), a[dep], b[dep]))
                if callable(recursecb):
                    # If a dependent hash changed, might as well print the line above and then defer to the changes in
                    # that hash since in all likelihood, they're the same changes this task also saw.
                    recout = recursecb(dep, a[dep], b[dep])
                    if recout:
                        output = [output[-1]] + recout

    a_taint = a_data.get('taint', None)
    b_taint = b_data.get('taint', None)
    if a_taint != b_taint:
        output.append("Taint (by forced/invalidated task) changed from %s to %s" % (a_taint, b_taint))

    return output

def calc_basehash(sigdata):
    task = sigdata['task']
    basedata = sigdata['varvals'][task]

    if basedata is None:
        basedata = ''

    alldeps = sigdata['taskdeps']
    for dep in alldeps:
        basedata = basedata + dep
        val = sigdata['varvals'][dep]
        if val is not None:
            basedata = basedata + str(val)

    return hashlib.md5(basedata.encode("utf-8")).hexdigest()

def calc_taskhash(sigdata):
    data = sigdata['basehash']

    for dep in sigdata['runtaskdeps']:
        data = data + sigdata['runtaskhashes'][dep]

    for c in sigdata['file_checksum_values']:
        data = data + c[1]

    if 'taint' in sigdata:
        if 'nostamp:' in sigdata['taint']:
            data = data + sigdata['taint'][8:]
        else:
            data = data + sigdata['taint']

    return hashlib.md5(data.encode("utf-8")).hexdigest()
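
# Note: calc_basehash()/calc_taskhash() deliberately mirror the hash
# construction in SignatureGeneratorBasic, so a dumped siginfo file can be
# re-verified independently; sigdata['taint'][8:] strips the "nostamp:"
# prefix because only the UUID itself went into the original hash.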

def dump_sigfile(a):
    output = []

    with open(a, "rb") as f:
        a_data = pickle.load(f)

    output.append("basewhitelist: %s" % (a_data['basewhitelist']))
    output.append("taskwhitelist: %s" % (a_data['taskwhitelist']))
    output.append("Task dependencies: %s" % (sorted(a_data['taskdeps'])))
    output.append("basehash: %s" % (a_data['basehash']))

    for dep in a_data['gendeps']:
        output.append("List of dependencies for variable %s is %s" % (dep, a_data['gendeps'][dep]))

    for dep in a_data['varvals']:
        output.append("Variable %s value is %s" % (dep, a_data['varvals'][dep]))

    if 'runtaskdeps' in a_data:
        output.append("Tasks this task depends on: %s" % (a_data['runtaskdeps']))

    if 'file_checksum_values' in a_data:
        output.append("This task depends on the checksums of files: %s" % (a_data['file_checksum_values']))

    if 'runtaskhashes' in a_data:
        for dep in a_data['runtaskhashes']:
            output.append("Hash for dependent task %s is %s" % (dep, a_data['runtaskhashes'][dep]))

    if 'taint' in a_data:
        output.append("Tainted (by forced/invalidated task): %s" % a_data['taint'])

    if 'task' in a_data:
        computed_basehash = calc_basehash(a_data)
        output.append("Computed base hash is %s and from file %s" % (computed_basehash, a_data['basehash']))
    else:
        output.append("Unable to compute base hash")

    computed_taskhash = calc_taskhash(a_data)
    output.append("Computed task hash is %s" % computed_taskhash)

    return output