generate_legacy_perf_dashboard_json_unittest.py

#!/usr/bin/env vpython3
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import json
import os
import unittest

import six

import generate_legacy_perf_dashboard_json


class LegacyResultsProcessorUnittest(unittest.TestCase):

  def setUp(self):
    """Sets up the test data directory for each test method below."""
    super(LegacyResultsProcessorUnittest, self).setUp()
    if six.PY2:
      self.data_directory = os.path.join(
          os.path.dirname(os.path.abspath(__file__)), 'testdata')
    else:
      self.data_directory = os.path.join(
          os.path.dirname(os.path.abspath(__file__)), 'testdata', 'python3')

  def _ConstructDefaultProcessor(self):
    """Creates a LegacyResultsProcessor instance.

    Returns:
      An instance of the LegacyResultsProcessor class.
    """
    return generate_legacy_perf_dashboard_json.LegacyResultsProcessor()

  def _ProcessLog(self, log_processor, logfile):  # pylint: disable=R0201
    """Reads in an input log file and processes it.

    This changes the state of the log processor object; the output is stored
    in the object and can be retrieved using the PerformanceLogs() method.

    Args:
      log_processor: A PerformanceLogProcessor instance.
      logfile: File name of an input performance results log file.
    """
    with open(os.path.join(self.data_directory, logfile)) as f:
      for line in f:
        log_processor.ProcessLine(line)
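
  # A minimal sketch of the kind of line ProcessLine() is fed, assuming the
  # legacy "RESULT" log format; this sample is illustrative only and is not
  # taken from the checked-in testdata:
  #   RESULT commit_charge: commit_charge= [1024,2048] kb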

  def _CheckFileExistsWithData(self, logs, graph):
    """Asserts that |graph| exists in the |logs| dict and is non-empty."""
    self.assertIn(graph, logs, 'File %s was not output.' % graph)
    self.assertTrue(logs[graph], 'File %s did not contain data.' % graph)

  def _ConstructParseAndCheckLogfiles(self, inputfiles, graphs):
    """Uses a log processor to process the given input files.

    Args:
      inputfiles: A list of input performance results log file names.
      graphs: A list of graph names expected in the generated output.

    Returns:
      A dictionary mapping graph name to the corresponding output data.
    """
    parser = self._ConstructDefaultProcessor()
    for inputfile in inputfiles:
      self._ProcessLog(parser, inputfile)
    logs = json.loads(parser.GenerateGraphJson())
    for graph in graphs:
      self._CheckFileExistsWithData(logs, graph)
    return logs
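
  # For orientation: |logs| is assumed to map each graph name to its parsed
  # JSON data, roughly of this hypothetical shape (not the processor's exact
  # schema):
  #   {'commit_charge': {'traces': {...}, ...}, 'processes': {...}, ...}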

  def _ConstructParseAndCheckJSON(self, inputfiles, logfiles, graphs):
    """Processes input with a log processor and checks against expectations.

    Args:
      inputfiles: A list of input performance result log file names.
      logfiles: A list of expected output ".dat" file names, in the same
          order as |graphs|.
      graphs: A list of graph names to check against the expected outputs.
    """
    logs = self._ConstructParseAndCheckLogfiles(inputfiles, graphs)
    for filename, graph_name in zip(logfiles, graphs):
      actual = logs[graph_name]
      path = os.path.join(self.data_directory, filename)
      with open(path) as f:
        expected = json.load(f)
      self.assertEqual(
          expected, actual,
          'JSON data in %s did not match expectations.' % filename)

  def testSummary(self):
    # Tests the output of "summary" files, which contain per-graph data.
    graphs = ['commit_charge',
              'ws_final_total', 'vm_final_browser', 'vm_final_total',
              'ws_final_browser', 'processes', 'artificial_graph']
    input_files = ['graphing_processor.log']
    output_files = ['%s-summary.dat' % graph for graph in graphs]
    self._ConstructParseAndCheckJSON(input_files, output_files, graphs)


if __name__ == '__main__':
  unittest.main()
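
# The shebang above selects vpython3, so a typical invocation (assuming a
# Chromium checkout with vpython3 on PATH) would be:
#   vpython3 generate_legacy_perf_dashboard_json_unittest.py
# Plain python3 should also work wherever the six module is available.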