#!/usr/bin/env python
# Copyright 2019 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
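#
# Smoke tests for the BuildBucket API GPU use cases: runs
# gather_power_measurement_results and gather_swarming_json_results against
# data from the latest green builds and checks that the results are sane.
#
# Example invocation (a sketch; the output path below is hypothetical):
#   testing/scripts/test_buildbucket_api_gpu_use_cases.py \
#       --isolated-script-test-output=/tmp/results.json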
from __future__ import print_function

import argparse
import json
import os
import sys

# Add src/testing/ into sys.path for importing common without pylint errors.
sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from scripts import common

# Add src/content/test/gpu into sys.path for importing the gather_* modules.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
                                             os.path.pardir, os.path.pardir,
                                             'content', 'test', 'gpu')))

import gather_power_measurement_results
import gather_swarming_json_results
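
# Each Test* method below returns None on success, or an error message string
# on failure; main() treats any non-None return value as a test failure.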
class BuildBucketApiGpuUseCaseTests:

  @classmethod
  def GenerateTests(cls):
    return [
        'TestGatherPowerMeasurementResultsFromLatestGreenBuild',
        'TestGatherWebGL2TestTimesFromLatestGreenBuild',
    ]

  @staticmethod
  def TestGatherPowerMeasurementResultsFromLatestGreenBuild():
    # Verify we can get power measurement test data from the latest successful
    # build, including the swarming bot that ran the test and the actual test
    # results.
    bot = 'Win10 FYI x64 Release (Intel)'
    step = 'power_measurement_test'
    build_id = gather_power_measurement_results.GetLatestGreenBuild(bot)
    build_json = gather_power_measurement_results.GetJsonForBuildSteps(
        bot, build_id)
    if 'steps' not in build_json:
      return '"steps" is missing from the build json'
    stdout_url = gather_power_measurement_results.FindStepLogURL(
        build_json['steps'], step, 'stdout')
    if not stdout_url:
      return 'Unable to find stdout from step %s' % step
    results = {'number': build_id, 'tests': []}
    # ProcessStepStdout fills in the 'bot' and 'tests' entries of results.
    gather_power_measurement_results.ProcessStepStdout(stdout_url, results)
    if 'bot' not in results or not results['bot'].startswith('BUILD'):
      return 'Failed to find a bot name starting with BUILD'
    if not results['tests']:
      return 'Failed to find power measurement test data'
    return None

  @staticmethod
  def TestGatherWebGL2TestTimesFromLatestGreenBuild():
    # Verify we can get the running times of more than 2000 WebGL2 tests from
    # the latest successful build.
    extracted_times, _ = gather_swarming_json_results.GatherResults(
        bot='Linux FYI Release (NVIDIA)',
        build=None,  # Use the latest green build
        step='webgl2_conformance_validating_tests')
    if 'times' not in extracted_times:
      return '"times" is missing from the extracted dict'
    num_of_tests = len(extracted_times['times'])
    # A local run saw roughly 2700 tests, so this is a sanity check that we
    # got reasonable data.
    if num_of_tests < 2000:
      return 'expected at least 2000 tests, got %d tests' % num_of_tests
    return None


def main(argv):
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-output', type=str)
  parser.add_argument(
      '--isolated-script-test-chartjson-output', type=str,
      required=False)
  parser.add_argument(
      '--isolated-script-test-perf-output', type=str,
      required=False)
  parser.add_argument(
      '--isolated-script-test-filter', type=str,
      required=False)
  args = parser.parse_args(argv)

  # Run the actual tests.
  failures = []
  retval = 1
  for test_name in BuildBucketApiGpuUseCaseTests.GenerateTests():
    test = getattr(BuildBucketApiGpuUseCaseTests, test_name)
    error_msg = test()
    if error_msg is not None:
      result = '%s: %s' % (test_name, error_msg)
      print('FAIL: %s' % result)
      failures.append(result)
  if not failures:
    print('PASS: test_buildbucket_api_gpu_use_cases ran successfully.')
    retval = 0

  if args.isolated_script_test_output:
    with open(args.isolated_script_test_output, 'w') as json_file:
      json.dump({
          'valid': True,
          'failures': failures,
      }, json_file)

  return retval


# This is not really a "script test" so it does not need to manually add
# any additional compile targets.
def main_compile_targets(args):
  json.dump([], args.output)


if __name__ == '__main__':
  # Conform minimally to the protocol defined by ScriptTest.
  if 'compile_targets' in sys.argv:
    funcs = {
        'run': None,
        'compile_targets': main_compile_targets,
    }
    sys.exit(common.run_script(sys.argv[1:], funcs))
  sys.exit(main(sys.argv[1:]))