# -*- coding: utf-8 -*-
# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Module to print help message."""


import sys
import textwrap

from settings_factory import BenchmarkSettings
from settings_factory import GlobalSettings
from settings_factory import LabelSettings


class Help(object):
    """The help class."""

    def GetUsage(self):
        """Return the one-line usage string for the script."""
        return """%s [OPTIONS] EXPERIMENT_FILE""" % (sys.argv[0])

    def _WrapLine(self, line):
        """Wrap a single line of text at 80 columns."""
        return "\n".join(textwrap.wrap(line, 80))

    def _GetFieldDescriptions(self, fields):
        """Format the name, description, type, and default of each field."""
        res = ""
        for field_name in fields:
            field = fields[field_name]
            res += "Field:\t\t%s\n" % field.name
            res += self._WrapLine("Description:\t%s" % field.description) + "\n"
            res += "Type:\t\t%s\n" % type(field).__name__.replace("Field", "")
            res += "Required:\t%s\n" % field.required
            if field.default:
                res += "Default:\t%s\n" % field.default
            res += "\n"
        return res

    def GetHelp(self):
        """Return the full help text, including all field descriptions."""
        global_fields = self._GetFieldDescriptions(GlobalSettings("").fields)
        benchmark_fields = self._GetFieldDescriptions(
            BenchmarkSettings("").fields
        )
        label_fields = self._GetFieldDescriptions(LabelSettings("").fields)

        return """%s is a script for running performance experiments on
ChromeOS. It allows one to run ChromeOS Autotest benchmarks over
several images and compare the results to determine whether there
is a performance difference.

Comparing several images using %s is referred to as running an
"experiment". An "experiment file" is a configuration file which holds
all the information that describes the experiment and how it should be
run. An example of a simple experiment file is below:

--------------------------------- test.exp ---------------------------------
name: my_experiment
board: x86-alex
remote: chromeos2-row1-rack4-host7.cros 172.18.122.132

benchmark: page_cycler_v2.morejs {
  suite: telemetry_Crosperf
  iterations: 3
}

my_first_image {
  chromeos_image: /usr/local/chromeos-1/chromiumos_image.bin
}

my_second_image {
  chromeos_image: /usr/local/chromeos-2/chromiumos_image.bin
}
----------------------------------------------------------------------------

This experiment file names the experiment "my_experiment". It will be
run on the board x86-alex. Benchmarks will be run using two remote
devices: one specified by hostname and the other by its IP address.
Benchmarks will run in parallel across these devices. There is
currently no way to specify which benchmark will run on which device.

We define one "benchmark" that will be run, page_cycler_v2.morejs. This
benchmark has two "fields": one specifies that the benchmark is part of
the telemetry_Crosperf suite (the common way to run most Telemetry
benchmarks), and the other specifies how many iterations it will run
for.

We specify one or more "labels", or images, which will be compared. The
page_cycler_v2.morejs benchmark will be run 3 times on each of these
images, and a results table comparing all the images will be output.

The full list of fields that can be specified in the experiment file
is as follows:
=================
Global Fields
=================
%s
=================
Benchmark Fields
=================
%s
=================
Label Fields
=================
%s

Note that global fields are overridden by label or benchmark fields
when a field can be specified in both places. Fields that are specified
as command-line arguments override fields specified in experiment
files.

%s is invoked by passing it a path to an experiment file, as well as
any options (in addition to those specified in the experiment file).
Crosperf runs the experiment and caches the results (or reads the
previously cached experiment results out of the cache), generates and
displays a report based on the run, and emails the report to the user.
If the results were all read out of the cache, then by default no email
is generated.
""" % (
            sys.argv[0],
            sys.argv[0],
            global_fields,
            benchmark_fields,
            label_fields,
            sys.argv[0],
        )
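

# A minimal usage sketch (illustrative only; the real crosperf entry point
# lives in the main crosperf script and wires this class up differently):
# print the usage line followed by the full help text.
if __name__ == "__main__":
    help_printer = Help()
    print(help_printer.GetUsage())
    print(help_printer.GetHelp())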