xref: /aosp_15_r20/external/autotest/frontend/afe/rpc_interface.py (revision 9c5db1993ded3edbeafc8092d69fe5de2ee02df7)
1*9c5db199SXin Li# pylint: disable=missing-docstring
2*9c5db199SXin Li
3*9c5db199SXin Li"""\
4*9c5db199SXin LiFunctions to expose over the RPC interface.
5*9c5db199SXin Li
6*9c5db199SXin LiFor all modify* and delete* functions that ask for an 'id' parameter to
7*9c5db199SXin Liidentify the object to operate on, the id may be either
8*9c5db199SXin Li * the database row ID
9*9c5db199SXin Li * the name of the object (label name, hostname, user login, etc.)
10*9c5db199SXin Li * a dictionary containing uniquely identifying field (this option should seldom
11*9c5db199SXin Li   be used)
12*9c5db199SXin Li
13*9c5db199SXin LiWhen specifying foreign key fields (i.e. adding hosts to a label, or adding
14*9c5db199SXin Liusers to an ACL group), the given value may be either the database row ID or the
15*9c5db199SXin Liname of the object.
16*9c5db199SXin Li
17*9c5db199SXin LiAll get* functions return lists of dictionaries.  Each dictionary represents one
18*9c5db199SXin Liobject and maps field names to values.
19*9c5db199SXin Li
20*9c5db199SXin LiSome examples:
21*9c5db199SXin Limodify_host(2, hostname='myhost') # modify hostname of host with database ID 2
22*9c5db199SXin Limodify_host('ipaj2', hostname='myhost') # modify hostname of host 'ipaj2'
23*9c5db199SXin Limodify_test('sleeptest', test_type='Client', params=', seconds=60')
24*9c5db199SXin Lidelete_acl_group(1) # delete by ID
25*9c5db199SXin Lidelete_acl_group('Everyone') # delete by name
26*9c5db199SXin Liacl_group_add_users('Everyone', ['mbligh', 'showard'])
27*9c5db199SXin Liget_jobs(owner='showard', status='Queued')
28*9c5db199SXin Li
29*9c5db199SXin LiSee doctests/001_rpc_test.txt for (lots) more examples.
30*9c5db199SXin Li"""
31*9c5db199SXin Li
32*9c5db199SXin Li__author__ = 'showard@google.com (Steve Howard)'
33*9c5db199SXin Li
34*9c5db199SXin Liimport ast
35*9c5db199SXin Liimport collections
36*9c5db199SXin Liimport contextlib
37*9c5db199SXin Liimport datetime
38*9c5db199SXin Liimport logging
39*9c5db199SXin Liimport os
40*9c5db199SXin Liimport sys
41*9c5db199SXin Liimport warnings
42*9c5db199SXin Li
43*9c5db199SXin Liimport six
44*9c5db199SXin Lifrom autotest_lib.client.common_lib import (control_data, error, global_config,
45*9c5db199SXin Li                                            priorities)
46*9c5db199SXin Lifrom autotest_lib.client.common_lib.cros import dev_server
47*9c5db199SXin Lifrom autotest_lib.frontend.afe import control_file as control_file_lib
48*9c5db199SXin Lifrom autotest_lib.frontend.afe import (model_attributes, model_logic, models,
49*9c5db199SXin Li                                       rpc_utils)
50*9c5db199SXin Lifrom autotest_lib.frontend.tko import models as tko_models
51*9c5db199SXin Lifrom autotest_lib.frontend.tko import rpc_interface as tko_rpc_interface
52*9c5db199SXin Lifrom autotest_lib.server import frontend, utils
53*9c5db199SXin Lifrom autotest_lib.server.cros import provision
54*9c5db199SXin Lifrom autotest_lib.server.cros.dynamic_suite import (constants,
55*9c5db199SXin Li                                                    control_file_getter,
56*9c5db199SXin Li                                                    suite_common, tools)
57*9c5db199SXin Lifrom autotest_lib.server.cros.dynamic_suite.suite import Suite
58*9c5db199SXin Lifrom autotest_lib.server.lib import status_history
59*9c5db199SXin Lifrom autotest_lib.site_utils import job_history, stable_version_utils
60*9c5db199SXin Lifrom django.db import connection as db_connection
61*9c5db199SXin Lifrom django.db import transaction
62*9c5db199SXin Lifrom django.db.models import Count
63*9c5db199SXin Lifrom django.db.utils import DatabaseError
64*9c5db199SXin Li
65*9c5db199SXin Liimport common
66*9c5db199SXin Li
67*9c5db199SXin Li_CONFIG = global_config.global_config
68*9c5db199SXin Li
69*9c5db199SXin Li# Definition of LabHealthIndicator
70*9c5db199SXin LiLabHealthIndicator = collections.namedtuple(
71*9c5db199SXin Li        'LabHealthIndicator',
72*9c5db199SXin Li        [
73*9c5db199SXin Li                'if_lab_close',
74*9c5db199SXin Li                'available_duts',
75*9c5db199SXin Li                'devserver_health',
76*9c5db199SXin Li                'upcoming_builds',
77*9c5db199SXin Li        ]
78*9c5db199SXin Li)
79*9c5db199SXin Li
80*9c5db199SXin LiRESPECT_STATIC_LABELS = global_config.global_config.get_config_value(
81*9c5db199SXin Li        'SKYLAB', 'respect_static_labels', type=bool, default=False)
82*9c5db199SXin Li
83*9c5db199SXin LiRESPECT_STATIC_ATTRIBUTES = global_config.global_config.get_config_value(
84*9c5db199SXin Li        'SKYLAB', 'respect_static_attributes', type=bool, default=False)
85*9c5db199SXin Li
86*9c5db199SXin Li# Relevant CrosDynamicSuiteExceptions are defined in client/common_lib/error.py.
87*9c5db199SXin Li
88*9c5db199SXin Li# labels
89*9c5db199SXin Li
def modify_label(id, **data):
    """Modify a label.

    @param id: id or name of a label. More often a label name.
    @param data: New data for a label.

    @raises error.UnmodifiableLabelException: If the label has been replaced
            by a static label and must be edited via the skylab inventory
            tools instead.
    """
    label_model = models.Label.smart_get(id)
    if label_model.is_replaced_by_static():
        # Bug fix: the message previously said "delete", copy-pasted from
        # delete_label(); this RPC modifies the label.
        raise error.UnmodifiableLabelException(
                'Failed to modify label "%s" because it is a static label. '
                'Use go/chromeos-skylab-inventory-tools to modify this '
                'label.' % label_model.name)

    label_model.update_object(data)

    # Only the main forwards the RPC to the shards that serve hosts carrying
    # this label; a shard applies the change locally only.
    if not utils.is_shard():
        rpc_utils.fanout_rpc(label_model.host_set.all(), 'modify_label', False,
                             id=id, **data)
109*9c5db199SXin Li
110*9c5db199SXin Li
def delete_label(id):
    """Delete a label.

    @param id: id or name of a label. More often a label name.

    @raises error.UnmodifiableLabelException: If the label is replaced by a
            static label and therefore cannot be deleted here.
    """
    label_model = models.Label.smart_get(id)
    if label_model.is_replaced_by_static():
        raise error.UnmodifiableLabelException(
                'Failed to delete label "%s" because it is a static label. '
                'Use go/chromeos-skylab-inventory-tools to modify this '
                'label.' % label_model.name)

    # Snapshot the hosts carrying this label *before* deleting it, so the
    # shard fanout below still knows which shards to notify.
    hosts = [models.Host.smart_get(h.id) for h in label_model.host_set.all()]
    label_model.delete()

    # Only the main forwards the RPC to shards.
    if not utils.is_shard():
        rpc_utils.fanout_rpc(hosts, 'delete_label', False, id=id)
133*9c5db199SXin Li
134*9c5db199SXin Li
def add_label(name, ignore_exception_if_exists=False, **kwargs):
    """Adds a new label of a given name.

    @param name: label name.
    @param ignore_exception_if_exists: If True and the exception was
        thrown due to the duplicated label name when adding a label,
        then suppress the exception. Default is False.
    @param kwargs: keyword args that store more info about a label
        other than the name.
    @return: int/long id of a new label.
    """
    # models.Label.add_object() throws model_logic.ValidationError
    # when it is given a label name that already exists.
    # However, ValidationError can be thrown with different errors,
    # and those errors should be thrown up to the call chain.
    try:
        label = models.Label.add_object(name=name, **kwargs)
    except Exception:
        # Bug fix: this was a bare `except:`, which also swallows
        # SystemExit and KeyboardInterrupt. `except Exception:` keeps the
        # intended "handle any add failure" behavior without that hazard.
        exc_info = sys.exc_info()
        if ignore_exception_if_exists:
            label = rpc_utils.get_label(name)
            # If the exception is raised not because of duplicated
            # "name", then raise the original exception.
            if label is None:
                six.reraise(exc_info[0], exc_info[1], exc_info[2])
        else:
            six.reraise(exc_info[0], exc_info[1], exc_info[2])
    return label.id
163*9c5db199SXin Li
164*9c5db199SXin Li
def add_label_to_hosts(id, hosts):
    """Adds a label of the given id to the given hosts only in local DB.

    @param id: id or name of a label. More often a label name.
    @param hosts: The hostnames of hosts that need the label.

    @raises models.Label.DoesNotExist: If the label with id doesn't exist.
    """
    label = models.Label.smart_get(id)
    if label.is_replaced_by_static():
        # Static labels shadow their dynamic counterpart; attach to the
        # static one instead.
        label = models.StaticLabel.smart_get(label.name)

    host_models = models.Host.smart_get_bulk(hosts)
    if label.platform:
        models.Host.check_no_platform(host_models)
    is_board_label = label.name.startswith('board:')
    if is_board_label:
        # A host may carry at most one board: label.
        models.Host.check_board_labels_allowed(host_models, [label.name])
    label.host_set.add(*host_models)
184*9c5db199SXin Li
185*9c5db199SXin Li
def _create_label_everywhere(id, hosts):
    """
    Yet another method to create labels.

    ALERT! This method should be run only on main not shards!
    DO NOT RUN THIS ON A SHARD!!!  Deputies will hate you if you do!!!

    This method exists primarily to serve label_add_hosts() and
    host_add_labels().  Basically it pulls out the label check/add logic
    from label_add_hosts() into this nice method that not only creates
    the label but also tells the shards that service the hosts to also
    create the label.

    @param id: id or name of a label. More often a label name.
    @param hosts: A list of hostnames or ids. More often hostnames.

    @raises ValueError: If `id` is an int/long and no such label exists; a
            label can only be auto-created from a string name.
    """
    try:
        label = models.Label.smart_get(id)
    except models.Label.DoesNotExist:
        # This matches the type checks in smart_get, which is a hack
        # in and of itself. The aim here is to create any non-existent
        # label, which we cannot do if the 'id' specified isn't a label name.
        if isinstance(id, six.string_types):
            label = models.Label.smart_get(add_label(id))
        else:
            raise ValueError('Label id (%s) does not exist. Please specify '
                             'the argument, id, as a string (label name).'
                             % id)

    # Make sure the label exists on the shard with the same id
    # as it is on the main.
    # It is possible that the label is already in a shard because
    # we are adding a new label only to shards of hosts that the label
    # is going to be attached.
    # For example, we add a label L1 to a host in shard S1.
    # Main and S1 will have L1 but other shards won't.
    # Later, when we add the same label L1 to hosts in shards S1 and S2,
    # S1 already has the label but S2 doesn't.
    # S2 should have the new label without any problem.
    # We ignore exception in such a case.
    host_objs = models.Host.smart_get_bulk(hosts)
    rpc_utils.fanout_rpc(
            host_objs, 'add_label', include_hostnames=False,
            name=label.name, ignore_exception_if_exists=True,
            id=label.id, platform=label.platform)
231*9c5db199SXin Li
232*9c5db199SXin Li
@rpc_utils.route_rpc_to_main
def label_add_hosts(id, hosts):
    """Adds a label with the given id to the given hosts.

    This method should be run only on main not shards.
    The given label will be created if it doesn't exist, provided the `id`
    supplied is a label name not an int/long id.

    @param id: id or name of a label. More often a label name.
    @param hosts: A list of hostnames or ids. More often hostnames.

    @raises ValueError: If the id specified is an int/long (label id)
                        while the label does not exist.
    """
    # Step 1: make sure the label exists on the main and relevant shards.
    _create_label_everywhere(id, hosts)

    # Step 2: attach the label locally on the main.
    add_label_to_hosts(id, hosts)

    # Step 3: attach it on the shards that serve these hosts.
    host_objs = models.Host.smart_get_bulk(hosts)
    rpc_utils.fanout_rpc(host_objs, 'add_label_to_hosts', id=id)
256*9c5db199SXin Li
257*9c5db199SXin Li
def remove_label_from_hosts(id, hosts):
    """Removes a label of the given id from the given hosts only in local DB.

    @param id: id or name of a label.
    @param hosts: The hostnames of hosts that need to remove the label from.

    @raises error.UnmodifiableLabelException: If the label is replaced by a
            static label; static labels are owned by the skylab inventory.
    """
    host_objs = models.Host.smart_get_bulk(hosts)
    label = models.Label.smart_get(id)
    if not label.is_replaced_by_static():
        label.host_set.remove(*host_objs)
        return

    raise error.UnmodifiableLabelException(
            'Failed to remove label "%s" for hosts "%r" because it is a '
            'static label. Use go/chromeos-skylab-inventory-tools to '
            'modify this label.' % (label.name, hosts))
273*9c5db199SXin Li
274*9c5db199SXin Li
@rpc_utils.route_rpc_to_main
def label_remove_hosts(id, hosts):
    """Removes a label of the given id from the given hosts.

    This method should be run only on main not shards.

    @param id: id or name of a label.
    @param hosts: A list of hostnames or ids. More often hostnames.
    """
    # Resolve the host objects up front so the shard fanout below targets
    # the same set of hosts that the local removal touched.
    host_objs = models.Host.smart_get_bulk(hosts)
    remove_label_from_hosts(id, hosts)
    rpc_utils.fanout_rpc(host_objs, 'remove_label_from_hosts', id=id)
288*9c5db199SXin Li
289*9c5db199SXin Li
def get_labels(exclude_filters=(), **filter_data):
    """\
    @param exclude_filters: A sequence of dictionaries of filters.

    @returns A sequence of nested dictionaries of label information.
    """
    labels = models.Label.query_objects(filter_data)
    for exclude_filter in exclude_filters:
        labels = labels.exclude(**exclude_filter)

    if not RESPECT_STATIC_LABELS:
        return rpc_utils.prepare_rows_as_nested_dicts(labels, ())

    static_labels = models.StaticLabel.query_objects(filter_data)
    for exclude_filter in exclude_filters:
        static_labels = static_labels.exclude(**exclude_filter)

    non_static_dicts = rpc_utils.prepare_rows_as_nested_dicts(labels, ())
    static_dicts = rpc_utils.prepare_rows_as_nested_dicts(static_labels, ())

    # Identify labels that have been superseded by a static label of the
    # same name.
    replaced = models.ReplacedLabel.objects.filter(
            label__id__in=[label.id for label in labels])
    replaced_ids = {r.label_id for r in replaced}
    replaced_names = {l.name for l in labels if l.id in replaced_ids}

    # Return the dynamic view of labels that were not replaced, followed by
    # the static view of those that were.
    results = [d for d in non_static_dicts if d.get('id') not in replaced_ids]
    results.extend(
            d for d in static_dicts if d.get('name') in replaced_names)
    return results
325*9c5db199SXin Li
326*9c5db199SXin Li
327*9c5db199SXin Li# hosts
328*9c5db199SXin Li
def add_host(hostname, status=None, locked=None, lock_reason='', protection=None):
    """Create a new host row.

    @param hostname: Name of the host to add.
    @param status: Initial status, or None for the model default.
    @param locked: Whether the host starts locked.
    @param lock_reason: Required human-readable reason when locked is set.
    @param protection: Host protection level, or None for the default.

    @returns The id of the newly created host.
    @raises model_logic.ValidationError: If locked without a lock_reason.
    """
    if locked and not lock_reason:
        raise model_logic.ValidationError(
            {'locked': 'Please provide a reason for locking when adding host.'})

    host = models.Host.add_object(hostname=hostname,
                                  status=status,
                                  locked=locked,
                                  lock_reason=lock_reason,
                                  protection=protection)
    return host.id
337*9c5db199SXin Li
338*9c5db199SXin Li
@rpc_utils.route_rpc_to_main
def modify_host(id, **kwargs):
    """Modify local attributes of a host.

    If this is called on the main, but the host is assigned to a shard, this
    will call `modify_host_local` RPC to the responsible shard. This means if
    a host is being locked using this function, this change will also propagate
    to shards.
    When this is called on a shard, the shard just routes the RPC to the main
    and does nothing.

    @param id: id of the host to modify.
    @param kwargs: key=value pairs of values to set on the host.
    """
    rpc_utils.check_modify_host(kwargs)
    host = models.Host.smart_get(id)
    try:
        rpc_utils.check_modify_host_locking(host, kwargs)
    except model_logic.ValidationError as e:
        if not kwargs.get('force_modify_locking', False):
            raise
        logging.exception('The following exception will be ignored and lock '
                          'modification will be enforced. %s', e)

    # Stamp the lock time here so the main and the shard record the exact
    # same value.
    if kwargs.get('locked', None) and 'lock_time' not in kwargs:
        kwargs['lock_time'] = datetime.datetime.now()

    # 'force_modify_locking' is not a real database field; strip it from the
    # payload forwarded to the shard.
    forwarded_kwargs = {key: value for key, value in kwargs.items()
                        if key != 'force_modify_locking'}
    rpc_utils.fanout_rpc([host], 'modify_host_local',
                         include_hostnames=False, id=id, **forwarded_kwargs)

    # Update the local DB **after** RPC fanout is complete, so the main state
    # only changes if the shards were correctly updated. If the shard update
    # fails mid-flight and main/shard desync, the main is treated as the
    # source of truth and (automated) corrective actions revert the
    # (partial) shard updates.
    host.update_object(kwargs)
381*9c5db199SXin Li
382*9c5db199SXin Li
def modify_host_local(id, **kwargs):
    """Modify host attributes in local DB.

    @param id: Host id.
    @param kwargs: key=value pairs of values to set on the host.
    """
    host = models.Host.smart_get(id)
    host.update_object(kwargs)
390*9c5db199SXin Li
391*9c5db199SXin Li
@rpc_utils.route_rpc_to_main
def modify_hosts(host_filter_data, update_data):
    """Modify local attributes of multiple hosts.

    If this is called on the main, but one of the hosts in that match the
    filters is assigned to a shard, this will call `modify_hosts_local` RPC
    to the responsible shard.
    When this is called on a shard, the shard just routes the RPC to the main
    and does nothing.

    The filters are always applied on the main, not on the shards. This means
    if the states of a host differ on the main and a shard, the state on the
    main will be used. I.e. this means:
    A host was synced to Shard 1. On Shard 1 the status of the host was set to
    'Repair Failed'.
    - A call to modify_hosts with host_filter_data={'status': 'Ready'} will
    update the host (both on the shard and on the main), because the state
    of the host as the main knows it is still 'Ready'.
    - A call to modify_hosts with host_filter_data={'status': 'Repair failed'
    will not update the host, because the filter doesn't apply on the main.

    @param host_filter_data: Filters out which hosts to modify.
    @param update_data: A dictionary with the changes to make to the hosts.
    """
    # Work on a copy so the caller's dict is not mutated by the
    # lock_time/force_modify_locking adjustments below.
    update_data = update_data.copy()
    rpc_utils.check_modify_host(update_data)
    hosts = models.Host.query_objects(host_filter_data)

    affected_shard_hostnames = set()
    affected_host_ids = []

    # Check all hosts before changing data for exception safety.
    for host in hosts:
        try:
            rpc_utils.check_modify_host_locking(host, update_data)
        except model_logic.ValidationError as e:
            if not update_data.get('force_modify_locking', False):
                raise
            logging.exception('The following exception will be ignored and '
                              'lock modification will be enforced. %s', e)

        # Record which shards serve the matched hosts; the fanout below is
        # routed by shard hostname and filtered by explicit host id.
        if host.shard:
            affected_shard_hostnames.add(host.shard.hostname)
            affected_host_ids.append(host.id)

    # This is required to make `lock_time` for a host be exactly same
    # between the main and a shard.
    if update_data.get('locked', None) and 'lock_time' not in update_data:
        update_data['lock_time'] = datetime.datetime.now()
    for host in hosts:
        host.update_object(update_data)

    # force_modify_locking is not a database field; never forward it.
    update_data.pop('force_modify_locking', None)
    # Caution: Changing the filter from the original here. See docstring.
    rpc_utils.run_rpc_on_multiple_hostnames(
            'modify_hosts_local', affected_shard_hostnames,
            host_filter_data={'id__in': affected_host_ids},
            update_data=update_data)
450*9c5db199SXin Li
451*9c5db199SXin Li
def modify_hosts_local(host_filter_data, update_data):
    """Modify attributes of hosts in local DB.

    @param host_filter_data: Filters out which hosts to modify.
    @param update_data: A dictionary with the changes to make to the hosts.
    """
    matched_hosts = models.Host.query_objects(host_filter_data)
    for matched_host in matched_hosts:
        matched_host.update_object(update_data)
460*9c5db199SXin Li
461*9c5db199SXin Li
def add_labels_to_host(id, labels):
    """Adds labels to a given host only in local DB.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    label_objs = models.Label.smart_get_bulk(labels)
    host = models.Host.smart_get(id)
    if not RESPECT_STATIC_LABELS:
        host.labels.add(*label_objs)
        return

    # Static and non-static labels live in separate relations; attach each
    # group to its own set.
    static_labels, non_static_labels = models.Host.classify_label_objects(
            label_objs)
    host.static_labels.add(*static_labels)
    host.labels.add(*non_static_labels)
477*9c5db199SXin Li
478*9c5db199SXin Li
@rpc_utils.route_rpc_to_main
def host_add_labels(id, labels):
    """Adds labels to a given host.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.

    @raises ValidationError: If adding more than one platform/board label.
    """
    # Make sure every label exists on the main and the relevant shards.
    for label in labels:
        _create_label_everywhere(label, [id])

    label_objs = models.Label.smart_get_bulk(labels)
    platforms = [label.name for label in label_objs if label.platform]
    if len(platforms) > 1:
        raise model_logic.ValidationError(
            {'labels': ('Adding more than one platform: %s' %
                        ', '.join(platforms))})

    host_obj = models.Host.smart_get(id)
    if platforms:
        models.Host.check_no_platform([host_obj])
    has_board_label = any(
            label_name.startswith('board:') for label_name in labels)
    if has_board_label:
        models.Host.check_board_labels_allowed([host_obj], labels)
    add_labels_to_host(id, labels)

    # Mirror the attachment on the host's shard.
    rpc_utils.fanout_rpc([host_obj], 'add_labels_to_host', False,
                         id=id, labels=labels)
509*9c5db199SXin Li
510*9c5db199SXin Li
def remove_labels_from_host(id, labels):
    """Removes labels from a given host only in local DB.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    label_objs = models.Label.smart_get_bulk(labels)
    if not RESPECT_STATIC_LABELS:
        models.Host.smart_get(id).labels.remove(*label_objs)
        return

    static_labels, non_static_labels = models.Host.classify_label_objects(
            label_objs)
    host = models.Host.smart_get(id)
    host.labels.remove(*non_static_labels)
    if static_labels:
        # Static labels are owned by the skylab inventory; log and skip
        # them rather than failing the whole call.
        logging.info('Cannot remove labels "%r" for host "%r" due to they '
                     'are static labels. Use '
                     'go/chromeos-skylab-inventory-tools to modify these '
                     'labels.', static_labels, id)
530*9c5db199SXin Li
531*9c5db199SXin Li
@rpc_utils.route_rpc_to_main
def host_remove_labels(id, labels):
    """Removes labels from a given host.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    # Detach locally first, then mirror the removal on the host's shard.
    remove_labels_from_host(id, labels)

    host_obj = models.Host.smart_get(id)
    rpc_utils.fanout_rpc([host_obj], 'remove_labels_from_host', False,
                         id=id, labels=labels)
544*9c5db199SXin Li
545*9c5db199SXin Li
def get_host_attribute(attribute, **host_filter_data):
    """Fetch a named attribute for all hosts matching a filter.

    @param attribute: string name of attribute
    @param host_filter_data: filter data to apply to Hosts to choose hosts to
                             act upon

    @returns A serialized list of attribute dicts, one per matching
             (host, attribute) pair. When static attributes are respected,
             a static value for the same host/attribute overrides the
             non-static one.
    """
    hosts = rpc_utils.get_host_query((), False, True, host_filter_data)
    hosts = list(hosts)
    models.Host.objects.populate_relationships(hosts, models.HostAttribute,
                                               'attribute_list')
    host_attr_dicts = []
    host_objs = []
    # Collect attribute rows and their owning hosts in lockstep so the
    # static-override pass below can zip them back together.
    for host_obj in hosts:
        for attr_obj in host_obj.attribute_list:
            if attr_obj.attribute == attribute:
                host_attr_dicts.append(attr_obj.get_object_dict())
                host_objs.append(host_obj)

    if RESPECT_STATIC_ATTRIBUTES:
        for host_attr, host_obj in zip(host_attr_dicts, host_objs):
            static_attrs = models.StaticHostAttribute.query_objects(
                    {'host_id': host_obj.id, 'attribute': attribute})
            # Idiom fix: truthiness check instead of `len(...) > 0`.
            if static_attrs:
                host_attr['value'] = static_attrs[0].value

    return rpc_utils.prepare_for_serialization(host_attr_dicts)
572*9c5db199SXin Li
573*9c5db199SXin Li
574*9c5db199SXin Li@rpc_utils.route_rpc_to_main
575*9c5db199SXin Lidef set_host_attribute(attribute, value, **host_filter_data):
576*9c5db199SXin Li    """Set an attribute on hosts.
577*9c5db199SXin Li
578*9c5db199SXin Li    This RPC is a shim that forwards calls to main to be handled there.
579*9c5db199SXin Li
580*9c5db199SXin Li    @param attribute: string name of attribute
581*9c5db199SXin Li    @param value: string, or None to delete an attribute
582*9c5db199SXin Li    @param host_filter_data: filter data to apply to Hosts to choose hosts to
583*9c5db199SXin Li                             act upon
584*9c5db199SXin Li    """
585*9c5db199SXin Li    assert not utils.is_shard()
586*9c5db199SXin Li    set_host_attribute_impl(attribute, value, **host_filter_data)
587*9c5db199SXin Li
588*9c5db199SXin Li
589*9c5db199SXin Lidef set_host_attribute_impl(attribute, value, **host_filter_data):
590*9c5db199SXin Li    """Set an attribute on hosts.
591*9c5db199SXin Li
592*9c5db199SXin Li    *** DO NOT CALL THIS RPC from client code ***
593*9c5db199SXin Li    This RPC exists for main-shard communication only.
594*9c5db199SXin Li    Call set_host_attribute instead.
595*9c5db199SXin Li
596*9c5db199SXin Li    @param attribute: string name of attribute
597*9c5db199SXin Li    @param value: string, or None to delete an attribute
598*9c5db199SXin Li    @param host_filter_data: filter data to apply to Hosts to choose hosts to
599*9c5db199SXin Li                             act upon
600*9c5db199SXin Li    """
601*9c5db199SXin Li    assert host_filter_data # disallow accidental actions on all hosts
602*9c5db199SXin Li    hosts = models.Host.query_objects(host_filter_data)
603*9c5db199SXin Li    models.AclGroup.check_for_acl_violation_hosts(hosts)
604*9c5db199SXin Li    for host in hosts:
605*9c5db199SXin Li        host.set_or_delete_attribute(attribute, value)
606*9c5db199SXin Li
607*9c5db199SXin Li    # Main forwards this RPC to shards.
608*9c5db199SXin Li    if not utils.is_shard():
609*9c5db199SXin Li        rpc_utils.fanout_rpc(hosts, 'set_host_attribute_impl', False,
610*9c5db199SXin Li                attribute=attribute, value=value, **host_filter_data)
611*9c5db199SXin Li
612*9c5db199SXin Li
613*9c5db199SXin Li@rpc_utils.forward_single_host_rpc_to_shard
614*9c5db199SXin Lidef delete_host(id):
615*9c5db199SXin Li    models.Host.smart_get(id).delete()
616*9c5db199SXin Li
617*9c5db199SXin Li
618*9c5db199SXin Lidef get_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
619*9c5db199SXin Li              valid_only=True, include_current_job=False, **filter_data):
620*9c5db199SXin Li    """Get a list of dictionaries which contains the information of hosts.
621*9c5db199SXin Li
622*9c5db199SXin Li    @param multiple_labels: match hosts in all of the labels given.  Should
623*9c5db199SXin Li            be a list of label names.
624*9c5db199SXin Li    @param exclude_only_if_needed_labels: Deprecated. Raise error if it's True.
625*9c5db199SXin Li    @param include_current_job: Set to True to include ids of currently running
626*9c5db199SXin Li            job and special task.
627*9c5db199SXin Li    """
628*9c5db199SXin Li    if exclude_only_if_needed_labels:
629*9c5db199SXin Li        raise error.RPCException('exclude_only_if_needed_labels is deprecated')
630*9c5db199SXin Li
631*9c5db199SXin Li    hosts = rpc_utils.get_host_query(multiple_labels,
632*9c5db199SXin Li                                     exclude_only_if_needed_labels,
633*9c5db199SXin Li                                     valid_only, filter_data)
634*9c5db199SXin Li    hosts = list(hosts)
635*9c5db199SXin Li    models.Host.objects.populate_relationships(hosts, models.Label,
636*9c5db199SXin Li                                               'label_list')
637*9c5db199SXin Li    models.Host.objects.populate_relationships(hosts, models.AclGroup,
638*9c5db199SXin Li                                               'acl_list')
639*9c5db199SXin Li    models.Host.objects.populate_relationships(hosts, models.HostAttribute,
640*9c5db199SXin Li                                               'attribute_list')
641*9c5db199SXin Li    models.Host.objects.populate_relationships(hosts,
642*9c5db199SXin Li                                               models.StaticHostAttribute,
643*9c5db199SXin Li                                               'staticattribute_list')
644*9c5db199SXin Li    host_dicts = []
645*9c5db199SXin Li    for host_obj in hosts:
646*9c5db199SXin Li        host_dict = host_obj.get_object_dict()
647*9c5db199SXin Li        host_dict['acls'] = [acl.name for acl in host_obj.acl_list]
648*9c5db199SXin Li        host_dict['attributes'] = dict((attribute.attribute, attribute.value)
649*9c5db199SXin Li                                       for attribute in host_obj.attribute_list)
650*9c5db199SXin Li        if RESPECT_STATIC_LABELS:
651*9c5db199SXin Li            label_list = []
652*9c5db199SXin Li            # Only keep static labels which has a corresponding entries in
653*9c5db199SXin Li            # afe_labels.
654*9c5db199SXin Li            for label in host_obj.label_list:
655*9c5db199SXin Li                if label.is_replaced_by_static():
656*9c5db199SXin Li                    static_label = models.StaticLabel.smart_get(label.name)
657*9c5db199SXin Li                    label_list.append(static_label)
658*9c5db199SXin Li                else:
659*9c5db199SXin Li                    label_list.append(label)
660*9c5db199SXin Li
661*9c5db199SXin Li            host_dict['labels'] = [label.name for label in label_list]
662*9c5db199SXin Li            host_dict['platform'] = rpc_utils.find_platform(
663*9c5db199SXin Li                    host_obj.hostname, label_list)
664*9c5db199SXin Li        else:
665*9c5db199SXin Li            host_dict['labels'] = [label.name for label in host_obj.label_list]
666*9c5db199SXin Li            host_dict['platform'] = rpc_utils.find_platform(
667*9c5db199SXin Li                    host_obj.hostname, host_obj.label_list)
668*9c5db199SXin Li
669*9c5db199SXin Li        if RESPECT_STATIC_ATTRIBUTES:
670*9c5db199SXin Li            # Overwrite attribute with values in afe_static_host_attributes.
671*9c5db199SXin Li            for attr in host_obj.staticattribute_list:
672*9c5db199SXin Li                if attr.attribute in host_dict['attributes']:
673*9c5db199SXin Li                    host_dict['attributes'][attr.attribute] = attr.value
674*9c5db199SXin Li
675*9c5db199SXin Li        if include_current_job:
676*9c5db199SXin Li            host_dict['current_job'] = None
677*9c5db199SXin Li            host_dict['current_special_task'] = None
678*9c5db199SXin Li            entries = models.HostQueueEntry.objects.filter(
679*9c5db199SXin Li                    host_id=host_dict['id'], active=True, complete=False)
680*9c5db199SXin Li            if entries:
681*9c5db199SXin Li                host_dict['current_job'] = (
682*9c5db199SXin Li                        entries[0].get_object_dict()['job'])
683*9c5db199SXin Li            tasks = models.SpecialTask.objects.filter(
684*9c5db199SXin Li                    host_id=host_dict['id'], is_active=True, is_complete=False)
685*9c5db199SXin Li            if tasks:
686*9c5db199SXin Li                host_dict['current_special_task'] = (
687*9c5db199SXin Li                        '%d-%s' % (tasks[0].get_object_dict()['id'],
688*9c5db199SXin Li                                   tasks[0].get_object_dict()['task'].lower()))
689*9c5db199SXin Li        host_dicts.append(host_dict)
690*9c5db199SXin Li
691*9c5db199SXin Li    return rpc_utils.prepare_for_serialization(host_dicts)
692*9c5db199SXin Li
693*9c5db199SXin Li
694*9c5db199SXin Lidef get_num_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
695*9c5db199SXin Li                  valid_only=True, **filter_data):
696*9c5db199SXin Li    """
697*9c5db199SXin Li    Same parameters as get_hosts().
698*9c5db199SXin Li
699*9c5db199SXin Li    @returns The number of matching hosts.
700*9c5db199SXin Li    """
701*9c5db199SXin Li    if exclude_only_if_needed_labels:
702*9c5db199SXin Li        raise error.RPCException('exclude_only_if_needed_labels is deprecated')
703*9c5db199SXin Li
704*9c5db199SXin Li    hosts = rpc_utils.get_host_query(multiple_labels,
705*9c5db199SXin Li                                     exclude_only_if_needed_labels,
706*9c5db199SXin Li                                     valid_only, filter_data)
707*9c5db199SXin Li    return len(hosts)
708*9c5db199SXin Li
709*9c5db199SXin Li
710*9c5db199SXin Li# tests
711*9c5db199SXin Li
712*9c5db199SXin Lidef get_tests(**filter_data):
713*9c5db199SXin Li    return rpc_utils.prepare_for_serialization(
714*9c5db199SXin Li        models.Test.list_objects(filter_data))
715*9c5db199SXin Li
716*9c5db199SXin Li
717*9c5db199SXin Lidef get_tests_status_counts_by_job_name_label(job_name_prefix, label_name):
718*9c5db199SXin Li    """Gets the counts of all passed and failed tests from the matching jobs.
719*9c5db199SXin Li
720*9c5db199SXin Li    @param job_name_prefix: Name prefix of the jobs to get the summary
721*9c5db199SXin Li           from, e.g., 'butterfly-release/r40-6457.21.0/bvt-cq/'. Prefix
722*9c5db199SXin Li           matching is case insensitive.
723*9c5db199SXin Li    @param label_name: Label that must be set in the jobs, e.g.,
724*9c5db199SXin Li            'cros-version:butterfly-release/R40-6457.21.0'.
725*9c5db199SXin Li
726*9c5db199SXin Li    @returns A summary of the counts of all the passed and failed tests.
727*9c5db199SXin Li    """
728*9c5db199SXin Li    job_ids = list(models.Job.objects.filter(
729*9c5db199SXin Li            name__istartswith=job_name_prefix,
730*9c5db199SXin Li            dependency_labels__name=label_name).values_list(
731*9c5db199SXin Li                'pk', flat=True))
732*9c5db199SXin Li    summary = {'passed': 0, 'failed': 0}
733*9c5db199SXin Li    if not job_ids:
734*9c5db199SXin Li        return summary
735*9c5db199SXin Li
736*9c5db199SXin Li    counts = (tko_models.TestView.objects.filter(
737*9c5db199SXin Li            afe_job_id__in=job_ids).exclude(
738*9c5db199SXin Li                test_name='SERVER_JOB').exclude(
739*9c5db199SXin Li                    test_name__startswith='CLIENT_JOB').values(
740*9c5db199SXin Li                        'status').annotate(
741*9c5db199SXin Li                            count=Count('status')))
742*9c5db199SXin Li    for status in counts:
743*9c5db199SXin Li        if status['status'] == 'GOOD':
744*9c5db199SXin Li            summary['passed'] += status['count']
745*9c5db199SXin Li        else:
746*9c5db199SXin Li            summary['failed'] += status['count']
747*9c5db199SXin Li    return summary
748*9c5db199SXin Li
749*9c5db199SXin Li
750*9c5db199SXin Li# profilers
751*9c5db199SXin Li
752*9c5db199SXin Lidef add_profiler(name, description=None):
753*9c5db199SXin Li    return models.Profiler.add_object(name=name, description=description).id
754*9c5db199SXin Li
755*9c5db199SXin Li
756*9c5db199SXin Lidef modify_profiler(id, **data):
757*9c5db199SXin Li    models.Profiler.smart_get(id).update_object(data)
758*9c5db199SXin Li
759*9c5db199SXin Li
760*9c5db199SXin Lidef delete_profiler(id):
761*9c5db199SXin Li    models.Profiler.smart_get(id).delete()
762*9c5db199SXin Li
763*9c5db199SXin Li
764*9c5db199SXin Lidef get_profilers(**filter_data):
765*9c5db199SXin Li    return rpc_utils.prepare_for_serialization(
766*9c5db199SXin Li        models.Profiler.list_objects(filter_data))
767*9c5db199SXin Li
768*9c5db199SXin Li
769*9c5db199SXin Li# users
770*9c5db199SXin Li
771*9c5db199SXin Lidef get_users(**filter_data):
772*9c5db199SXin Li    return rpc_utils.prepare_for_serialization(
773*9c5db199SXin Li        models.User.list_objects(filter_data))
774*9c5db199SXin Li
775*9c5db199SXin Li
776*9c5db199SXin Li# acl groups
777*9c5db199SXin Li
778*9c5db199SXin Lidef add_acl_group(name, description=None):
779*9c5db199SXin Li    group = models.AclGroup.add_object(name=name, description=description)
780*9c5db199SXin Li    group.users.add(models.User.current_user())
781*9c5db199SXin Li    return group.id
782*9c5db199SXin Li
783*9c5db199SXin Li
784*9c5db199SXin Lidef modify_acl_group(id, **data):
785*9c5db199SXin Li    group = models.AclGroup.smart_get(id)
786*9c5db199SXin Li    group.check_for_acl_violation_acl_group()
787*9c5db199SXin Li    group.update_object(data)
788*9c5db199SXin Li    group.add_current_user_if_empty()
789*9c5db199SXin Li
790*9c5db199SXin Li
791*9c5db199SXin Lidef acl_group_add_users(id, users):
792*9c5db199SXin Li    group = models.AclGroup.smart_get(id)
793*9c5db199SXin Li    group.check_for_acl_violation_acl_group()
794*9c5db199SXin Li    users = models.User.smart_get_bulk(users)
795*9c5db199SXin Li    group.users.add(*users)
796*9c5db199SXin Li
797*9c5db199SXin Li
798*9c5db199SXin Lidef acl_group_remove_users(id, users):
799*9c5db199SXin Li    group = models.AclGroup.smart_get(id)
800*9c5db199SXin Li    group.check_for_acl_violation_acl_group()
801*9c5db199SXin Li    users = models.User.smart_get_bulk(users)
802*9c5db199SXin Li    group.users.remove(*users)
803*9c5db199SXin Li    group.add_current_user_if_empty()
804*9c5db199SXin Li
805*9c5db199SXin Li
806*9c5db199SXin Lidef acl_group_add_hosts(id, hosts):
807*9c5db199SXin Li    group = models.AclGroup.smart_get(id)
808*9c5db199SXin Li    group.check_for_acl_violation_acl_group()
809*9c5db199SXin Li    hosts = models.Host.smart_get_bulk(hosts)
810*9c5db199SXin Li    group.hosts.add(*hosts)
811*9c5db199SXin Li    group.on_host_membership_change()
812*9c5db199SXin Li
813*9c5db199SXin Li
814*9c5db199SXin Lidef acl_group_remove_hosts(id, hosts):
815*9c5db199SXin Li    group = models.AclGroup.smart_get(id)
816*9c5db199SXin Li    group.check_for_acl_violation_acl_group()
817*9c5db199SXin Li    hosts = models.Host.smart_get_bulk(hosts)
818*9c5db199SXin Li    group.hosts.remove(*hosts)
819*9c5db199SXin Li    group.on_host_membership_change()
820*9c5db199SXin Li
821*9c5db199SXin Li
822*9c5db199SXin Lidef delete_acl_group(id):
823*9c5db199SXin Li    models.AclGroup.smart_get(id).delete()
824*9c5db199SXin Li
825*9c5db199SXin Li
826*9c5db199SXin Lidef get_acl_groups(**filter_data):
827*9c5db199SXin Li    acl_groups = models.AclGroup.list_objects(filter_data)
828*9c5db199SXin Li    for acl_group in acl_groups:
829*9c5db199SXin Li        acl_group_obj = models.AclGroup.objects.get(id=acl_group['id'])
830*9c5db199SXin Li        acl_group['users'] = [user.login
831*9c5db199SXin Li                              for user in acl_group_obj.users.all()]
832*9c5db199SXin Li        acl_group['hosts'] = [host.hostname
833*9c5db199SXin Li                              for host in acl_group_obj.hosts.all()]
834*9c5db199SXin Li    return rpc_utils.prepare_for_serialization(acl_groups)
835*9c5db199SXin Li
836*9c5db199SXin Li
837*9c5db199SXin Li# jobs
838*9c5db199SXin Li
839*9c5db199SXin Lidef generate_control_file(tests=(), profilers=(),
840*9c5db199SXin Li                          client_control_file='', use_container=False,
841*9c5db199SXin Li                          profile_only=None, db_tests=True,
842*9c5db199SXin Li                          test_source_build=None):
843*9c5db199SXin Li    """
844*9c5db199SXin Li    Generates a client-side control file to run tests.
845*9c5db199SXin Li
846*9c5db199SXin Li    @param tests List of tests to run. See db_tests for more information.
847*9c5db199SXin Li    @param profilers List of profilers to activate during the job.
848*9c5db199SXin Li    @param client_control_file The contents of a client-side control file to
849*9c5db199SXin Li        run at the end of all tests.  If this is supplied, all tests must be
850*9c5db199SXin Li        client side.
851*9c5db199SXin Li        TODO: in the future we should support server control files directly
852*9c5db199SXin Li        to wrap with a kernel.  That'll require changing the parameter
853*9c5db199SXin Li        name and adding a boolean to indicate if it is a client or server
854*9c5db199SXin Li        control file.
855*9c5db199SXin Li    @param use_container unused argument today.  TODO: Enable containers
856*9c5db199SXin Li        on the host during a client side test.
857*9c5db199SXin Li    @param profile_only A boolean that indicates what default profile_only
858*9c5db199SXin Li        mode to use in the control file. Passing None will generate a
859*9c5db199SXin Li        control file that does not explcitly set the default mode at all.
860*9c5db199SXin Li    @param db_tests: if True, the test object can be found in the database
861*9c5db199SXin Li                     backing the test model. In this case, tests is a tuple
862*9c5db199SXin Li                     of test IDs which are used to retrieve the test objects
863*9c5db199SXin Li                     from the database. If False, tests is a tuple of test
864*9c5db199SXin Li                     dictionaries stored client-side in the AFE.
865*9c5db199SXin Li    @param test_source_build: Build to be used to retrieve test code. Default
866*9c5db199SXin Li                              to None.
867*9c5db199SXin Li
868*9c5db199SXin Li    @returns a dict with the following keys:
869*9c5db199SXin Li        control_file: str, The control file text.
870*9c5db199SXin Li        is_server: bool, is the control file a server-side control file?
871*9c5db199SXin Li        synch_count: How many machines the job uses per autoserv execution.
872*9c5db199SXin Li            synch_count == 1 means the job is asynchronous.
873*9c5db199SXin Li        dependencies: A list of the names of labels on which the job depends.
874*9c5db199SXin Li    """
875*9c5db199SXin Li    if not tests and not client_control_file:
876*9c5db199SXin Li        return dict(control_file='', is_server=False, synch_count=1,
877*9c5db199SXin Li                    dependencies=[])
878*9c5db199SXin Li
879*9c5db199SXin Li    cf_info, test_objects, profiler_objects = (
880*9c5db199SXin Li        rpc_utils.prepare_generate_control_file(tests, profilers,
881*9c5db199SXin Li                                                db_tests))
882*9c5db199SXin Li    cf_info['control_file'] = control_file_lib.generate_control(
883*9c5db199SXin Li        tests=test_objects, profilers=profiler_objects,
884*9c5db199SXin Li        is_server=cf_info['is_server'],
885*9c5db199SXin Li        client_control_file=client_control_file, profile_only=profile_only,
886*9c5db199SXin Li        test_source_build=test_source_build)
887*9c5db199SXin Li    return cf_info
888*9c5db199SXin Li
889*9c5db199SXin Li
890*9c5db199SXin Lidef create_job_page_handler(name, priority, control_file, control_type,
891*9c5db199SXin Li                            image=None, hostless=False, firmware_rw_build=None,
892*9c5db199SXin Li                            firmware_ro_build=None, test_source_build=None,
893*9c5db199SXin Li                            is_cloning=False, cheets_build=None, **kwargs):
894*9c5db199SXin Li    """\
895*9c5db199SXin Li    Create and enqueue a job.
896*9c5db199SXin Li
897*9c5db199SXin Li    @param name name of this job
898*9c5db199SXin Li    @param priority Integer priority of this job.  Higher is more important.
899*9c5db199SXin Li    @param control_file String contents of the control file.
900*9c5db199SXin Li    @param control_type Type of control file, Client or Server.
901*9c5db199SXin Li    @param image: ChromeOS build to be installed in the dut. Default to None.
902*9c5db199SXin Li    @param firmware_rw_build: Firmware build to update RW firmware. Default to
903*9c5db199SXin Li                              None, i.e., RW firmware will not be updated.
904*9c5db199SXin Li    @param firmware_ro_build: Firmware build to update RO firmware. Default to
905*9c5db199SXin Li                              None, i.e., RO firmware will not be updated.
906*9c5db199SXin Li    @param test_source_build: Build to be used to retrieve test code. Default
907*9c5db199SXin Li                              to None.
908*9c5db199SXin Li    @param is_cloning: True if creating a cloning job.
909*9c5db199SXin Li    @param cheets_build: ChromeOS Android build  to be installed in the dut.
910*9c5db199SXin Li                         Default to None. Cheets build will not be updated.
911*9c5db199SXin Li    @param kwargs extra args that will be required by create_suite_job or
912*9c5db199SXin Li                  create_job.
913*9c5db199SXin Li
914*9c5db199SXin Li    @returns The created Job id number.
915*9c5db199SXin Li    """
916*9c5db199SXin Li    test_args = {}
917*9c5db199SXin Li    if kwargs.get('args'):
918*9c5db199SXin Li        # args' format is: ['disable_sysinfo=False', 'fast=True', ...]
919*9c5db199SXin Li        args = kwargs.get('args')
920*9c5db199SXin Li        for arg in args:
921*9c5db199SXin Li            k, v = arg.split('=')[0], arg.split('=')[1]
922*9c5db199SXin Li            test_args[k] = v
923*9c5db199SXin Li
924*9c5db199SXin Li    if is_cloning:
925*9c5db199SXin Li        logging.info('Start to clone a new job')
926*9c5db199SXin Li        # When cloning a job, hosts and meta_hosts should not exist together,
927*9c5db199SXin Li        # which would cause host-scheduler to schedule two hqe jobs to one host
928*9c5db199SXin Li        # at the same time, and crash itself. Clear meta_hosts for this case.
929*9c5db199SXin Li        if kwargs.get('hosts') and kwargs.get('meta_hosts'):
930*9c5db199SXin Li            kwargs['meta_hosts'] = []
931*9c5db199SXin Li    else:
932*9c5db199SXin Li        logging.info('Start to create a new job')
933*9c5db199SXin Li    control_file = rpc_utils.encode_ascii(control_file)
934*9c5db199SXin Li    if not control_file:
935*9c5db199SXin Li        raise model_logic.ValidationError({
936*9c5db199SXin Li                'control_file' : "Control file cannot be empty"})
937*9c5db199SXin Li
938*9c5db199SXin Li    if image and hostless:
939*9c5db199SXin Li        builds = {}
940*9c5db199SXin Li        builds[provision.CROS_VERSION_PREFIX] = image
941*9c5db199SXin Li        if cheets_build:
942*9c5db199SXin Li            builds[provision.CROS_ANDROID_VERSION_PREFIX] = cheets_build
943*9c5db199SXin Li        if firmware_rw_build:
944*9c5db199SXin Li            builds[provision.FW_RW_VERSION_PREFIX] = firmware_rw_build
945*9c5db199SXin Li        if firmware_ro_build:
946*9c5db199SXin Li            builds[provision.FW_RO_VERSION_PREFIX] = firmware_ro_build
947*9c5db199SXin Li        return create_suite_job(
948*9c5db199SXin Li                name=name, control_file=control_file, priority=priority,
949*9c5db199SXin Li                builds=builds, test_source_build=test_source_build,
950*9c5db199SXin Li                is_cloning=is_cloning, test_args=test_args, **kwargs)
951*9c5db199SXin Li
952*9c5db199SXin Li    return create_job(name, priority, control_file, control_type, image=image,
953*9c5db199SXin Li                      hostless=hostless, test_args=test_args, **kwargs)
954*9c5db199SXin Li
955*9c5db199SXin Li
956*9c5db199SXin Li@rpc_utils.route_rpc_to_main
957*9c5db199SXin Lidef create_job(
958*9c5db199SXin Li        name,
959*9c5db199SXin Li        priority,
960*9c5db199SXin Li        control_file,
961*9c5db199SXin Li        control_type,
962*9c5db199SXin Li        hosts=(),
963*9c5db199SXin Li        meta_hosts=(),
964*9c5db199SXin Li        one_time_hosts=(),
965*9c5db199SXin Li        synch_count=None,
966*9c5db199SXin Li        is_template=False,
967*9c5db199SXin Li        timeout=None,
968*9c5db199SXin Li        timeout_mins=None,
969*9c5db199SXin Li        max_runtime_mins=None,
970*9c5db199SXin Li        run_verify=False,
971*9c5db199SXin Li        email_list='',
972*9c5db199SXin Li        dependencies=(),
973*9c5db199SXin Li        reboot_before=None,
974*9c5db199SXin Li        reboot_after=None,
975*9c5db199SXin Li        parse_failed_repair=None,
976*9c5db199SXin Li        hostless=False,
977*9c5db199SXin Li        keyvals=None,
978*9c5db199SXin Li        drone_set=None,
979*9c5db199SXin Li        image=None,
980*9c5db199SXin Li        parent_job_id=None,
981*9c5db199SXin Li        test_retry=0,
982*9c5db199SXin Li        run_reset=True,
983*9c5db199SXin Li        require_ssp=None,
984*9c5db199SXin Li        test_args=None,
985*9c5db199SXin Li        **kwargs):
986*9c5db199SXin Li    """\
987*9c5db199SXin Li    Create and enqueue a job.
988*9c5db199SXin Li
989*9c5db199SXin Li    @param name name of this job
990*9c5db199SXin Li    @param priority Integer priority of this job.  Higher is more important.
991*9c5db199SXin Li    @param control_file String contents of the control file.
992*9c5db199SXin Li    @param control_type Type of control file, Client or Server.
993*9c5db199SXin Li    @param synch_count How many machines the job uses per autoserv execution.
994*9c5db199SXin Li        synch_count == 1 means the job is asynchronous.  If an atomic group is
995*9c5db199SXin Li        given this value is treated as a minimum.
996*9c5db199SXin Li    @param is_template If true then create a template job.
997*9c5db199SXin Li    @param timeout Hours after this call returns until the job times out.
998*9c5db199SXin Li    @param timeout_mins Minutes after this call returns until the job times
999*9c5db199SXin Li        out.
1000*9c5db199SXin Li    @param max_runtime_mins Minutes from job starting time until job times out
1001*9c5db199SXin Li    @param run_verify Should the host be verified before running the test?
1002*9c5db199SXin Li    @param email_list String containing emails to mail when the job is done
1003*9c5db199SXin Li    @param dependencies List of label names on which this job depends
1004*9c5db199SXin Li    @param reboot_before Never, If dirty, or Always
1005*9c5db199SXin Li    @param reboot_after Never, If all tests passed, or Always
1006*9c5db199SXin Li    @param parse_failed_repair if true, results of failed repairs launched by
1007*9c5db199SXin Li        this job will be parsed as part of the job.
1008*9c5db199SXin Li    @param hostless if true, create a hostless job
1009*9c5db199SXin Li    @param keyvals dict of keyvals to associate with the job
1010*9c5db199SXin Li    @param hosts List of hosts to run job on.
1011*9c5db199SXin Li    @param meta_hosts List where each entry is a label name, and for each entry
1012*9c5db199SXin Li        one host will be chosen from that label to run the job on.
1013*9c5db199SXin Li    @param one_time_hosts List of hosts not in the database to run the job on.
1014*9c5db199SXin Li    @param drone_set The name of the drone set to run this test on.
1015*9c5db199SXin Li    @param image OS image to install before running job.
1016*9c5db199SXin Li    @param parent_job_id id of a job considered to be parent of created job.
1017*9c5db199SXin Li    @param test_retry DEPRECATED
1018*9c5db199SXin Li    @param run_reset Should the host be reset before running the test?
1019*9c5db199SXin Li    @param require_ssp Set to True to require server-side packaging to run the
1020*9c5db199SXin Li                       test. If it's set to None, drone will still try to run
1021*9c5db199SXin Li                       the server side with server-side packaging. If the
1022*9c5db199SXin Li                       autotest-server package doesn't exist for the build or
1023*9c5db199SXin Li                       image is not set, drone will run the test without server-
1024*9c5db199SXin Li                       side packaging. Default is None.
1025*9c5db199SXin Li    @param test_args A dict of args passed to be injected into control file.
1026*9c5db199SXin Li    @param kwargs extra keyword args. NOT USED.
1027*9c5db199SXin Li
1028*9c5db199SXin Li    @returns The created Job id number.
1029*9c5db199SXin Li    """
1030*9c5db199SXin Li    if test_args:
1031*9c5db199SXin Li        control_file = tools.inject_vars(test_args, control_file)
1032*9c5db199SXin Li    if image:
1033*9c5db199SXin Li        dependencies += (provision.image_version_to_label(image),)
1034*9c5db199SXin Li    return rpc_utils.create_job_common(
1035*9c5db199SXin Li            name=name,
1036*9c5db199SXin Li            priority=priority,
1037*9c5db199SXin Li            control_type=control_type,
1038*9c5db199SXin Li            control_file=control_file,
1039*9c5db199SXin Li            hosts=hosts,
1040*9c5db199SXin Li            meta_hosts=meta_hosts,
1041*9c5db199SXin Li            one_time_hosts=one_time_hosts,
1042*9c5db199SXin Li            synch_count=synch_count,
1043*9c5db199SXin Li            is_template=is_template,
1044*9c5db199SXin Li            timeout=timeout,
1045*9c5db199SXin Li            timeout_mins=timeout_mins,
1046*9c5db199SXin Li            max_runtime_mins=max_runtime_mins,
1047*9c5db199SXin Li            run_verify=run_verify,
1048*9c5db199SXin Li            email_list=email_list,
1049*9c5db199SXin Li            dependencies=dependencies,
1050*9c5db199SXin Li            reboot_before=reboot_before,
1051*9c5db199SXin Li            reboot_after=reboot_after,
1052*9c5db199SXin Li            parse_failed_repair=parse_failed_repair,
1053*9c5db199SXin Li            hostless=hostless,
1054*9c5db199SXin Li            keyvals=keyvals,
1055*9c5db199SXin Li            drone_set=drone_set,
1056*9c5db199SXin Li            parent_job_id=parent_job_id,
1057*9c5db199SXin Li            run_reset=run_reset,
1058*9c5db199SXin Li            require_ssp=require_ssp)
1059*9c5db199SXin Li
1060*9c5db199SXin Li
1061*9c5db199SXin Lidef abort_host_queue_entries(**filter_data):
1062*9c5db199SXin Li    """\
1063*9c5db199SXin Li    Abort a set of host queue entries.
1064*9c5db199SXin Li
1065*9c5db199SXin Li    @return: A list of dictionaries, each contains information
1066*9c5db199SXin Li             about an aborted HQE.
1067*9c5db199SXin Li    """
1068*9c5db199SXin Li    query = models.HostQueueEntry.query_objects(filter_data)
1069*9c5db199SXin Li
1070*9c5db199SXin Li    # Dont allow aborts on:
1071*9c5db199SXin Li    #   1. Jobs that have already completed (whether or not they were aborted)
1072*9c5db199SXin Li    #   2. Jobs that we have already been aborted (but may not have completed)
1073*9c5db199SXin Li    query = query.filter(complete=False).filter(aborted=False)
1074*9c5db199SXin Li    models.AclGroup.check_abort_permissions(query)
1075*9c5db199SXin Li    host_queue_entries = list(query.select_related())
1076*9c5db199SXin Li    rpc_utils.check_abort_synchronous_jobs(host_queue_entries)
1077*9c5db199SXin Li
1078*9c5db199SXin Li    models.HostQueueEntry.abort_host_queue_entries(host_queue_entries)
1079*9c5db199SXin Li    hqe_info = [{'HostQueueEntry': hqe.id, 'Job': hqe.job_id,
1080*9c5db199SXin Li                 'Job name': hqe.job.name} for hqe in host_queue_entries]
1081*9c5db199SXin Li    return hqe_info
1082*9c5db199SXin Li
1083*9c5db199SXin Li
1084*9c5db199SXin Lidef abort_special_tasks(**filter_data):
1085*9c5db199SXin Li    """\
1086*9c5db199SXin Li    Abort the special task, or tasks, specified in the filter.
1087*9c5db199SXin Li    """
1088*9c5db199SXin Li    query = models.SpecialTask.query_objects(filter_data)
1089*9c5db199SXin Li    special_tasks = query.filter(is_active=True)
1090*9c5db199SXin Li    for task in special_tasks:
1091*9c5db199SXin Li        task.abort()
1092*9c5db199SXin Li
1093*9c5db199SXin Li
1094*9c5db199SXin Lidef _call_special_tasks_on_hosts(task, hosts):
1095*9c5db199SXin Li    """\
1096*9c5db199SXin Li    Schedules a set of hosts for a special task.
1097*9c5db199SXin Li
1098*9c5db199SXin Li    @returns A list of hostnames that a special task was created for.
1099*9c5db199SXin Li    """
1100*9c5db199SXin Li    models.AclGroup.check_for_acl_violation_hosts(hosts)
1101*9c5db199SXin Li    shard_host_map = rpc_utils.bucket_hosts_by_shard(hosts)
1102*9c5db199SXin Li    if shard_host_map and not utils.is_shard():
1103*9c5db199SXin Li        raise ValueError('The following hosts are on shards, please '
1104*9c5db199SXin Li                         'follow the link to the shards and create jobs '
1105*9c5db199SXin Li                         'there instead. %s.' % shard_host_map)
1106*9c5db199SXin Li    for host in hosts:
1107*9c5db199SXin Li        models.SpecialTask.schedule_special_task(host, task)
1108*9c5db199SXin Li    return list(sorted(host.hostname for host in hosts))
1109*9c5db199SXin Li
1110*9c5db199SXin Li
1111*9c5db199SXin Lidef _forward_special_tasks_on_hosts(task, rpc, **filter_data):
1112*9c5db199SXin Li    """Forward special tasks to corresponding shards.
1113*9c5db199SXin Li
1114*9c5db199SXin Li    For main, when special tasks are fired on hosts that are sharded,
1115*9c5db199SXin Li    forward the RPC to corresponding shards.
1116*9c5db199SXin Li
1117*9c5db199SXin Li    For shard, create special task records in local DB.
1118*9c5db199SXin Li
1119*9c5db199SXin Li    @param task: Enum value of frontend.afe.models.SpecialTask.Task
1120*9c5db199SXin Li    @param rpc: RPC name to forward.
1121*9c5db199SXin Li    @param filter_data: Filter keywords to be used for DB query.
1122*9c5db199SXin Li
1123*9c5db199SXin Li    @return: A list of hostnames that a special task was created for.
1124*9c5db199SXin Li    """
1125*9c5db199SXin Li    hosts = models.Host.query_objects(filter_data)
1126*9c5db199SXin Li    shard_host_map = rpc_utils.bucket_hosts_by_shard(hosts)
1127*9c5db199SXin Li
1128*9c5db199SXin Li    # Filter out hosts on a shard from those on the main, forward
1129*9c5db199SXin Li    # rpcs to the shard with an additional hostname__in filter, and
1130*9c5db199SXin Li    # create a local SpecialTask for each remaining host.
1131*9c5db199SXin Li    if shard_host_map and not utils.is_shard():
1132*9c5db199SXin Li        hosts = [h for h in hosts if h.shard is None]
1133*9c5db199SXin Li        for shard, hostnames in shard_host_map.iteritems():
1134*9c5db199SXin Li
1135*9c5db199SXin Li            # The main client of this module is the frontend website, and
1136*9c5db199SXin Li            # it invokes it with an 'id' or an 'id__in' filter. Regardless,
1137*9c5db199SXin Li            # the 'hostname' filter should narrow down the list of hosts on
1138*9c5db199SXin Li            # each shard even though we supply all the ids in filter_data.
1139*9c5db199SXin Li            # This method uses hostname instead of id because it fits better
1140*9c5db199SXin Li            # with the overall architecture of redirection functions in
1141*9c5db199SXin Li            # rpc_utils.
1142*9c5db199SXin Li            shard_filter = filter_data.copy()
1143*9c5db199SXin Li            shard_filter['hostname__in'] = hostnames
1144*9c5db199SXin Li            rpc_utils.run_rpc_on_multiple_hostnames(
1145*9c5db199SXin Li                    rpc, [shard], **shard_filter)
1146*9c5db199SXin Li
1147*9c5db199SXin Li    # There is a race condition here if someone assigns a shard to one of these
1148*9c5db199SXin Li    # hosts before we create the task. The host will stay on the main if:
1149*9c5db199SXin Li    # 1. The host is not Ready
1150*9c5db199SXin Li    # 2. The host is Ready but has a task
1151*9c5db199SXin Li    # But if the host is Ready and doesn't have a task yet, it will get sent
1152*9c5db199SXin Li    # to the shard as we're creating a task here.
1153*9c5db199SXin Li
1154*9c5db199SXin Li    # Given that we only rarely verify Ready hosts it isn't worth putting this
1155*9c5db199SXin Li    # entire method in a transaction. The worst case scenario is that we have
1156*9c5db199SXin Li    # a verify running on a Ready host while the shard is using it, if the
1157*9c5db199SXin Li    # verify fails no subsequent tasks will be created against the host on the
1158*9c5db199SXin Li    # main, and verifies are safe enough that this is OK.
1159*9c5db199SXin Li    return _call_special_tasks_on_hosts(task, hosts)
1160*9c5db199SXin Li
1161*9c5db199SXin Li
1162*9c5db199SXin Lidef reverify_hosts(**filter_data):
1163*9c5db199SXin Li    """\
1164*9c5db199SXin Li    Schedules a set of hosts for verify.
1165*9c5db199SXin Li
1166*9c5db199SXin Li    @returns A list of hostnames that a verify task was created for.
1167*9c5db199SXin Li    """
1168*9c5db199SXin Li    return _forward_special_tasks_on_hosts(
1169*9c5db199SXin Li            models.SpecialTask.Task.VERIFY, 'reverify_hosts', **filter_data)
1170*9c5db199SXin Li
1171*9c5db199SXin Li
1172*9c5db199SXin Lidef repair_hosts(**filter_data):
1173*9c5db199SXin Li    """\
1174*9c5db199SXin Li    Schedules a set of hosts for repair.
1175*9c5db199SXin Li
1176*9c5db199SXin Li    @returns A list of hostnames that a repair task was created for.
1177*9c5db199SXin Li    """
1178*9c5db199SXin Li    return _forward_special_tasks_on_hosts(
1179*9c5db199SXin Li            models.SpecialTask.Task.REPAIR, 'repair_hosts', **filter_data)
1180*9c5db199SXin Li
1181*9c5db199SXin Li
1182*9c5db199SXin Lidef get_jobs(not_yet_run=False, running=False, finished=False,
1183*9c5db199SXin Li             suite=False, sub=False, standalone=False, **filter_data):
1184*9c5db199SXin Li    """\
1185*9c5db199SXin Li    Extra status filter args for get_jobs:
1186*9c5db199SXin Li    -not_yet_run: Include only jobs that have not yet started running.
1187*9c5db199SXin Li    -running: Include only jobs that have start running but for which not
1188*9c5db199SXin Li    all hosts have completed.
1189*9c5db199SXin Li    -finished: Include only jobs for which all hosts have completed (or
1190*9c5db199SXin Li    aborted).
1191*9c5db199SXin Li
1192*9c5db199SXin Li    Extra type filter args for get_jobs:
1193*9c5db199SXin Li    -suite: Include only jobs with child jobs.
1194*9c5db199SXin Li    -sub: Include only jobs with a parent job.
1195*9c5db199SXin Li    -standalone: Inlcude only jobs with no child or parent jobs.
1196*9c5db199SXin Li    At most one of these three fields should be specified.
1197*9c5db199SXin Li    """
1198*9c5db199SXin Li    extra_args = rpc_utils.extra_job_status_filters(not_yet_run,
1199*9c5db199SXin Li                                                    running,
1200*9c5db199SXin Li                                                    finished)
1201*9c5db199SXin Li    filter_data['extra_args'] = rpc_utils.extra_job_type_filters(extra_args,
1202*9c5db199SXin Li                                                                 suite,
1203*9c5db199SXin Li                                                                 sub,
1204*9c5db199SXin Li                                                                 standalone)
1205*9c5db199SXin Li    job_dicts = []
1206*9c5db199SXin Li    jobs = list(models.Job.query_objects(filter_data))
1207*9c5db199SXin Li    models.Job.objects.populate_relationships(jobs, models.Label,
1208*9c5db199SXin Li                                              'dependencies')
1209*9c5db199SXin Li    models.Job.objects.populate_relationships(jobs, models.JobKeyval, 'keyvals')
1210*9c5db199SXin Li    for job in jobs:
1211*9c5db199SXin Li        job_dict = job.get_object_dict()
1212*9c5db199SXin Li        job_dict['dependencies'] = ','.join(label.name
1213*9c5db199SXin Li                                            for label in job.dependencies)
1214*9c5db199SXin Li        job_dict['keyvals'] = dict((keyval.key, keyval.value)
1215*9c5db199SXin Li                                   for keyval in job.keyvals)
1216*9c5db199SXin Li        job_dicts.append(job_dict)
1217*9c5db199SXin Li    return rpc_utils.prepare_for_serialization(job_dicts)
1218*9c5db199SXin Li
1219*9c5db199SXin Li
1220*9c5db199SXin Lidef get_num_jobs(not_yet_run=False, running=False, finished=False,
1221*9c5db199SXin Li                 suite=False, sub=False, standalone=False,
1222*9c5db199SXin Li                 **filter_data):
1223*9c5db199SXin Li    """\
1224*9c5db199SXin Li    See get_jobs() for documentation of extra filter parameters.
1225*9c5db199SXin Li    """
1226*9c5db199SXin Li    extra_args = rpc_utils.extra_job_status_filters(not_yet_run,
1227*9c5db199SXin Li                                                    running,
1228*9c5db199SXin Li                                                    finished)
1229*9c5db199SXin Li    filter_data['extra_args'] = rpc_utils.extra_job_type_filters(extra_args,
1230*9c5db199SXin Li                                                                 suite,
1231*9c5db199SXin Li                                                                 sub,
1232*9c5db199SXin Li                                                                 standalone)
1233*9c5db199SXin Li    return models.Job.query_count(filter_data)
1234*9c5db199SXin Li
1235*9c5db199SXin Li
1236*9c5db199SXin Lidef get_jobs_summary(**filter_data):
1237*9c5db199SXin Li    """\
1238*9c5db199SXin Li    Like get_jobs(), but adds 'status_counts' and 'result_counts' field.
1239*9c5db199SXin Li
1240*9c5db199SXin Li    'status_counts' filed is a dictionary mapping status strings to the number
1241*9c5db199SXin Li    of hosts currently with that status, i.e. {'Queued' : 4, 'Running' : 2}.
1242*9c5db199SXin Li
1243*9c5db199SXin Li    'result_counts' field is piped to tko's rpc_interface and has the return
1244*9c5db199SXin Li    format specified under get_group_counts.
1245*9c5db199SXin Li    """
1246*9c5db199SXin Li    jobs = get_jobs(**filter_data)
1247*9c5db199SXin Li    ids = [job['id'] for job in jobs]
1248*9c5db199SXin Li    all_status_counts = models.Job.objects.get_status_counts(ids)
1249*9c5db199SXin Li    for job in jobs:
1250*9c5db199SXin Li        job['status_counts'] = all_status_counts[job['id']]
1251*9c5db199SXin Li        job['result_counts'] = tko_rpc_interface.get_status_counts(
1252*9c5db199SXin Li                ['afe_job_id', 'afe_job_id'],
1253*9c5db199SXin Li                header_groups=[['afe_job_id'], ['afe_job_id']],
1254*9c5db199SXin Li                **{'afe_job_id': job['id']})
1255*9c5db199SXin Li    return rpc_utils.prepare_for_serialization(jobs)
1256*9c5db199SXin Li
1257*9c5db199SXin Li
1258*9c5db199SXin Lidef get_info_for_clone(id, preserve_metahosts, queue_entry_filter_data=None):
1259*9c5db199SXin Li    """\
1260*9c5db199SXin Li    Retrieves all the information needed to clone a job.
1261*9c5db199SXin Li    """
1262*9c5db199SXin Li    job = models.Job.objects.get(id=id)
1263*9c5db199SXin Li    job_info = rpc_utils.get_job_info(job,
1264*9c5db199SXin Li                                      preserve_metahosts,
1265*9c5db199SXin Li                                      queue_entry_filter_data)
1266*9c5db199SXin Li
1267*9c5db199SXin Li    host_dicts = []
1268*9c5db199SXin Li    for host in job_info['hosts']:
1269*9c5db199SXin Li        host_dict = get_hosts(id=host.id)[0]
1270*9c5db199SXin Li        other_labels = host_dict['labels']
1271*9c5db199SXin Li        if host_dict['platform']:
1272*9c5db199SXin Li            other_labels.remove(host_dict['platform'])
1273*9c5db199SXin Li        host_dict['other_labels'] = ', '.join(other_labels)
1274*9c5db199SXin Li        host_dicts.append(host_dict)
1275*9c5db199SXin Li
1276*9c5db199SXin Li    for host in job_info['one_time_hosts']:
1277*9c5db199SXin Li        host_dict = dict(hostname=host.hostname,
1278*9c5db199SXin Li                         id=host.id,
1279*9c5db199SXin Li                         platform='(one-time host)',
1280*9c5db199SXin Li                         locked_text='')
1281*9c5db199SXin Li        host_dicts.append(host_dict)
1282*9c5db199SXin Li
1283*9c5db199SXin Li    # convert keys from Label objects to strings (names of labels)
1284*9c5db199SXin Li    meta_host_counts = dict((meta_host.name, count) for meta_host, count
1285*9c5db199SXin Li                            in job_info['meta_host_counts'].iteritems())
1286*9c5db199SXin Li
1287*9c5db199SXin Li    info = dict(job=job.get_object_dict(),
1288*9c5db199SXin Li                meta_host_counts=meta_host_counts,
1289*9c5db199SXin Li                hosts=host_dicts)
1290*9c5db199SXin Li    info['job']['dependencies'] = job_info['dependencies']
1291*9c5db199SXin Li    info['hostless'] = job_info['hostless']
1292*9c5db199SXin Li    info['drone_set'] = job.drone_set and job.drone_set.name
1293*9c5db199SXin Li
1294*9c5db199SXin Li    image = _get_image_for_job(job, job_info['hostless'])
1295*9c5db199SXin Li    if image:
1296*9c5db199SXin Li        info['job']['image'] = image
1297*9c5db199SXin Li
1298*9c5db199SXin Li    return rpc_utils.prepare_for_serialization(info)
1299*9c5db199SXin Li
1300*9c5db199SXin Li
1301*9c5db199SXin Lidef _get_image_for_job(job, hostless):
1302*9c5db199SXin Li    """Gets the image used for a job.
1303*9c5db199SXin Li
1304*9c5db199SXin Li    Gets the image used for an AFE job from the job's keyvals 'build' or
1305*9c5db199SXin Li    'builds'. If that fails, and the job is a hostless job, tries to
1306*9c5db199SXin Li    get the image from its control file attributes 'build' or 'builds'.
1307*9c5db199SXin Li
1308*9c5db199SXin Li    TODO(ntang): Needs to handle FAFT with two builds for ro/rw.
1309*9c5db199SXin Li
1310*9c5db199SXin Li    @param job      An AFE job object.
1311*9c5db199SXin Li    @param hostless Boolean indicating whether the job is hostless.
1312*9c5db199SXin Li
1313*9c5db199SXin Li    @returns The image build used for the job.
1314*9c5db199SXin Li    """
1315*9c5db199SXin Li    keyvals = job.keyval_dict()
1316*9c5db199SXin Li    image = keyvals.get('build')
1317*9c5db199SXin Li    if not image:
1318*9c5db199SXin Li        value = keyvals.get('builds')
1319*9c5db199SXin Li        builds = None
1320*9c5db199SXin Li        if isinstance(value, dict):
1321*9c5db199SXin Li            builds = value
1322*9c5db199SXin Li        elif isinstance(value, six.string_types):
1323*9c5db199SXin Li            builds = ast.literal_eval(value)
1324*9c5db199SXin Li        if builds:
1325*9c5db199SXin Li            image = builds.get('cros-version')
1326*9c5db199SXin Li    if not image and hostless and job.control_file:
1327*9c5db199SXin Li        try:
1328*9c5db199SXin Li            control_obj = control_data.parse_control_string(
1329*9c5db199SXin Li                    job.control_file)
1330*9c5db199SXin Li            if hasattr(control_obj, 'build'):
1331*9c5db199SXin Li                image = getattr(control_obj, 'build')
1332*9c5db199SXin Li            if not image and hasattr(control_obj, 'builds'):
1333*9c5db199SXin Li                builds = getattr(control_obj, 'builds')
1334*9c5db199SXin Li                image = builds.get('cros-version')
1335*9c5db199SXin Li        except:
1336*9c5db199SXin Li            logging.warning('Failed to parse control file for job: %s',
1337*9c5db199SXin Li                            job.name)
1338*9c5db199SXin Li    return image
1339*9c5db199SXin Li
1340*9c5db199SXin Li
1341*9c5db199SXin Lidef get_host_queue_entries_by_insert_time(
1342*9c5db199SXin Li    insert_time_after=None, insert_time_before=None, **filter_data):
1343*9c5db199SXin Li    """Like get_host_queue_entries, but using the insert index table.
1344*9c5db199SXin Li
1345*9c5db199SXin Li    @param insert_time_after: A lower bound on insert_time
1346*9c5db199SXin Li    @param insert_time_before: An upper bound on insert_time
1347*9c5db199SXin Li    @returns A sequence of nested dictionaries of host and job information.
1348*9c5db199SXin Li    """
1349*9c5db199SXin Li    assert insert_time_after is not None or insert_time_before is not None, \
1350*9c5db199SXin Li      ('Caller to get_host_queue_entries_by_insert_time must provide either'
1351*9c5db199SXin Li       ' insert_time_after or insert_time_before.')
1352*9c5db199SXin Li    # Get insert bounds on the index of the host queue entries.
1353*9c5db199SXin Li    if insert_time_after:
1354*9c5db199SXin Li        query = models.HostQueueEntryStartTimes.objects.filter(
1355*9c5db199SXin Li            # Note: '-insert_time' means descending. We want the largest
1356*9c5db199SXin Li            # insert time smaller than the insert time.
1357*9c5db199SXin Li            insert_time__lte=insert_time_after).order_by('-insert_time')
1358*9c5db199SXin Li        try:
1359*9c5db199SXin Li            constraint = query[0].highest_hqe_id
1360*9c5db199SXin Li            if 'id__gte' in filter_data:
1361*9c5db199SXin Li                constraint = max(constraint, filter_data['id__gte'])
1362*9c5db199SXin Li            filter_data['id__gte'] = constraint
1363*9c5db199SXin Li        except IndexError:
1364*9c5db199SXin Li            pass
1365*9c5db199SXin Li
1366*9c5db199SXin Li    # Get end bounds.
1367*9c5db199SXin Li    if insert_time_before:
1368*9c5db199SXin Li        query = models.HostQueueEntryStartTimes.objects.filter(
1369*9c5db199SXin Li            insert_time__gte=insert_time_before).order_by('insert_time')
1370*9c5db199SXin Li        try:
1371*9c5db199SXin Li            constraint = query[0].highest_hqe_id
1372*9c5db199SXin Li            if 'id__lte' in filter_data:
1373*9c5db199SXin Li                constraint = min(constraint, filter_data['id__lte'])
1374*9c5db199SXin Li            filter_data['id__lte'] = constraint
1375*9c5db199SXin Li        except IndexError:
1376*9c5db199SXin Li            pass
1377*9c5db199SXin Li
1378*9c5db199SXin Li    return rpc_utils.prepare_rows_as_nested_dicts(
1379*9c5db199SXin Li            models.HostQueueEntry.query_objects(filter_data),
1380*9c5db199SXin Li            ('host', 'job'))
1381*9c5db199SXin Li
1382*9c5db199SXin Li
1383*9c5db199SXin Lidef get_host_queue_entries(start_time=None, end_time=None, **filter_data):
1384*9c5db199SXin Li    """\
1385*9c5db199SXin Li    @returns A sequence of nested dictionaries of host and job information.
1386*9c5db199SXin Li    """
1387*9c5db199SXin Li    filter_data = rpc_utils.inject_times_to_filter('started_on__gte',
1388*9c5db199SXin Li                                                   'started_on__lte',
1389*9c5db199SXin Li                                                   start_time,
1390*9c5db199SXin Li                                                   end_time,
1391*9c5db199SXin Li                                                   **filter_data)
1392*9c5db199SXin Li    return rpc_utils.prepare_rows_as_nested_dicts(
1393*9c5db199SXin Li            models.HostQueueEntry.query_objects(filter_data),
1394*9c5db199SXin Li            ('host', 'job'))
1395*9c5db199SXin Li
1396*9c5db199SXin Li
1397*9c5db199SXin Lidef get_num_host_queue_entries(start_time=None, end_time=None, **filter_data):
1398*9c5db199SXin Li    """\
1399*9c5db199SXin Li    Get the number of host queue entries associated with this job.
1400*9c5db199SXin Li    """
1401*9c5db199SXin Li    filter_data = rpc_utils.inject_times_to_filter('started_on__gte',
1402*9c5db199SXin Li                                                   'started_on__lte',
1403*9c5db199SXin Li                                                   start_time,
1404*9c5db199SXin Li                                                   end_time,
1405*9c5db199SXin Li                                                   **filter_data)
1406*9c5db199SXin Li    return models.HostQueueEntry.query_count(filter_data)
1407*9c5db199SXin Li
1408*9c5db199SXin Li
1409*9c5db199SXin Lidef get_hqe_percentage_complete(**filter_data):
1410*9c5db199SXin Li    """
1411*9c5db199SXin Li    Computes the fraction of host queue entries matching the given filter data
1412*9c5db199SXin Li    that are complete.
1413*9c5db199SXin Li    """
1414*9c5db199SXin Li    query = models.HostQueueEntry.query_objects(filter_data)
1415*9c5db199SXin Li    complete_count = query.filter(complete=True).count()
1416*9c5db199SXin Li    total_count = query.count()
1417*9c5db199SXin Li    if total_count == 0:
1418*9c5db199SXin Li        return 1
1419*9c5db199SXin Li    return float(complete_count) / total_count
1420*9c5db199SXin Li
1421*9c5db199SXin Li
1422*9c5db199SXin Li# special tasks
1423*9c5db199SXin Li
1424*9c5db199SXin Lidef get_special_tasks(**filter_data):
1425*9c5db199SXin Li    """Get special task entries from the local database.
1426*9c5db199SXin Li
1427*9c5db199SXin Li    Query the special tasks table for tasks matching the given
1428*9c5db199SXin Li    `filter_data`, and return a list of the results.  No attempt is
1429*9c5db199SXin Li    made to forward the call to shards; the buck will stop here.
1430*9c5db199SXin Li    The caller is expected to know the target shard for such reasons
1431*9c5db199SXin Li    as:
1432*9c5db199SXin Li      * The caller is a service (such as gs_offloader) configured
1433*9c5db199SXin Li        to operate on behalf of one specific shard, and no other.
1434*9c5db199SXin Li      * The caller has a host as a parameter, and knows that this is
1435*9c5db199SXin Li        the shard assigned to that host.
1436*9c5db199SXin Li
1437*9c5db199SXin Li    @param filter_data  Filter keywords to pass to the underlying
1438*9c5db199SXin Li                        database query.
1439*9c5db199SXin Li
1440*9c5db199SXin Li    """
1441*9c5db199SXin Li    return rpc_utils.prepare_rows_as_nested_dicts(
1442*9c5db199SXin Li            models.SpecialTask.query_objects(filter_data),
1443*9c5db199SXin Li            ('host', 'queue_entry'))
1444*9c5db199SXin Li
1445*9c5db199SXin Li
1446*9c5db199SXin Lidef get_host_special_tasks(host_id, **filter_data):
1447*9c5db199SXin Li    """Get special task entries for a given host.
1448*9c5db199SXin Li
1449*9c5db199SXin Li    Query the special tasks table for tasks that ran on the host
1450*9c5db199SXin Li    given by `host_id` and matching the given `filter_data`.
1451*9c5db199SXin Li    Return a list of the results.  If the host is assigned to a
1452*9c5db199SXin Li    shard, forward this call to that shard.
1453*9c5db199SXin Li
1454*9c5db199SXin Li    @param host_id      Id in the database of the target host.
1455*9c5db199SXin Li    @param filter_data  Filter keywords to pass to the underlying
1456*9c5db199SXin Li                        database query.
1457*9c5db199SXin Li
1458*9c5db199SXin Li    """
1459*9c5db199SXin Li    # Retrieve host data even if the host is in an invalid state.
1460*9c5db199SXin Li    host = models.Host.smart_get(host_id, False)
1461*9c5db199SXin Li    if not host.shard:
1462*9c5db199SXin Li        return get_special_tasks(host_id=host_id, **filter_data)
1463*9c5db199SXin Li    else:
1464*9c5db199SXin Li        # The return values from AFE methods are post-processed
1465*9c5db199SXin Li        # objects that aren't JSON-serializable.  So, we have to
1466*9c5db199SXin Li        # call AFE.run() to get the raw, serializable output from
1467*9c5db199SXin Li        # the shard.
1468*9c5db199SXin Li        shard_afe = frontend.AFE(server=host.shard.hostname)
1469*9c5db199SXin Li        return shard_afe.run('get_special_tasks',
1470*9c5db199SXin Li                             host_id=host_id, **filter_data)
1471*9c5db199SXin Li
1472*9c5db199SXin Li
1473*9c5db199SXin Lidef get_num_special_tasks(**kwargs):
1474*9c5db199SXin Li    """Get the number of special task entries from the local database.
1475*9c5db199SXin Li
1476*9c5db199SXin Li    Query the special tasks table for tasks matching the given 'kwargs',
1477*9c5db199SXin Li    and return the number of the results. No attempt is made to forward
1478*9c5db199SXin Li    the call to shards; the buck will stop here.
1479*9c5db199SXin Li
1480*9c5db199SXin Li    @param kwargs    Filter keywords to pass to the underlying database query.
1481*9c5db199SXin Li
1482*9c5db199SXin Li    """
1483*9c5db199SXin Li    return models.SpecialTask.query_count(kwargs)
1484*9c5db199SXin Li
1485*9c5db199SXin Li
1486*9c5db199SXin Lidef get_host_num_special_tasks(host, **kwargs):
1487*9c5db199SXin Li    """Get special task entries for a given host.
1488*9c5db199SXin Li
1489*9c5db199SXin Li    Query the special tasks table for tasks that ran on the host
1490*9c5db199SXin Li    given by 'host' and matching the given 'kwargs'.
1491*9c5db199SXin Li    Return a list of the results.  If the host is assigned to a
1492*9c5db199SXin Li    shard, forward this call to that shard.
1493*9c5db199SXin Li
1494*9c5db199SXin Li    @param host      id or name of a host. More often a hostname.
1495*9c5db199SXin Li    @param kwargs    Filter keywords to pass to the underlying database query.
1496*9c5db199SXin Li
1497*9c5db199SXin Li    """
1498*9c5db199SXin Li    # Retrieve host data even if the host is in an invalid state.
1499*9c5db199SXin Li    host_model = models.Host.smart_get(host, False)
1500*9c5db199SXin Li    if not host_model.shard:
1501*9c5db199SXin Li        return get_num_special_tasks(host=host, **kwargs)
1502*9c5db199SXin Li    else:
1503*9c5db199SXin Li        shard_afe = frontend.AFE(server=host_model.shard.hostname)
1504*9c5db199SXin Li        return shard_afe.run('get_num_special_tasks', host=host, **kwargs)
1505*9c5db199SXin Li
1506*9c5db199SXin Li
1507*9c5db199SXin Lidef get_status_task(host_id, end_time):
1508*9c5db199SXin Li    """Get the "status task" for a host from the local shard.
1509*9c5db199SXin Li
1510*9c5db199SXin Li    Returns a single special task representing the given host's
1511*9c5db199SXin Li    "status task".  The status task is a completed special task that
1512*9c5db199SXin Li    identifies whether the corresponding host was working or broken
1513*9c5db199SXin Li    when it completed.  A successful task indicates a working host;
1514*9c5db199SXin Li    a failed task indicates broken.
1515*9c5db199SXin Li
1516*9c5db199SXin Li    This call will not be forward to a shard; the receiving server
1517*9c5db199SXin Li    must be the shard that owns the host.
1518*9c5db199SXin Li
1519*9c5db199SXin Li    @param host_id      Id in the database of the target host.
1520*9c5db199SXin Li    @param end_time     Time reference for the host's status.
1521*9c5db199SXin Li
1522*9c5db199SXin Li    @return A single task; its status (successful or not)
1523*9c5db199SXin Li            corresponds to the status of the host (working or
1524*9c5db199SXin Li            broken) at the given time.  If no task is found, return
1525*9c5db199SXin Li            `None`.
1526*9c5db199SXin Li
1527*9c5db199SXin Li    """
1528*9c5db199SXin Li    tasklist = rpc_utils.prepare_rows_as_nested_dicts(
1529*9c5db199SXin Li            status_history.get_status_task(host_id, end_time),
1530*9c5db199SXin Li            ('host', 'queue_entry'))
1531*9c5db199SXin Li    return tasklist[0] if tasklist else None
1532*9c5db199SXin Li
1533*9c5db199SXin Li
1534*9c5db199SXin Lidef get_host_status_task(host_id, end_time):
1535*9c5db199SXin Li    """Get the "status task" for a host from its owning shard.
1536*9c5db199SXin Li
1537*9c5db199SXin Li    Finds the given host's owning shard, and forwards to it a call
1538*9c5db199SXin Li    to `get_status_task()` (see above).
1539*9c5db199SXin Li
1540*9c5db199SXin Li    @param host_id      Id in the database of the target host.
1541*9c5db199SXin Li    @param end_time     Time reference for the host's status.
1542*9c5db199SXin Li
1543*9c5db199SXin Li    @return A single task; its status (successful or not)
1544*9c5db199SXin Li            corresponds to the status of the host (working or
1545*9c5db199SXin Li            broken) at the given time.  If no task is found, return
1546*9c5db199SXin Li            `None`.
1547*9c5db199SXin Li
1548*9c5db199SXin Li    """
1549*9c5db199SXin Li    host = models.Host.smart_get(host_id)
1550*9c5db199SXin Li    if not host.shard:
1551*9c5db199SXin Li        return get_status_task(host_id, end_time)
1552*9c5db199SXin Li    else:
1553*9c5db199SXin Li        # The return values from AFE methods are post-processed
1554*9c5db199SXin Li        # objects that aren't JSON-serializable.  So, we have to
1555*9c5db199SXin Li        # call AFE.run() to get the raw, serializable output from
1556*9c5db199SXin Li        # the shard.
1557*9c5db199SXin Li        shard_afe = frontend.AFE(server=host.shard.hostname)
1558*9c5db199SXin Li        return shard_afe.run('get_status_task',
1559*9c5db199SXin Li                             host_id=host_id, end_time=end_time)
1560*9c5db199SXin Li
1561*9c5db199SXin Li
1562*9c5db199SXin Lidef get_host_diagnosis_interval(host_id, end_time, success):
1563*9c5db199SXin Li    """Find a "diagnosis interval" for a given host.
1564*9c5db199SXin Li
1565*9c5db199SXin Li    A "diagnosis interval" identifies a start and end time where
1566*9c5db199SXin Li    the host went from "working" to "broken", or vice versa.  The
1567*9c5db199SXin Li    interval's starting time is the starting time of the last status
1568*9c5db199SXin Li    task with the old status; the end time is the finish time of the
1569*9c5db199SXin Li    first status task with the new status.
1570*9c5db199SXin Li
1571*9c5db199SXin Li    This routine finds the most recent diagnosis interval for the
1572*9c5db199SXin Li    given host prior to `end_time`, with a starting status matching
1573*9c5db199SXin Li    `success`.  If `success` is true, the interval will start with a
1574*9c5db199SXin Li    successful status task; if false the interval will start with a
1575*9c5db199SXin Li    failed status task.
1576*9c5db199SXin Li
1577*9c5db199SXin Li    @param host_id      Id in the database of the target host.
1578*9c5db199SXin Li    @param end_time     Time reference for the diagnosis interval.
1579*9c5db199SXin Li    @param success      Whether the diagnosis interval should start
1580*9c5db199SXin Li                        with a successful or failed status task.
1581*9c5db199SXin Li
1582*9c5db199SXin Li    @return A list of two strings.  The first is the timestamp for
1583*9c5db199SXin Li            the beginning of the interval; the second is the
1584*9c5db199SXin Li            timestamp for the end.  If the host has never changed
1585*9c5db199SXin Li            state, the list is empty.
1586*9c5db199SXin Li
1587*9c5db199SXin Li    """
1588*9c5db199SXin Li    host = models.Host.smart_get(host_id)
1589*9c5db199SXin Li    if not host.shard or utils.is_shard():
1590*9c5db199SXin Li        return status_history.get_diagnosis_interval(
1591*9c5db199SXin Li                host_id, end_time, success)
1592*9c5db199SXin Li    else:
1593*9c5db199SXin Li        shard_afe = frontend.AFE(server=host.shard.hostname)
1594*9c5db199SXin Li        return shard_afe.get_host_diagnosis_interval(
1595*9c5db199SXin Li                host_id, end_time, success)
1596*9c5db199SXin Li
1597*9c5db199SXin Li
1598*9c5db199SXin Li# support for host detail view
1599*9c5db199SXin Li
1600*9c5db199SXin Lidef get_host_queue_entries_and_special_tasks(host, query_start=None,
1601*9c5db199SXin Li                                             query_limit=None, start_time=None,
1602*9c5db199SXin Li                                             end_time=None):
1603*9c5db199SXin Li    """
1604*9c5db199SXin Li    @returns an interleaved list of HostQueueEntries and SpecialTasks,
1605*9c5db199SXin Li            in approximate run order.  each dict contains keys for type, host,
1606*9c5db199SXin Li            job, status, started_on, execution_path, and ID.
1607*9c5db199SXin Li    """
1608*9c5db199SXin Li    total_limit = None
1609*9c5db199SXin Li    if query_limit is not None:
1610*9c5db199SXin Li        total_limit = query_start + query_limit
1611*9c5db199SXin Li    filter_data_common = {'host': host,
1612*9c5db199SXin Li                          'query_limit': total_limit,
1613*9c5db199SXin Li                          'sort_by': ['-id']}
1614*9c5db199SXin Li
1615*9c5db199SXin Li    filter_data_special_tasks = rpc_utils.inject_times_to_filter(
1616*9c5db199SXin Li            'time_started__gte', 'time_started__lte', start_time, end_time,
1617*9c5db199SXin Li            **filter_data_common)
1618*9c5db199SXin Li
1619*9c5db199SXin Li    queue_entries = get_host_queue_entries(
1620*9c5db199SXin Li            start_time, end_time, **filter_data_common)
1621*9c5db199SXin Li    special_tasks = get_host_special_tasks(host, **filter_data_special_tasks)
1622*9c5db199SXin Li
1623*9c5db199SXin Li    interleaved_entries = rpc_utils.interleave_entries(queue_entries,
1624*9c5db199SXin Li                                                       special_tasks)
1625*9c5db199SXin Li    if query_start is not None:
1626*9c5db199SXin Li        interleaved_entries = interleaved_entries[query_start:]
1627*9c5db199SXin Li    if query_limit is not None:
1628*9c5db199SXin Li        interleaved_entries = interleaved_entries[:query_limit]
1629*9c5db199SXin Li    return rpc_utils.prepare_host_queue_entries_and_special_tasks(
1630*9c5db199SXin Li            interleaved_entries, queue_entries)
1631*9c5db199SXin Li
1632*9c5db199SXin Li
1633*9c5db199SXin Lidef get_num_host_queue_entries_and_special_tasks(host, start_time=None,
1634*9c5db199SXin Li                                                 end_time=None):
1635*9c5db199SXin Li    filter_data_common = {'host': host}
1636*9c5db199SXin Li
1637*9c5db199SXin Li    filter_data_queue_entries, filter_data_special_tasks = (
1638*9c5db199SXin Li            rpc_utils.inject_times_to_hqe_special_tasks_filters(
1639*9c5db199SXin Li                    filter_data_common, start_time, end_time))
1640*9c5db199SXin Li
1641*9c5db199SXin Li    return (models.HostQueueEntry.query_count(filter_data_queue_entries)
1642*9c5db199SXin Li            + get_host_num_special_tasks(**filter_data_special_tasks))
1643*9c5db199SXin Li
1644*9c5db199SXin Li
1645*9c5db199SXin Li# other
1646*9c5db199SXin Li
1647*9c5db199SXin Lidef echo(data=""):
1648*9c5db199SXin Li    """\
1649*9c5db199SXin Li    Returns a passed in string. For doing a basic test to see if RPC calls
1650*9c5db199SXin Li    can successfully be made.
1651*9c5db199SXin Li    """
1652*9c5db199SXin Li    return data
1653*9c5db199SXin Li
1654*9c5db199SXin Li
1655*9c5db199SXin Lidef get_motd():
1656*9c5db199SXin Li    """\
1657*9c5db199SXin Li    Returns the message of the day as a string.
1658*9c5db199SXin Li    """
1659*9c5db199SXin Li    return rpc_utils.get_motd()
1660*9c5db199SXin Li
1661*9c5db199SXin Li
1662*9c5db199SXin Lidef get_static_data():
1663*9c5db199SXin Li    """\
1664*9c5db199SXin Li    Returns a dictionary containing a bunch of data that shouldn't change
1665*9c5db199SXin Li    often and is otherwise inaccessible.  This includes:
1666*9c5db199SXin Li
1667*9c5db199SXin Li    priorities: List of job priority choices.
1668*9c5db199SXin Li    default_priority: Default priority value for new jobs.
1669*9c5db199SXin Li    users: Sorted list of all users.
1670*9c5db199SXin Li    labels: Sorted list of labels not start with 'cros-version' and
1671*9c5db199SXin Li            'fw-version'.
1672*9c5db199SXin Li    tests: Sorted list of all tests.
1673*9c5db199SXin Li    profilers: Sorted list of all profilers.
1674*9c5db199SXin Li    current_user: Logged-in username.
1675*9c5db199SXin Li    host_statuses: Sorted list of possible Host statuses.
1676*9c5db199SXin Li    job_statuses: Sorted list of possible HostQueueEntry statuses.
1677*9c5db199SXin Li    job_timeout_default: The default job timeout length in minutes.
1678*9c5db199SXin Li    parse_failed_repair_default: Default value for the parse_failed_repair job
1679*9c5db199SXin Li            option.
1680*9c5db199SXin Li    reboot_before_options: A list of valid RebootBefore string enums.
1681*9c5db199SXin Li    reboot_after_options: A list of valid RebootAfter string enums.
1682*9c5db199SXin Li    motd: Server's message of the day.
1683*9c5db199SXin Li    status_dictionary: A mapping from one word job status names to a more
1684*9c5db199SXin Li            informative description.
1685*9c5db199SXin Li    """
1686*9c5db199SXin Li
1687*9c5db199SXin Li    default_drone_set_name = models.DroneSet.default_drone_set_name()
1688*9c5db199SXin Li    drone_sets = ([default_drone_set_name] +
1689*9c5db199SXin Li                  sorted(drone_set.name for drone_set in
1690*9c5db199SXin Li                         models.DroneSet.objects.exclude(
1691*9c5db199SXin Li                                 name=default_drone_set_name)))
1692*9c5db199SXin Li
1693*9c5db199SXin Li    result = {}
1694*9c5db199SXin Li    result['priorities'] = priorities.Priority.choices()
1695*9c5db199SXin Li    result['default_priority'] = 'Default'
1696*9c5db199SXin Li    result['max_schedulable_priority'] = priorities.Priority.DEFAULT
1697*9c5db199SXin Li    result['users'] = get_users(sort_by=['login'])
1698*9c5db199SXin Li
1699*9c5db199SXin Li    label_exclude_filters = [{'name__startswith': 'cros-version'},
1700*9c5db199SXin Li                             {'name__startswith': 'fw-version'},
1701*9c5db199SXin Li                             {'name__startswith': 'fwrw-version'},
1702*9c5db199SXin Li                             {'name__startswith': 'fwro-version'}]
1703*9c5db199SXin Li    result['labels'] = get_labels(
1704*9c5db199SXin Li        label_exclude_filters,
1705*9c5db199SXin Li        sort_by=['-platform', 'name'])
1706*9c5db199SXin Li
1707*9c5db199SXin Li    result['tests'] = get_tests(sort_by=['name'])
1708*9c5db199SXin Li    result['profilers'] = get_profilers(sort_by=['name'])
1709*9c5db199SXin Li    result['current_user'] = rpc_utils.prepare_for_serialization(
1710*9c5db199SXin Li        models.User.current_user().get_object_dict())
1711*9c5db199SXin Li    result['host_statuses'] = sorted(models.Host.Status.names)
1712*9c5db199SXin Li    result['job_statuses'] = sorted(models.HostQueueEntry.Status.names)
1713*9c5db199SXin Li    result['job_timeout_mins_default'] = models.Job.DEFAULT_TIMEOUT_MINS
1714*9c5db199SXin Li    result['job_max_runtime_mins_default'] = (
1715*9c5db199SXin Li        models.Job.DEFAULT_MAX_RUNTIME_MINS)
1716*9c5db199SXin Li    result['parse_failed_repair_default'] = bool(
1717*9c5db199SXin Li        models.Job.DEFAULT_PARSE_FAILED_REPAIR)
1718*9c5db199SXin Li    result['reboot_before_options'] = model_attributes.RebootBefore.names
1719*9c5db199SXin Li    result['reboot_after_options'] = model_attributes.RebootAfter.names
1720*9c5db199SXin Li    result['motd'] = rpc_utils.get_motd()
1721*9c5db199SXin Li    result['drone_sets_enabled'] = models.DroneSet.drone_sets_enabled()
1722*9c5db199SXin Li    result['drone_sets'] = drone_sets
1723*9c5db199SXin Li
1724*9c5db199SXin Li    result['status_dictionary'] = {"Aborted": "Aborted",
1725*9c5db199SXin Li                                   "Verifying": "Verifying Host",
1726*9c5db199SXin Li                                   "Provisioning": "Provisioning Host",
1727*9c5db199SXin Li                                   "Pending": "Waiting on other hosts",
1728*9c5db199SXin Li                                   "Running": "Running autoserv",
1729*9c5db199SXin Li                                   "Completed": "Autoserv completed",
1730*9c5db199SXin Li                                   "Failed": "Failed to complete",
1731*9c5db199SXin Li                                   "Queued": "Queued",
1732*9c5db199SXin Li                                   "Starting": "Next in host's queue",
1733*9c5db199SXin Li                                   "Stopped": "Other host(s) failed verify",
1734*9c5db199SXin Li                                   "Parsing": "Awaiting parse of final results",
1735*9c5db199SXin Li                                   "Gathering": "Gathering log files",
1736*9c5db199SXin Li                                   "Waiting": "Waiting for scheduler action",
1737*9c5db199SXin Li                                   "Archiving": "Archiving results",
1738*9c5db199SXin Li                                   "Resetting": "Resetting hosts"}
1739*9c5db199SXin Li
1740*9c5db199SXin Li    result['wmatrix_url'] = rpc_utils.get_wmatrix_url()
1741*9c5db199SXin Li    result['stainless_url'] = rpc_utils.get_stainless_url()
1742*9c5db199SXin Li    result['is_moblab'] = bool(utils.is_moblab())
1743*9c5db199SXin Li
1744*9c5db199SXin Li    return result
1745*9c5db199SXin Li
1746*9c5db199SXin Li
1747*9c5db199SXin Lidef get_server_time():
1748*9c5db199SXin Li    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
1749*9c5db199SXin Li
1750*9c5db199SXin Li
1751*9c5db199SXin Lidef ping_db():
1752*9c5db199SXin Li    """Simple connection test to db"""
1753*9c5db199SXin Li    try:
1754*9c5db199SXin Li        db_connection.cursor()
1755*9c5db199SXin Li    except DatabaseError:
1756*9c5db199SXin Li        return [False]
1757*9c5db199SXin Li    return [True]
1758*9c5db199SXin Li
1759*9c5db199SXin Li
1760*9c5db199SXin Lidef get_hosts_by_attribute(attribute, value):
1761*9c5db199SXin Li    """
1762*9c5db199SXin Li    Get the list of valid hosts that share the same host attribute value.
1763*9c5db199SXin Li
1764*9c5db199SXin Li    @param attribute: String of the host attribute to check.
1765*9c5db199SXin Li    @param value: String of the value that is shared between hosts.
1766*9c5db199SXin Li
1767*9c5db199SXin Li    @returns List of hostnames that all have the same host attribute and
1768*9c5db199SXin Li             value.
1769*9c5db199SXin Li    """
1770*9c5db199SXin Li    rows = models.HostAttribute.query_objects({'attribute': attribute,
1771*9c5db199SXin Li                                               'value': value})
1772*9c5db199SXin Li    if RESPECT_STATIC_ATTRIBUTES:
1773*9c5db199SXin Li        returned_hosts = set()
1774*9c5db199SXin Li        # Add hosts:
1775*9c5db199SXin Li        #     * Non-valid
1776*9c5db199SXin Li        #     * Exist in afe_host_attribute with given attribute.
1777*9c5db199SXin Li        #     * Don't exist in afe_static_host_attribute OR exist in
1778*9c5db199SXin Li        #       afe_static_host_attribute with the same given value.
1779*9c5db199SXin Li        for row in rows:
1780*9c5db199SXin Li            if row.host.invalid != 0:
1781*9c5db199SXin Li                continue
1782*9c5db199SXin Li
1783*9c5db199SXin Li            static_hosts = models.StaticHostAttribute.query_objects(
1784*9c5db199SXin Li                {'host_id': row.host.id, 'attribute': attribute})
1785*9c5db199SXin Li            values = [static_host.value for static_host in static_hosts]
1786*9c5db199SXin Li            if len(values) == 0 or values[0] == value:
1787*9c5db199SXin Li                returned_hosts.add(row.host.hostname)
1788*9c5db199SXin Li
1789*9c5db199SXin Li        # Add hosts:
1790*9c5db199SXin Li        #     * Non-valid
1791*9c5db199SXin Li        #     * Exist in afe_static_host_attribute with given attribute
1792*9c5db199SXin Li        #       and value
1793*9c5db199SXin Li        #     * No need to check whether each static attribute has its
1794*9c5db199SXin Li        #       corresponding entry in afe_host_attribute since it is ensured
1795*9c5db199SXin Li        #       in inventory sync.
1796*9c5db199SXin Li        static_rows = models.StaticHostAttribute.query_objects(
1797*9c5db199SXin Li                {'attribute': attribute, 'value': value})
1798*9c5db199SXin Li        for row in static_rows:
1799*9c5db199SXin Li            if row.host.invalid != 0:
1800*9c5db199SXin Li                continue
1801*9c5db199SXin Li
1802*9c5db199SXin Li            returned_hosts.add(row.host.hostname)
1803*9c5db199SXin Li
1804*9c5db199SXin Li        return list(returned_hosts)
1805*9c5db199SXin Li    else:
1806*9c5db199SXin Li        return [row.host.hostname for row in rows if row.host.invalid == 0]
1807*9c5db199SXin Li
1808*9c5db199SXin Li
1809*9c5db199SXin Lidef _get_control_file_by_suite(suite_name):
1810*9c5db199SXin Li    """Get control file contents by suite name.
1811*9c5db199SXin Li
1812*9c5db199SXin Li    @param suite_name: Suite name as string.
1813*9c5db199SXin Li    @returns: Control file contents as string.
1814*9c5db199SXin Li    """
1815*9c5db199SXin Li    getter = control_file_getter.FileSystemGetter(
1816*9c5db199SXin Li            [_CONFIG.get_config_value('SCHEDULER',
1817*9c5db199SXin Li                                      'drone_installation_directory')])
1818*9c5db199SXin Li    return getter.get_control_file_contents_by_name(suite_name)
1819*9c5db199SXin Li
1820*9c5db199SXin Li
1821*9c5db199SXin Li@rpc_utils.route_rpc_to_main
1822*9c5db199SXin Lidef create_suite_job(
1823*9c5db199SXin Li        name='',
1824*9c5db199SXin Li        board='',
1825*9c5db199SXin Li        pool='',
1826*9c5db199SXin Li        child_dependencies=(),
1827*9c5db199SXin Li        control_file='',
1828*9c5db199SXin Li        check_hosts=True,
1829*9c5db199SXin Li        num=None,
1830*9c5db199SXin Li        file_bugs=False,
1831*9c5db199SXin Li        timeout=24,
1832*9c5db199SXin Li        timeout_mins=None,
1833*9c5db199SXin Li        priority=priorities.Priority.DEFAULT,
1834*9c5db199SXin Li        suite_args=None,
1835*9c5db199SXin Li        wait_for_results=True,
1836*9c5db199SXin Li        job_retry=False,
1837*9c5db199SXin Li        max_retries=None,
1838*9c5db199SXin Li        max_runtime_mins=None,
1839*9c5db199SXin Li        suite_min_duts=0,
1840*9c5db199SXin Li        offload_failures_only=False,
1841*9c5db199SXin Li        builds=None,
1842*9c5db199SXin Li        test_source_build=None,
1843*9c5db199SXin Li        run_prod_code=False,
1844*9c5db199SXin Li        delay_minutes=0,
1845*9c5db199SXin Li        is_cloning=False,
1846*9c5db199SXin Li        job_keyvals=None,
1847*9c5db199SXin Li        test_args=None,
1848*9c5db199SXin Li        **kwargs):
1849*9c5db199SXin Li    """
1850*9c5db199SXin Li    Create a job to run a test suite on the given device with the given image.
1851*9c5db199SXin Li
1852*9c5db199SXin Li    When the timeout specified in the control file is reached, the
1853*9c5db199SXin Li    job is guaranteed to have completed and results will be available.
1854*9c5db199SXin Li
1855*9c5db199SXin Li    @param name: The test name if control_file is supplied, otherwise the name
1856*9c5db199SXin Li                 of the test suite to run, e.g. 'bvt'.
1857*9c5db199SXin Li    @param board: the kind of device to run the tests on.
1858*9c5db199SXin Li    @param builds: the builds to install e.g.
1859*9c5db199SXin Li                   {'cros-version:': 'x86-alex-release/R18-1655.0.0',
1860*9c5db199SXin Li                    'fwrw-version:':  'x86-alex-firmware/R36-5771.50.0',
1861*9c5db199SXin Li                    'fwro-version:':  'x86-alex-firmware/R36-5771.49.0'}
1862*9c5db199SXin Li                   If builds is given a value, it overrides argument build.
1863*9c5db199SXin Li    @param test_source_build: Build that contains the server-side test code.
1864*9c5db199SXin Li    @param pool: Specify the pool of machines to use for scheduling
1865*9c5db199SXin Li            purposes.
1866*9c5db199SXin Li    @param child_dependencies: (optional) list of additional dependency labels
1867*9c5db199SXin Li            (strings) that will be added as dependency labels to child jobs.
1868*9c5db199SXin Li    @param control_file: the control file of the job.
1869*9c5db199SXin Li    @param check_hosts: require appropriate live hosts to exist in the lab.
1870*9c5db199SXin Li    @param num: Specify the number of machines to schedule across (integer).
1871*9c5db199SXin Li                Leave unspecified or use None to use default sharding factor.
1872*9c5db199SXin Li    @param file_bugs: File a bug on each test failure in this suite.
1873*9c5db199SXin Li    @param timeout: The max lifetime of this suite, in hours.
1874*9c5db199SXin Li    @param timeout_mins: The max lifetime of this suite, in minutes. Takes
1875*9c5db199SXin Li                         priority over timeout.
1876*9c5db199SXin Li    @param priority: Integer denoting priority. Higher is more important.
1877*9c5db199SXin Li    @param suite_args: Optional arguments which will be parsed by the suite
1878*9c5db199SXin Li                       control file. Used by control.test_that_wrapper to
1879*9c5db199SXin Li                       determine which tests to run.
1880*9c5db199SXin Li    @param wait_for_results: Set to False to run the suite job without waiting
1881*9c5db199SXin Li                             for test jobs to finish. Default is True.
1882*9c5db199SXin Li    @param job_retry: Set to True to enable job-level retry. Default is False.
1883*9c5db199SXin Li    @param max_retries: Integer, maximum job retries allowed at suite level.
1884*9c5db199SXin Li                        None for no max.
1885*9c5db199SXin Li    @param max_runtime_mins: Maximum amount of time a job can be running in
1886*9c5db199SXin Li                             minutes.
1887*9c5db199SXin Li    @param suite_min_duts: Integer. Scheduler will prioritize getting the
1888*9c5db199SXin Li                           minimum number of machines for the suite when it is
1889*9c5db199SXin Li                           competing with another suite that has a higher
1890*9c5db199SXin Li                           priority but already got minimum machines it needs.
1891*9c5db199SXin Li    @param offload_failures_only: Only enable gs_offloading for failed jobs.
1892*9c5db199SXin Li    @param run_prod_code: If True, the suite will run the test code that
1893*9c5db199SXin Li                          lives in prod aka the test code currently on the
1894*9c5db199SXin Li                          lab servers. If False, the control files and test
1895*9c5db199SXin Li                          code for this suite run will be retrieved from the
1896*9c5db199SXin Li                          build artifacts.
1897*9c5db199SXin Li    @param delay_minutes: Delay the creation of test jobs for a given number of
1898*9c5db199SXin Li                          minutes.
1899*9c5db199SXin Li    @param is_cloning: True if creating a cloning job.
1900*9c5db199SXin Li    @param job_keyvals: A dict of job keyvals to be inject to control file.
1901*9c5db199SXin Li    @param test_args: A dict of args passed all the way to each individual test
1902*9c5db199SXin Li                      that will be actually run.
1903*9c5db199SXin Li    @param kwargs: extra keyword args. NOT USED.
1904*9c5db199SXin Li
1905*9c5db199SXin Li    @raises ControlFileNotFound: if a unique suite control file doesn't exist.
1906*9c5db199SXin Li    @raises NoControlFileList: if we can't list the control files at all.
1907*9c5db199SXin Li    @raises StageControlFileFailure: If the dev server throws 500 while
1908*9c5db199SXin Li                                     staging test_suites.
1909*9c5db199SXin Li    @raises ControlFileEmpty: if the control file exists on the server, but
1910*9c5db199SXin Li                              can't be read.
1911*9c5db199SXin Li
1912*9c5db199SXin Li    @return: the job ID of the suite; -1 on error.
1913*9c5db199SXin Li    """
1914*9c5db199SXin Li    if num is not None:
1915*9c5db199SXin Li        warnings.warn('num is deprecated for create_suite_job')
1916*9c5db199SXin Li    del num
1917*9c5db199SXin Li
1918*9c5db199SXin Li    if builds is None:
1919*9c5db199SXin Li        builds = {}
1920*9c5db199SXin Li
1921*9c5db199SXin Li    # Default test source build to CrOS build if it's not specified and
1922*9c5db199SXin Li    # run_prod_code is set to False.
1923*9c5db199SXin Li    if not run_prod_code:
1924*9c5db199SXin Li        test_source_build = Suite.get_test_source_build(
1925*9c5db199SXin Li                builds, test_source_build=test_source_build)
1926*9c5db199SXin Li
1927*9c5db199SXin Li    sample_dut = rpc_utils.get_sample_dut(board, pool)
1928*9c5db199SXin Li
1929*9c5db199SXin Li    suite_name = suite_common.canonicalize_suite_name(name)
1930*9c5db199SXin Li    if run_prod_code:
1931*9c5db199SXin Li        ds = dev_server.resolve(test_source_build, hostname=sample_dut)
1932*9c5db199SXin Li        keyvals = {}
1933*9c5db199SXin Li    else:
1934*9c5db199SXin Li        ds, keyvals = suite_common.stage_build_artifacts(
1935*9c5db199SXin Li                test_source_build, hostname=sample_dut)
1936*9c5db199SXin Li    keyvals[constants.SUITE_MIN_DUTS_KEY] = suite_min_duts
1937*9c5db199SXin Li
1938*9c5db199SXin Li    # Do not change this naming convention without updating
1939*9c5db199SXin Li    # site_utils.parse_job_name.
1940*9c5db199SXin Li    if run_prod_code:
1941*9c5db199SXin Li        # If run_prod_code is True, test_source_build is not set, use the
1942*9c5db199SXin Li        # first build in the builds list for the sutie job name.
1943*9c5db199SXin Li        name = '%s-%s' % (builds.values()[0], suite_name)
1944*9c5db199SXin Li    else:
1945*9c5db199SXin Li        name = '%s-%s' % (test_source_build, suite_name)
1946*9c5db199SXin Li
1947*9c5db199SXin Li    timeout_mins = timeout_mins or timeout * 60
1948*9c5db199SXin Li    max_runtime_mins = max_runtime_mins or timeout * 60
1949*9c5db199SXin Li
1950*9c5db199SXin Li    if not board:
1951*9c5db199SXin Li        board = utils.ParseBuildName(builds[provision.CROS_VERSION_PREFIX])[0]
1952*9c5db199SXin Li
1953*9c5db199SXin Li    if run_prod_code:
1954*9c5db199SXin Li        control_file = _get_control_file_by_suite(suite_name)
1955*9c5db199SXin Li
1956*9c5db199SXin Li    if not control_file:
1957*9c5db199SXin Li        # No control file was supplied so look it up from the build artifacts.
1958*9c5db199SXin Li        control_file = suite_common.get_control_file_by_build(
1959*9c5db199SXin Li                test_source_build, ds, suite_name)
1960*9c5db199SXin Li
1961*9c5db199SXin Li    # Prepend builds and board to the control file.
1962*9c5db199SXin Li    if is_cloning:
1963*9c5db199SXin Li        control_file = tools.remove_injection(control_file)
1964*9c5db199SXin Li
1965*9c5db199SXin Li    if suite_args is None:
1966*9c5db199SXin Li        suite_args = dict()
1967*9c5db199SXin Li
1968*9c5db199SXin Li    inject_dict = {
1969*9c5db199SXin Li        'board': board,
1970*9c5db199SXin Li        # `build` is needed for suites like AU to stage image inside suite
1971*9c5db199SXin Li        # control file.
1972*9c5db199SXin Li        'build': test_source_build,
1973*9c5db199SXin Li        'builds': builds,
1974*9c5db199SXin Li        'check_hosts': check_hosts,
1975*9c5db199SXin Li        'pool': pool,
1976*9c5db199SXin Li        'child_dependencies': child_dependencies,
1977*9c5db199SXin Li        'file_bugs': file_bugs,
1978*9c5db199SXin Li        'timeout': timeout,
1979*9c5db199SXin Li        'timeout_mins': timeout_mins,
1980*9c5db199SXin Li        'devserver_url': ds.url(),
1981*9c5db199SXin Li        'priority': priority,
1982*9c5db199SXin Li        'wait_for_results': wait_for_results,
1983*9c5db199SXin Li        'job_retry': job_retry,
1984*9c5db199SXin Li        'max_retries': max_retries,
1985*9c5db199SXin Li        'max_runtime_mins': max_runtime_mins,
1986*9c5db199SXin Li        'offload_failures_only': offload_failures_only,
1987*9c5db199SXin Li        'test_source_build': test_source_build,
1988*9c5db199SXin Li        'run_prod_code': run_prod_code,
1989*9c5db199SXin Li        'delay_minutes': delay_minutes,
1990*9c5db199SXin Li        'job_keyvals': job_keyvals,
1991*9c5db199SXin Li        'test_args': test_args,
1992*9c5db199SXin Li    }
1993*9c5db199SXin Li    inject_dict.update(suite_args)
1994*9c5db199SXin Li    control_file = tools.inject_vars(inject_dict, control_file)
1995*9c5db199SXin Li
1996*9c5db199SXin Li    return rpc_utils.create_job_common(name,
1997*9c5db199SXin Li                                       priority=priority,
1998*9c5db199SXin Li                                       timeout_mins=timeout_mins,
1999*9c5db199SXin Li                                       max_runtime_mins=max_runtime_mins,
2000*9c5db199SXin Li                                       control_type='Server',
2001*9c5db199SXin Li                                       control_file=control_file,
2002*9c5db199SXin Li                                       hostless=True,
2003*9c5db199SXin Li                                       keyvals=keyvals)
2004*9c5db199SXin Li
2005*9c5db199SXin Li
2006*9c5db199SXin Lidef get_job_history(**filter_data):
2007*9c5db199SXin Li    """Get history of the job, including the special tasks executed for the job
2008*9c5db199SXin Li
2009*9c5db199SXin Li    @param filter_data: filter for the call, should at least include
2010*9c5db199SXin Li                        {'job_id': [job id]}
2011*9c5db199SXin Li    @returns: JSON string of the job's history, including the information such
2012*9c5db199SXin Li              as the hosts run the job and the special tasks executed before
2013*9c5db199SXin Li              and after the job.
2014*9c5db199SXin Li    """
2015*9c5db199SXin Li    job_id = filter_data['job_id']
2016*9c5db199SXin Li    job_info = job_history.get_job_info(job_id)
2017*9c5db199SXin Li    return rpc_utils.prepare_for_serialization(job_info.get_history())
2018*9c5db199SXin Li
2019*9c5db199SXin Li
2020*9c5db199SXin Lidef shard_heartbeat(shard_hostname, jobs=(), hqes=(), known_job_ids=(),
2021*9c5db199SXin Li                    known_host_ids=(), known_host_statuses=()):
2022*9c5db199SXin Li    """Receive updates for job statuses from shards and assign hosts and jobs.
2023*9c5db199SXin Li
2024*9c5db199SXin Li    @param shard_hostname: Hostname of the calling shard
2025*9c5db199SXin Li    @param jobs: Jobs in serialized form that should be updated with newer
2026*9c5db199SXin Li                 status from a shard.
2027*9c5db199SXin Li    @param hqes: Hostqueueentries in serialized form that should be updated with
2028*9c5db199SXin Li                 newer status from a shard. Note that for every hostqueueentry
2029*9c5db199SXin Li                 the corresponding job must be in jobs.
2030*9c5db199SXin Li    @param known_job_ids: List of ids of jobs the shard already has.
2031*9c5db199SXin Li    @param known_host_ids: List of ids of hosts the shard already has.
2032*9c5db199SXin Li    @param known_host_statuses: List of statuses of hosts the shard already has.
2033*9c5db199SXin Li
2034*9c5db199SXin Li    @returns: Serialized representations of hosts, jobs, suite job keyvals
2035*9c5db199SXin Li              and their dependencies to be inserted into a shard's database.
2036*9c5db199SXin Li    """
2037*9c5db199SXin Li    # The following alternatives to sending host and job ids in every heartbeat
2038*9c5db199SXin Li    # have been considered:
2039*9c5db199SXin Li    # 1. Sending the highest known job and host ids. This would work for jobs:
2040*9c5db199SXin Li    #    Newer jobs always have larger ids. Also, if a job is not assigned to a
2041*9c5db199SXin Li    #    particular shard during a heartbeat, it never will be assigned to this
2042*9c5db199SXin Li    #    shard later.
2043*9c5db199SXin Li    #    This is not true for hosts though: A host that is leased won't be sent
2044*9c5db199SXin Li    #    to the shard now, but might be sent in a future heartbeat. This means
2045*9c5db199SXin Li    #    sometimes hosts should be transfered that have a lower id than the
2046*9c5db199SXin Li    #    maximum host id the shard knows.
2047*9c5db199SXin Li    # 2. Send the number of jobs/hosts the shard knows to the main in each
2048*9c5db199SXin Li    #    heartbeat. Compare these to the number of records that already have
2049*9c5db199SXin Li    #    the shard_id set to this shard. In the normal case, they should match.
2050*9c5db199SXin Li    #    In case they don't, resend all entities of that type.
2051*9c5db199SXin Li    #    This would work well for hosts, because there aren't that many.
2052*9c5db199SXin Li    #    Resending all jobs is quite a big overhead though.
2053*9c5db199SXin Li    #    Also, this approach might run into edge cases when entities are
2054*9c5db199SXin Li    #    ever deleted.
2055*9c5db199SXin Li    # 3. Mixtures of the above: Use 1 for jobs and 2 for hosts.
2056*9c5db199SXin Li    #    Using two different approaches isn't consistent and might cause
2057*9c5db199SXin Li    #    confusion. Also the issues with the case of deletions might still
2058*9c5db199SXin Li    #    occur.
2059*9c5db199SXin Li    #
2060*9c5db199SXin Li    # The overhead of sending all job and host ids in every heartbeat is low:
2061*9c5db199SXin Li    # At peaks one board has about 1200 created but unfinished jobs.
2062*9c5db199SXin Li    # See the numbers here: http://goo.gl/gQCGWH
2063*9c5db199SXin Li    # Assuming that job id's have 6 digits and that json serialization takes a
2064*9c5db199SXin Li    # comma and a space as overhead, the traffic per id sent is about 8 bytes.
2065*9c5db199SXin Li    # If 5000 ids need to be sent, this means 40 kilobytes of traffic.
2066*9c5db199SXin Li    # A NOT IN query with 5000 ids took about 30ms in tests made.
2067*9c5db199SXin Li    # These numbers seem low enough to outweigh the disadvantages of the
2068*9c5db199SXin Li    # solutions described above.
2069*9c5db199SXin Li    shard_obj = rpc_utils.retrieve_shard(shard_hostname=shard_hostname)
2070*9c5db199SXin Li    rpc_utils.persist_records_sent_from_shard(shard_obj, jobs, hqes)
2071*9c5db199SXin Li    assert len(known_host_ids) == len(known_host_statuses)
2072*9c5db199SXin Li    for i in range(len(known_host_ids)):
2073*9c5db199SXin Li        host_model = models.Host.objects.get(pk=known_host_ids[i])
2074*9c5db199SXin Li        if host_model.status != known_host_statuses[i]:
2075*9c5db199SXin Li            host_model.status = known_host_statuses[i]
2076*9c5db199SXin Li            host_model.save()
2077*9c5db199SXin Li
2078*9c5db199SXin Li    hosts, jobs, suite_keyvals, inc_ids = rpc_utils.find_records_for_shard(
2079*9c5db199SXin Li            shard_obj, known_job_ids=known_job_ids,
2080*9c5db199SXin Li            known_host_ids=known_host_ids)
2081*9c5db199SXin Li    return {
2082*9c5db199SXin Li        'hosts': [host.serialize() for host in hosts],
2083*9c5db199SXin Li        'jobs': [job.serialize() for job in jobs],
2084*9c5db199SXin Li        'suite_keyvals': [kv.serialize() for kv in suite_keyvals],
2085*9c5db199SXin Li        'incorrect_host_ids': [int(i) for i in inc_ids],
2086*9c5db199SXin Li    }
2087*9c5db199SXin Li
2088*9c5db199SXin Li
2089*9c5db199SXin Lidef get_shards(**filter_data):
2090*9c5db199SXin Li    """Return a list of all shards.
2091*9c5db199SXin Li
2092*9c5db199SXin Li    @returns A sequence of nested dictionaries of shard information.
2093*9c5db199SXin Li    """
2094*9c5db199SXin Li    shards = models.Shard.query_objects(filter_data)
2095*9c5db199SXin Li    serialized_shards = rpc_utils.prepare_rows_as_nested_dicts(shards, ())
2096*9c5db199SXin Li    for serialized, shard in zip(serialized_shards, shards):
2097*9c5db199SXin Li        serialized['labels'] = [label.name for label in shard.labels.all()]
2098*9c5db199SXin Li
2099*9c5db199SXin Li    return serialized_shards
2100*9c5db199SXin Li
2101*9c5db199SXin Li
2102*9c5db199SXin Lidef _assign_board_to_shard_precheck(labels):
2103*9c5db199SXin Li    """Verify whether board labels are valid to be added to a given shard.
2104*9c5db199SXin Li
2105*9c5db199SXin Li    First check whether board label is in correct format. Second, check whether
2106*9c5db199SXin Li    the board label exist. Third, check whether the board has already been
2107*9c5db199SXin Li    assigned to shard.
2108*9c5db199SXin Li
2109*9c5db199SXin Li    @param labels: Board labels separated by comma.
2110*9c5db199SXin Li
2111*9c5db199SXin Li    @raises error.RPCException: If label provided doesn't start with `board:`
2112*9c5db199SXin Li            or board has been added to shard already.
2113*9c5db199SXin Li    @raises models.Label.DoesNotExist: If the label specified doesn't exist.
2114*9c5db199SXin Li
2115*9c5db199SXin Li    @returns: A list of label models that ready to be added to shard.
2116*9c5db199SXin Li    """
2117*9c5db199SXin Li    if not labels:
2118*9c5db199SXin Li        # allow creation of label-less shards (labels='' would otherwise fail the
2119*9c5db199SXin Li        # checks below)
2120*9c5db199SXin Li        return []
2121*9c5db199SXin Li    labels = labels.split(',')
2122*9c5db199SXin Li    label_models = []
2123*9c5db199SXin Li    for label in labels:
2124*9c5db199SXin Li        # Check whether the board label is in correct format.
2125*9c5db199SXin Li        if not label.startswith('board:'):
2126*9c5db199SXin Li            raise error.RPCException('Sharding only supports `board:.*` label.')
2127*9c5db199SXin Li        # Check whether the board label exist. If not, exception will be thrown
2128*9c5db199SXin Li        # by smart_get function.
2129*9c5db199SXin Li        label = models.Label.smart_get(label)
2130*9c5db199SXin Li        # Check whether the board has been sharded already
2131*9c5db199SXin Li        try:
2132*9c5db199SXin Li            shard = models.Shard.objects.get(labels=label)
2133*9c5db199SXin Li            raise error.RPCException(
2134*9c5db199SXin Li                    '%s is already on shard %s' % (label, shard.hostname))
2135*9c5db199SXin Li        except models.Shard.DoesNotExist:
2136*9c5db199SXin Li            # board is not on any shard, so it's valid.
2137*9c5db199SXin Li            label_models.append(label)
2138*9c5db199SXin Li    return label_models
2139*9c5db199SXin Li
2140*9c5db199SXin Li
def add_shard(hostname, labels):
    """Add a shard and start running jobs on it.

    @param hostname: The hostname of the shard to be added; needs to be unique.
    @param labels: Board labels separated by comma. Jobs of one of the labels
                   will be assigned to the shard.

    @raises error.RPCException: If a label provided doesn't start with `board:`
            or the board has been added to a shard already.
    @raises model_logic.ValidationError: If a shard with the given hostname
            already exists.
    @raises models.Label.DoesNotExist: If the label specified doesn't exist.

    @returns: The id of the added shard.
    """
    # Validate first so no shard row is created when the labels are bad.
    board_labels = _assign_board_to_shard_precheck(labels)
    shard = models.Shard.add_object(hostname=hostname)
    for board_label in board_labels:
        shard.labels.add(board_label)
    return shard.id
2161*9c5db199SXin Li
2162*9c5db199SXin Li
def add_board_to_shard(hostname, labels):
    """Add boards to an existing shard.

    @param hostname: The hostname of the shard to be changed.
    @param labels: Board labels separated by comma.

    @raises error.RPCException: If a label provided doesn't start with `board:`
            or the board has been added to a shard already.
    @raises models.Label.DoesNotExist: If the label specified doesn't exist.

    @returns: The id of the changed shard.
    """
    # Validate before touching the shard so a bad label leaves it unchanged.
    board_labels = _assign_board_to_shard_precheck(labels)
    shard = models.Shard.objects.get(hostname=hostname)
    for board_label in board_labels:
        shard.labels.add(board_label)
    return shard.id
2180*9c5db199SXin Li
2181*9c5db199SXin Li
2182*9c5db199SXin Li# Remove board RPCs are rare, so we can afford to make them a bit more
2183*9c5db199SXin Li# expensive (by performing in a transaction) in order to guarantee
2184*9c5db199SXin Li# atomicity.
@transaction.commit_on_success
def remove_board_from_shard(hostname, label):
    """Remove a board from the given shard.

    Detaches the board label from the shard and un-assigns every host
    carrying that board label (their shard is reset to None).  Runs inside
    a transaction (see decorator) so the label removal and the host update
    are atomic.

    @param hostname: The hostname of the shard to be changed.
    @param label: Board label (name, id, or unique dict) to remove.

    @raises models.Label.DoesNotExist: If the label specified doesn't exist.
    @raises error.RPCException: If the label is not assigned to this shard.
    """
    shard = models.Shard.objects.get(hostname=hostname)
    label = models.Label.smart_get(label)
    if label not in shard.labels.all():
        raise error.RPCException(
          'Cannot remove label from shard that does not belong to it.')

    shard.labels.remove(label)
    # Hosts must be looked up via the static label when the label has been
    # migrated to the static-labels table; otherwise the normal labels
    # relation is used.
    if label.is_replaced_by_static():
        static_label = models.StaticLabel.smart_get(label.name)
        models.Host.objects.filter(
                static_labels__in=[static_label]).update(shard=None)
    else:
        models.Host.objects.filter(labels__in=[label]).update(shard=None)
2208*9c5db199SXin Li
2209*9c5db199SXin Li
def delete_shard(hostname):
    """Delete a shard and reclaim all resources from it.

    This claims back all assigned hosts from the shard.

    @param hostname: Hostname of the shard to delete; must identify an
            existing shard (rpc_utils.retrieve_shard resolves it).
    """
    shard = rpc_utils.retrieve_shard(shard_hostname=hostname)

    # Remove shard information.
    models.Host.objects.filter(shard=shard).update(shard=None)

    # Note: The original job-cleanup query was performed with django call
    #   models.Job.objects.filter(shard=shard).update(shard=None)
    #
    # But that started becoming unreliable due to the large size of afe_jobs.
    #
    # We don't need atomicity here, so the new cleanup method is iterative, in
    # chunks of 100k jobs.
    QUERY = ('UPDATE afe_jobs SET shard_id = NULL WHERE shard_id = %s '
             'LIMIT 100000')
    try:
        with contextlib.closing(db_connection.cursor()) as cursor:
            clear_jobs = True
            # Guard against interpolating 'None' into the raw SQL below.
            assert shard.id is not None
            while clear_jobs:
                cursor.execute(QUERY % shard.id)
                # NOTE(review): loop termination relies on fetchone()
                # returning a truthy value after an UPDATE that modified
                # rows -- confirm against the MySQL driver in use;
                # cursor.rowcount would be the conventional check here.
                clear_jobs = bool(cursor.fetchone())
    # Unit tests use sqlite backend instead of MySQL. sqlite does not support
    # UPDATE ... LIMIT, so fall back to the old behavior.
    except DatabaseError as e:
        if 'syntax error' in str(e):
            models.Job.objects.filter(shard=shard).update(shard=None)
        else:
            raise

    # Finally detach the shard's labels and delete the shard row itself.
    shard.labels.clear()
    shard.delete()
2246*9c5db199SXin Li
2247*9c5db199SXin Li
def get_servers(hostname=None, role=None, status=None):
    """Deprecated: get a list of servers with matching role and status.

    This RPC is retired along with server_manager_utils and now always
    raises; the parameters are kept only for interface compatibility.

    @param hostname: FQDN of the server.
    @param role: Name of the server role, e.g., drone, scheduler. Default to
                 None to match any role.
    @param status: Status of the server, e.g., primary, backup, repair_required.
                   Default to None to match any server status.

    @raises DeprecationWarning: Always; the backing module was removed.
    """
    raise DeprecationWarning("server_manager_utils has been removed.")
2261*9c5db199SXin Li
2262*9c5db199SXin Li
@rpc_utils.route_rpc_to_main
def get_stable_version(board=stable_version_utils.DEFAULT, android=False):
    """Get the stable version for the given board.

    @param board: Name of the board.
    @param android: Unused legacy parameter.  This is maintained for the
            sake of clients on old branches that still pass the
            parameter.  TODO(jrbarnette) Remove this completely once R68
            drops off stable.

    @return: Stable version of the given board; falls back to the value
             looked up for the DEFAULT board when the stable_versions
             table has no entry for this board.
    """
    # Android support was removed; fail loudly if a stale client sets it.
    assert not android, 'get_stable_version no longer supports `android`.'
    return stable_version_utils.get(board=board)
2279*9c5db199SXin Li
2280*9c5db199SXin Li
@rpc_utils.route_rpc_to_main
def get_all_stable_versions():
    """Get the stable versions for all boards.

    @return: A dictionary mapping board name to stable version.
    """
    return stable_version_utils.get_all()
2288*9c5db199SXin Li
2289*9c5db199SXin Li
@rpc_utils.route_rpc_to_main
def set_stable_version(version, board=stable_version_utils.DEFAULT):
    """Reject an attempt to modify the stable version for the given board.

    Setting stable versions through this RPC is no longer permitted; the
    call only logs the attempt and returns None.

    @param version: The new value of stable version for given board (ignored).
    @param board: Name of the board, default to value `DEFAULT` (ignored).
    """
    logging.warning("rpc_interface::set_stable_version: attempted to set stable version. setting the stable version is not permitted")
    return None
2299*9c5db199SXin Li
2300*9c5db199SXin Li
@rpc_utils.route_rpc_to_main
def delete_stable_version(board):
    """Delete the stable version entry for the given board.

    Delete a stable version entry in afe_stable_versions table for a given
    board, so the default stable version will be used.

    @param board: Name of the board.
    """
    stable_version_utils.delete(board=board)
2311*9c5db199SXin Li
2312*9c5db199SXin Li
def get_tests_by_build(build, ignore_invalid_tests=True):
    """Get the tests that are available for the specified build.

    @param build: unique name by which to refer to the image.
    @param ignore_invalid_tests: flag on if unparsable tests are ignored.

    @raises Exception: Re-raises the control-file parse error when
            ignore_invalid_tests is False.

    @return: A sorted (by test name) list of dicts describing all tests in
             the build, serialized for the RPC layer.
    """
    # Collect the control files specified in this build
    cfile_getter = control_file_lib._initialize_control_file_getter(build)
    if suite_common.ENABLE_CONTROLS_IN_BATCH:
        control_file_info_list = cfile_getter.get_suite_info()
        control_file_list = control_file_info_list.keys()
    else:
        control_file_list = cfile_getter.get_control_file_list()

    test_objects = []
    _id = 0
    for control_file_path in control_file_list:
        # Read and parse the control file
        if suite_common.ENABLE_CONTROLS_IN_BATCH:
            control_file = control_file_info_list[control_file_path]
        else:
            control_file = cfile_getter.get_control_file_contents(
                    control_file_path)
        try:
            control_obj = control_data.parse_control_string(control_file)
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        except Exception:
            logging.info('Failed to parse control file: %s', control_file_path)
            if not ignore_invalid_tests:
                raise
            # Bug fix: skip the unparsable file. Previously execution fell
            # through and built a test object from the previous iteration's
            # control_obj (or raised NameError on the first iteration).
            continue

        # Extract the values needed for the AFE from the control_obj.
        # The keys list represents attributes in the control_obj that
        # are required by the AFE
        keys = ['author', 'doc', 'name', 'time', 'test_type', 'experimental',
                'test_category', 'test_class', 'dependencies', 'run_verify',
                'sync_count', 'job_retries', 'path']

        # Missing attributes default to '' so the AFE always sees every key.
        test_object = {}
        for key in keys:
            test_object[key] = getattr(control_obj, key) if hasattr(
                    control_obj, key) else ''

        # Unfortunately, the AFE expects different key-names for certain
        # values, these must be corrected to avoid the risk of tests
        # being omitted by the AFE.
        # The 'id' is an additional value used in the AFE.
        # The control_data parsing does not reference 'run_reset', but it
        # is also used in the AFE and defaults to True.
        test_object['id'] = _id
        test_object['run_reset'] = True
        test_object['description'] = test_object.get('doc', '')
        test_object['test_time'] = test_object.get('time', 0)

        # TODO(crbug.com/873716) DEPRECATED. Remove entirely.
        test_object['test_retry'] = 0

        # Fix the test name to be consistent with the current presentation
        # of test names in the AFE: '<dirname>[:<control-file suffixes>]'.
        testpath, subname = os.path.split(control_file_path)
        testname = os.path.basename(testpath)
        subname = subname.split('.')[1:]
        if subname:
            testname = '%s:%s' % (testname, ':'.join(subname))

        test_object['name'] = testname

        # Correct the test path as parse_control_string sets an empty string.
        test_object['path'] = control_file_path

        _id += 1
        test_objects.append(test_object)

    test_objects = sorted(test_objects, key=lambda x: x.get('name'))
    return rpc_utils.prepare_for_serialization(test_objects)
2389*9c5db199SXin Li
2390*9c5db199SXin Li
@rpc_utils.route_rpc_to_main
def get_lab_health_indicators(board=None):
    """Get the health indicators for the whole lab.

    The indicators now includes:
    1. lab is closed or not.
    2. Available DUTs list for a given board.
    3. Devserver capacity.
    4. When is the next major DUT utilization (e.g. CQ is coming in 3 minutes).

    @param board: if board is specified, a list of available DUTs will be
        returned for it. Otherwise, skip this indicator.

    @returns: A health indicator object; currently a placeholder with all
        fields unset.
    """
    # No indicator data is computed yet; every field is left as None.
    return LabHealthIndicator(None, None, None, None)
2407