#
# Copyright (c) 2022 Project CHIP Authors
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import asyncio
import builtins
import inspect
import json
import logging
import os
import pathlib
import queue
import random
import re
import sys
import textwrap
import threading
import time
import typing
import uuid
from binascii import hexlify, unhexlify
from dataclasses import asdict as dataclass_asdict
from dataclasses import dataclass, field
from datetime import datetime, timedelta, timezone
from enum import Enum, IntFlag
from functools import partial
from itertools import chain
from typing import Any, Iterable, List, Optional, Tuple
from chip.tlv import float32, uint
# isort: off
from chip import ChipDeviceCtrl # Needed before chip.FabricAdmin
import chip.FabricAdmin # Needed before chip.CertificateAuthority
import chip.CertificateAuthority
from chip.ChipDeviceCtrl import CommissioningParameters
# isort: on
from time import sleep
import chip.clusters as Clusters
import chip.logging
import chip.native
from chip import discovery
from chip.ChipStack import ChipStack
from chip.clusters import Attribute
from chip.clusters import ClusterObjects as ClusterObjects
from chip.clusters.Attribute import EventReadResult, SubscriptionTransaction, TypedAttributePath
from chip.exceptions import ChipStackError
from chip.interaction_model import InteractionModelError, Status
from chip.setup_payload import SetupPayload
from chip.storage import PersistentStorage
from chip.testing.global_attribute_ids import GlobalAttributeIds
from chip.testing.pics import read_pics_from_file
from chip.tracing import TracingContext
from mobly import asserts, base_test, signals, utils
from mobly.config_parser import ENV_MOBLY_LOGPATH, TestRunConfig
from mobly.test_runner import TestRunner
try:
from matter_yamltests.hooks import TestRunnerHooks
except ImportError:
class TestRunnerHooks:
pass
# TODO: Add utility to commission a device if needed
# TODO: Add utilities to keep track of controllers/fabrics
logger = logging.getLogger("matter.python_testing")
logger.setLevel(logging.INFO)
DiscoveryFilterType = ChipDeviceCtrl.DiscoveryFilterType
_DEFAULT_ADMIN_VENDOR_ID = 0xFFF1
_DEFAULT_STORAGE_PATH = "admin_storage.json"
_DEFAULT_LOG_PATH = "/tmp/matter_testing/logs"
_DEFAULT_CONTROLLER_NODE_ID = 112233
_DEFAULT_DUT_NODE_ID = 0x12344321
_DEFAULT_TRUST_ROOT_INDEX = 1
# Mobly cannot deal with user config passing of ctypes objects,
# so we use this dict of uuid -> object to recover items stashed
# by reference.
_GLOBAL_DATA = {}
def stash_globally(o: object) -> str:
id = str(uuid.uuid1())
_GLOBAL_DATA[id] = o
return id
def unstash_globally(id: str) -> object:
return _GLOBAL_DATA.get(id)
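# A minimal usage sketch for the stash/unstash helpers above (names are
# illustrative, not part of the SDK): stash an unpicklable object before
# building the Mobly config, pass the returned token through user params,
# then recover the object inside the test.
#
#   token = stash_globally(my_ctypes_handle)
#   config.user_params["handle_token"] = token
#   ...
#   handle = unstash_globally(self.user_params["handle_token"])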
def default_paa_rootstore_from_root(root_path: pathlib.Path) -> Optional[pathlib.Path]:
"""Attempt to find a PAA trust store following SDK convention at `root_path`
This attempts to find {root_path}/credentials/development/paa-root-certs.
Returns the fully resolved path on success or None if not found.
"""
start_path = root_path.resolve()
cred_path = start_path.joinpath("credentials")
dev_path = cred_path.joinpath("development")
paa_path = dev_path.joinpath("paa-root-certs")
return paa_path.resolve() if all([path.exists() for path in [cred_path, dev_path, paa_path]]) else None
def get_default_paa_trust_store(root_path: pathlib.Path) -> pathlib.Path:
    """Attempt to find a PAA trust store starting at `root_path`.
    This searches by heuristics, going up one directory level at a time
    until a store is found, and stops after a fixed number of levels.
    Returns the current working directory if no PAA store is found.
    """
# TODO: Add heuristics about TH default PAA location
cur_dir = pathlib.Path.cwd()
max_levels = 10
for level in range(max_levels):
paa_trust_store_path = default_paa_rootstore_from_root(cur_dir)
if paa_trust_store_path is not None:
return paa_trust_store_path
# Go back one level
cur_dir = cur_dir.joinpath("..")
else:
# On not having found a PAA dir, just return current dir to avoid blow-ups
return pathlib.Path.cwd()
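# Example (hypothetical layout): run from an SDK checkout, the lookup above
# resolves to <checkout>/credentials/development/paa-root-certs; run from an
# unrelated directory, it falls back to the current working directory.
#
#   paa_path = get_default_paa_trust_store(pathlib.Path.cwd())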
def type_matches(received_value, desired_type):
""" Checks if the value received matches the expected type.
Handles unpacking Nullable and Optional types and
compares list value types for non-empty lists.
"""
if typing.get_origin(desired_type) == typing.Union:
return any(type_matches(received_value, t) for t in typing.get_args(desired_type))
elif typing.get_origin(desired_type) == list:
if isinstance(received_value, list):
# Assume an empty list is of the correct type
return True if received_value == [] else any(type_matches(received_value[0], t) for t in typing.get_args(desired_type))
else:
return False
elif desired_type == uint:
return isinstance(received_value, int) and received_value >= 0
elif desired_type == float32:
return isinstance(received_value, float)
else:
return isinstance(received_value, desired_type)
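# Brief illustration of the matching rules above (values illustrative):
# unions (including Optional) are matched per-arm, lists are checked via
# their first element, and `uint` requires a non-negative int.
#
#   type_matches(5, uint)                      # True
#   type_matches(-5, uint)                     # False
#   type_matches(None, typing.Optional[uint])  # True
#   type_matches([1, 2], typing.List[uint])    # True (first element checked)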
# TODO(#31177): Need to add unit tests for all time conversion methods.
def utc_time_in_matter_epoch(desired_datetime: Optional[datetime] = None):
    """ Returns the time in the Matter epoch in microseconds (us).
    If desired_datetime is None, the current UTC time is used.
    """
if desired_datetime is None:
utc_native = datetime.now(tz=timezone.utc)
else:
utc_native = desired_datetime
# Matter epoch is 0 hours, 0 minutes, 0 seconds on Jan 1, 2000 UTC
utc_th_delta = utc_native - datetime(2000, 1, 1, 0, 0, 0, 0, timezone.utc)
utc_th_us = int(utc_th_delta.total_seconds() * 1000000)
return utc_th_us
matter_epoch_us_from_utc_datetime = utc_time_in_matter_epoch
def utc_datetime_from_matter_epoch_us(matter_epoch_us: int) -> datetime:
"""Returns the given Matter epoch time as a usable Python datetime in UTC."""
delta_from_epoch = timedelta(microseconds=matter_epoch_us)
matter_epoch = datetime(2000, 1, 1, 0, 0, 0, 0, timezone.utc)
return matter_epoch + delta_from_epoch
def utc_datetime_from_posix_time_ms(posix_time_ms: int) -> datetime:
millis = posix_time_ms % 1000
seconds = posix_time_ms // 1000
return datetime.fromtimestamp(seconds, timezone.utc) + timedelta(milliseconds=millis)
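# Worked example for the conversions above: midnight on Jan 2, 2000 UTC is
# exactly one day past the Matter epoch, i.e. 86_400_000_000 microseconds,
# and converting back recovers the same datetime.
#
#   t = datetime(2000, 1, 2, tzinfo=timezone.utc)
#   us = utc_time_in_matter_epoch(t)             # 86_400_000_000
#   assert utc_datetime_from_matter_epoch_us(us) == t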
def compare_time(received: int, offset: timedelta = timedelta(), utc: Optional[int] = None, tolerance: timedelta = timedelta(seconds=5)) -> None:
if utc is None:
utc = utc_time_in_matter_epoch()
# total seconds includes fractional for microseconds
expected = utc + offset.total_seconds() * 1000000
delta_us = abs(expected - received)
delta = timedelta(microseconds=delta_us)
asserts.assert_less_equal(delta, tolerance, "Received time is out of tolerance")
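# Usage sketch for compare_time (values illustrative): assert that a
# device-reported Matter timestamp is within the default 5-second tolerance
# of "now plus offset".
#
#   reported_us = ...  # e.g. a Matter-epoch time read from the DUT
#   compare_time(received=reported_us, offset=timedelta(minutes=1))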
def get_wait_seconds_from_set_time(set_time_matter_us: int, wait_seconds: int):
seconds_passed = (utc_time_in_matter_epoch() - set_time_matter_us) // 1000000
return wait_seconds - seconds_passed
class SimpleEventCallback:
def __init__(self, name: str, expected_cluster_id: int, expected_event_id: int, output_queue: queue.SimpleQueue):
self._name = name
self._expected_cluster_id = expected_cluster_id
self._expected_event_id = expected_event_id
self._output_queue = output_queue
def __call__(self, event_result: EventReadResult, transaction: SubscriptionTransaction):
if (self._expected_cluster_id == event_result.Header.ClusterId and
self._expected_event_id == event_result.Header.EventId):
self._output_queue.put(event_result)
@property
def name(self) -> str:
return self._name
class EventChangeCallback:
def __init__(self, expected_cluster: ClusterObjects.Cluster):
"""This class creates a queue to store received event callbacks, that can be checked by the test script
expected_cluster: is the cluster from which the events are expected
"""
self._q = queue.Queue()
self._expected_cluster = expected_cluster
async def start(self, dev_ctrl, node_id: int, endpoint: int, fabric_filtered: bool = False, min_interval_sec: int = 0, max_interval_sec: int = 30) -> Any:
"""This starts a subscription for events on the specified node_id and endpoint. The cluster is specified when the class instance is created."""
urgent = True
        self._subscription = await dev_ctrl.ReadEvent(
            node_id,
            events=[(endpoint, self._expected_cluster, urgent)],
            reportInterval=(min_interval_sec, max_interval_sec),
            fabricFiltered=fabric_filtered,
            keepSubscriptions=True,
            autoResubscribe=False)
self._subscription.SetEventUpdateCallback(self.__call__)
return self._subscription
def __call__(self, res: EventReadResult, transaction: SubscriptionTransaction):
"""This is the subscription callback when an event is received.
It checks the event is from the expected_cluster and then posts it into the queue for later processing."""
if res.Status == Status.Success and res.Header.ClusterId == self._expected_cluster.id:
logging.info(
f'Got subscription report for event on cluster {self._expected_cluster}: {res.Data}')
self._q.put(res)
def wait_for_event_report(self, expected_event: ClusterObjects.ClusterEvent, timeout_sec: float = 10.0) -> Any:
"""This function allows a test script to block waiting for the specific event to be the next event
to arrive within a timeout (specified in seconds). It returns the event data so that the values can be checked."""
logging.info(f"Waiting for {expected_event} for {timeout_sec:.1f} seconds")
try:
res = self._q.get(block=True, timeout=timeout_sec)
except queue.Empty:
asserts.fail("Failed to receive a report for the event {}".format(expected_event))
asserts.assert_equal(res.Header.ClusterId, expected_event.cluster_id, "Expected cluster ID not found in event report")
asserts.assert_equal(res.Header.EventId, expected_event.event_id, "Expected event ID not found in event report")
logging.info(f"Successfully waited for {expected_event}")
return res.Data
def wait_for_event_expect_no_report(self, timeout_sec: float = 10.0):
"""This function returns if an event does not arrive within the timeout specified in seconds.
If any event does arrive, an assert failure occurs."""
try:
res = self._q.get(block=True, timeout=timeout_sec)
except queue.Empty:
return
asserts.fail(f"Event reported when not expected {res}")
def get_last_event(self) -> Optional[Any]:
"""Flush entire queue, returning last (newest) event only."""
last_event: Optional[Any] = None
while True:
try:
last_event = self._q.get(block=False)
except queue.Empty:
return last_event
def flush_events(self) -> None:
"""Flush entire queue, returning nothing."""
_ = self.get_last_event()
return
def reset(self) -> None:
"""Resets state as if no events had ever been received."""
self.flush_events()
@property
def event_queue(self) -> queue.Queue:
return self._q
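# A hedged usage sketch for EventChangeCallback (cluster and event chosen as
# examples only): subscribe to a cluster's events, trigger the behavior under
# test, then block until the expected event report arrives.
#
#   cb = EventChangeCallback(Clusters.BasicInformation)
#   await cb.start(self.default_controller, self.dut_node_id, endpoint=0)
#   ...  # trigger the event on the DUT
#   data = cb.wait_for_event_report(Clusters.BasicInformation.Events.StartUp)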
class AttributeChangeCallback:
def __init__(self, expected_attribute: ClusterObjects.ClusterAttributeDescriptor):
self._output = queue.Queue()
self._expected_attribute = expected_attribute
def __call__(self, path: TypedAttributePath, transaction: SubscriptionTransaction):
"""This is the subscription callback when an attribute is updated.
It checks the passed in attribute is the same as the subscribed to attribute and
then posts it into the queue for later processing."""
asserts.assert_equal(path.AttributeType, self._expected_attribute,
f"[AttributeChangeCallback] Attribute mismatch. Expected: {self._expected_attribute}, received: {path.AttributeType}")
logging.debug(f"[AttributeChangeCallback] Attribute update callback for {path.AttributeType}")
q = (path, transaction)
self._output.put(q)
def wait_for_report(self):
try:
path, transaction = self._output.get(block=True, timeout=10)
except queue.Empty:
asserts.fail(
f"[AttributeChangeCallback] Failed to receive a report for the {self._expected_attribute} attribute change")
asserts.assert_equal(path.AttributeType, self._expected_attribute,
f"[AttributeChangeCallback] Received incorrect report. Expected: {self._expected_attribute}, received: {path.AttributeType}")
try:
attribute_value = transaction.GetAttribute(path)
logging.info(
f"[AttributeChangeCallback] Got attribute subscription report. Attribute {path.AttributeType}. Updated value: {attribute_value}. SubscriptionId: {transaction.subscriptionId}")
except KeyError:
asserts.fail(f"[AttributeChangeCallback] Attribute {self._expected_attribute} not found in returned report")
def clear_queue(report_queue: queue.Queue):
"""Flush all contents of a report queue. Useful to get back to empty point."""
while not report_queue.empty():
try:
report_queue.get(block=False)
except queue.Empty:
break
@dataclass
class AttributeValue:
endpoint_id: int
attribute: ClusterObjects.ClusterAttributeDescriptor
value: Any
timestamp_utc: Optional[datetime] = None
def await_sequence_of_reports(report_queue: queue.Queue, endpoint_id: int, attribute: TypedAttributePath, sequence: list[Any], timeout_sec: float) -> None:
"""Given a queue.Queue hooked-up to an attribute change accumulator, await a given expected sequence of attribute reports.
Args:
- report_queue: the queue that receives all the reports.
- endpoint_id: endpoint ID to match for reports to check.
- attribute: attribute to match for reports to check.
- sequence: list of attribute values in order that are expected.
- timeout_sec: number of seconds to wait for.
*** WARNING: The queue contains every report since the sub was established. Use
clear_queue to make it empty. ***
This will fail current Mobly test with assertion failure if the data is not as expected in order.
Returns nothing on success so the test can go on.
"""
start_time = time.time()
elapsed = 0.0
time_remaining = timeout_sec
sequence_idx = 0
actual_values = []
while time_remaining > 0:
expected_value = sequence[sequence_idx]
logging.info(f"Expecting value {expected_value} for attribute {attribute} on endpoint {endpoint_id}")
logging.info(f"Waiting for {timeout_sec:.1f} seconds for all reports.")
try:
item: AttributeValue = report_queue.get(block=True, timeout=time_remaining)
# Track arrival of all values for the given attribute.
if item.endpoint_id == endpoint_id and item.attribute == attribute:
actual_values.append(item.value)
if item.value == expected_value:
logging.info(f"Got expected attribute change {sequence_idx+1}/{len(sequence)} for attribute {attribute}")
sequence_idx += 1
else:
asserts.assert_equal(item.value, expected_value,
msg=f"Did not get expected attribute value in correct sequence. Sequence so far: {actual_values}")
# We are done waiting when we have accumulated all results.
if sequence_idx == len(sequence):
logging.info("Got all attribute changes, done waiting.")
return
except queue.Empty:
# No error, we update timeouts and keep going
pass
elapsed = time.time() - start_time
time_remaining = timeout_sec - elapsed
asserts.fail(f"Did not get full sequence {sequence} in {timeout_sec:.1f} seconds. Got {actual_values} before time-out.")
class ClusterAttributeChangeAccumulator:
def __init__(self, expected_cluster: ClusterObjects.Cluster):
self._expected_cluster = expected_cluster
self._subscription = None
self._lock = threading.Lock()
self._q = queue.Queue()
self._endpoint_id = 0
self.reset()
def reset(self):
with self._lock:
self._attribute_report_counts = {}
            attrs = [cls for name, cls in inspect.getmembers(self._expected_cluster.Attributes)
                     if inspect.isclass(cls) and issubclass(cls, ClusterObjects.ClusterAttributeDescriptor)]
self._attribute_reports = {}
for a in attrs:
self._attribute_report_counts[a] = 0
self._attribute_reports[a] = []
self.flush_reports()
async def start(self, dev_ctrl, node_id: int, endpoint: int, fabric_filtered: bool = False, min_interval_sec: int = 0, max_interval_sec: int = 5, keepSubscriptions: bool = True) -> Any:
"""This starts a subscription for attributes on the specified node_id and endpoint. The cluster is specified when the class instance is created."""
self._subscription = await dev_ctrl.ReadAttribute(
nodeid=node_id,
attributes=[(endpoint, self._expected_cluster)],
reportInterval=(int(min_interval_sec), int(max_interval_sec)),
fabricFiltered=fabric_filtered,
keepSubscriptions=keepSubscriptions
)
self._endpoint_id = endpoint
self._subscription.SetAttributeUpdateCallback(self.__call__)
return self._subscription
async def cancel(self):
"""This cancels a subscription."""
        # Allow the shutdown to settle; swallow any asyncio.CancelledError raised while waiting
try:
self._subscription.Shutdown()
await asyncio.sleep(5)
except asyncio.CancelledError:
pass
def __call__(self, path: TypedAttributePath, transaction: SubscriptionTransaction):
"""This is the subscription callback when an attribute report is received.
It checks the report is from the expected_cluster and then posts it into the queue for later processing."""
if path.ClusterType == self._expected_cluster:
data = transaction.GetAttribute(path)
value = AttributeValue(endpoint_id=path.Path.EndpointId, attribute=path.AttributeType,
value=data, timestamp_utc=datetime.now(timezone.utc))
logging.info(f"Got subscription report for {path.AttributeType}: {data}")
self._q.put(value)
with self._lock:
self._attribute_report_counts[path.AttributeType] += 1
self._attribute_reports[path.AttributeType].append(value)
def await_all_final_values_reported(self, expected_final_values: Iterable[AttributeValue], timeout_sec: float = 1.0):
"""Expect that every `expected_final_value` report is the last value reported for the given attribute, ignoring timestamps.
Waits for at least `timeout_sec` seconds.
This is a form of barrier for a set of attribute changes that should all happen together for an action.
"""
start_time = time.time()
elapsed = 0.0
time_remaining = timeout_sec
last_report_matches: dict[int, bool] = {idx: False for idx, _ in enumerate(expected_final_values)}
for element in expected_final_values:
logging.info(
f"--> Expecting report for value {element.value} for attribute {element.attribute} on endpoint {element.endpoint_id}")
logging.info(f"Waiting for {timeout_sec:.1f} seconds for all reports.")
while time_remaining > 0:
# Snapshot copy at the beginning of the loop. This is thread-safe based on the design.
all_reports = self._attribute_reports
# Recompute all last-value matches
for expected_idx, expected_element in enumerate(expected_final_values):
last_value = None
for report in all_reports.get(expected_element.attribute, []):
if report.endpoint_id == expected_element.endpoint_id:
last_value = report.value
last_report_matches[expected_idx] = (last_value is not None and last_value == expected_element.value)
# Determine if all were met
if all(last_report_matches.values()):
logging.info("Found all expected reports were true.")
return
elapsed = time.time() - start_time
time_remaining = timeout_sec - elapsed
time.sleep(0.1)
# If we reach here, there was no early return and we failed to find all the values.
logging.error("Reached time-out without finding all expected report values.")
logging.info("Values found:")
for expected_idx, expected_element in enumerate(expected_final_values):
logging.info(f" -> {expected_element} found: {last_report_matches.get(expected_idx)}")
asserts.fail("Did not find all expected last report values before time-out")
def await_sequence_of_reports(self, attribute: TypedAttributePath, sequence: list[Any], timeout_sec: float) -> None:
"""Await a given expected sequence of attribute reports in the accumulator for the endpoint associated.
Args:
- attribute: attribute to match for reports to check.
- sequence: list of attribute values in order that are expected.
- timeout_sec: number of seconds to wait for.
*** WARNING: The queue contains every report since the sub was established. Use
self.reset() to make it empty. ***
This will fail current Mobly test with assertion failure if the data is not as expected in order.
Returns nothing on success so the test can go on.
"""
await_sequence_of_reports(report_queue=self.attribute_queue, endpoint_id=self._endpoint_id,
attribute=attribute, sequence=sequence, timeout_sec=timeout_sec)
@property
def attribute_queue(self) -> queue.Queue:
return self._q
@property
def attribute_report_counts(self) -> dict[ClusterObjects.ClusterAttributeDescriptor, int]:
with self._lock:
return self._attribute_report_counts
@property
    def attribute_reports(self) -> dict[ClusterObjects.ClusterAttributeDescriptor, list[AttributeValue]]:
with self._lock:
return self._attribute_reports.copy()
def get_last_report(self) -> Optional[Any]:
"""Flush entire queue, returning last (newest) report only."""
last_report: Optional[Any] = None
while True:
try:
last_report = self._q.get(block=False)
except queue.Empty:
return last_report
def flush_reports(self) -> None:
"""Flush entire queue, returning nothing."""
_ = self.get_last_report()
return
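# End-to-end sketch for the accumulator above (cluster choice illustrative):
# start a subscription on one cluster, act on the DUT, then barrier on the
# expected final attribute values.
#
#   acc = ClusterAttributeChangeAccumulator(Clusters.OnOff)
#   await acc.start(self.default_controller, self.dut_node_id, endpoint=1)
#   ...  # e.g. send an On command to the DUT
#   acc.await_all_final_values_reported(
#       [AttributeValue(endpoint_id=1,
#                       attribute=Clusters.OnOff.Attributes.OnOff,
#                       value=True)],
#       timeout_sec=5.0)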
class InternalTestRunnerHooks(TestRunnerHooks):
def start(self, count: int):
logging.info(f'Starting test set, running {count} tests')
def stop(self, duration: int):
logging.info(f'Finished test set, ran for {duration}ms')
def test_start(self, filename: str, name: str, count: int, steps: list[str] = []):
logging.info(f'Starting test from {filename}: {name} - {count} steps')
def test_stop(self, exception: Exception, duration: int):
logging.info(f'Finished test in {duration}ms')
def step_skipped(self, name: str, expression: str):
# TODO: Do we really need the expression as a string? We can evaluate this in code very easily
logging.info(f'\t\t**** Skipping: {name}')
def step_start(self, name: str):
        # The way I'm calling this, the name already includes the step number, but it seems like it might be good to separate these
logging.info(f'\t\t***** Test Step {name}')
def step_success(self, logger, logs, duration: int, request):
pass
def step_failure(self, logger, logs, duration: int, request, received):
# TODO: there's supposed to be some kind of error message here, but I have no idea where it's meant to come from in this API
logging.info('\t\t***** Test Failure : ')
def step_unknown(self):
"""
This method is called when the result of running a step is unknown. For example during a dry-run.
"""
pass
def show_prompt(self,
msg: str,
placeholder: Optional[str] = None,
default_value: Optional[str] = None) -> None:
pass
def test_skipped(self, filename: str, name: str):
logging.info(f"Skipping test from {filename}: {name}")
@dataclass
class MatterTestConfig:
storage_path: pathlib.Path = pathlib.Path(".")
logs_path: pathlib.Path = pathlib.Path(".")
paa_trust_store_path: Optional[pathlib.Path] = None
ble_interface_id: Optional[int] = None
commission_only: bool = False
admin_vendor_id: int = _DEFAULT_ADMIN_VENDOR_ID
case_admin_subject: Optional[int] = None
global_test_params: dict = field(default_factory=dict)
# List of explicit tests to run by name. If empty, all tests will run
tests: List[str] = field(default_factory=list)
timeout: typing.Union[int, None] = None
endpoint: typing.Union[int, None] = 0
app_pid: int = 0
commissioning_method: Optional[str] = None
discriminators: List[int] = field(default_factory=list)
setup_passcodes: List[int] = field(default_factory=list)
commissionee_ip_address_just_for_testing: Optional[str] = None
# By default, we start with maximized cert chains, as required for RR-1.1.
# This allows cert tests to be run without re-commissioning for RR-1.1.
maximize_cert_chains: bool = True
qr_code_content: List[str] = field(default_factory=list)
manual_code: List[str] = field(default_factory=list)
wifi_ssid: Optional[str] = None
wifi_passphrase: Optional[str] = None
thread_operational_dataset: Optional[str] = None
    pics: dict[str, bool] = field(default_factory=dict)
# Node ID for basic DUT
dut_node_ids: List[int] = field(default_factory=list)
# Node ID to use for controller/commissioner
controller_node_id: int = _DEFAULT_CONTROLLER_NODE_ID
# CAT Tags for default controller/commissioner
# By default, we commission with CAT tags specified for RR-1.1
# so the cert tests can be run without re-commissioning the device
# for this one test. This can be overwritten from the command line
controller_cat_tags: List[int] = field(default_factory=lambda: [0x0001_0001])
    # Fabric ID to use
fabric_id: int = 1
# "Alpha" by default
root_of_trust_index: int = _DEFAULT_TRUST_ROOT_INDEX
# If this is set, we will reuse root of trust keys at that location
chip_tool_credentials_path: Optional[pathlib.Path] = None
trace_to: List[str] = field(default_factory=list)
class ClusterMapper:
"""Describe clusters/attributes using schema names."""
def __init__(self, legacy_cluster_mapping) -> None:
self._mapping = legacy_cluster_mapping
def get_cluster_string(self, cluster_id: int) -> str:
mapping = self._mapping._CLUSTER_ID_DICT.get(cluster_id, None)
if not mapping:
return f"Cluster Unknown ({cluster_id}, 0x{cluster_id:08X})"
else:
name = mapping["clusterName"]
return f"Cluster {name} ({cluster_id}, 0x{cluster_id:04X})"
def get_attribute_string(self, cluster_id: int, attribute_id) -> str:
global_attrs = [item.value for item in GlobalAttributeIds]
if attribute_id in global_attrs:
return f"Attribute {GlobalAttributeIds(attribute_id).to_name()} {attribute_id}, 0x{attribute_id:04X}"
mapping = self._mapping._CLUSTER_ID_DICT.get(cluster_id, None)
if not mapping:
return f"Attribute Unknown ({attribute_id}, 0x{attribute_id:08X})"
else:
attribute_mapping = mapping["attributes"].get(attribute_id, None)
if not attribute_mapping:
return f"Attribute Unknown ({attribute_id}, 0x{attribute_id:08X})"
else:
attribute_name = attribute_mapping["attributeName"]
return f"Attribute {attribute_name} ({attribute_id}, 0x{attribute_id:04X})"
def id_str(id):
return f'{id} (0x{id:02x})'
def cluster_id_str(id):
if id in Clusters.ClusterObjects.ALL_CLUSTERS.keys():
s = Clusters.ClusterObjects.ALL_CLUSTERS[id].__name__
else:
s = "Unknown cluster"
    try:
        return f'{id_str(id)} {s}'
    except TypeError:
        # Fallback for IDs that cannot be hex-formatted (e.g. non-integer types).
        return f'Unknown/badly formatted cluster id: {id} {s}'
@dataclass
class CustomCommissioningParameters:
commissioningParameters: CommissioningParameters
randomDiscriminator: int
@dataclass
class ClusterPathLocation:
endpoint_id: int
cluster_id: int
def __str__(self):
return (f'\n Endpoint: {self.endpoint_id},'
f'\n Cluster: {cluster_id_str(self.cluster_id)}')
@dataclass
class AttributePathLocation(ClusterPathLocation):
cluster_id: Optional[int] = None
attribute_id: Optional[int] = None
def as_cluster_string(self, mapper: ClusterMapper):
desc = f"Endpoint {self.endpoint_id}"
if self.cluster_id is not None:
desc += f", {mapper.get_cluster_string(self.cluster_id)}"
return desc
def as_string(self, mapper: ClusterMapper):
desc = self.as_cluster_string(mapper)
if self.cluster_id is not None and self.attribute_id is not None:
desc += f", {mapper.get_attribute_string(self.cluster_id, self.attribute_id)}"
return desc
def __str__(self):
return (f'{super().__str__()}'
f'\n Attribute:{id_str(self.attribute_id)}')
@dataclass
class EventPathLocation(ClusterPathLocation):
event_id: int
def __str__(self):
return (f'{super().__str__()}'
f'\n Event: {id_str(self.event_id)}')
@dataclass
class CommandPathLocation(ClusterPathLocation):
command_id: int
def __str__(self):
return (f'{super().__str__()}'
f'\n Command: {id_str(self.command_id)}')
@dataclass
class FeaturePathLocation(ClusterPathLocation):
feature_code: str
def __str__(self):
return (f'{super().__str__()}'
f'\n Feature: {self.feature_code}')
@dataclass
class DeviceTypePathLocation:
device_type_id: int
cluster_id: Optional[int] = None
def __str__(self):
msg = f'\n DeviceType: {self.device_type_id}'
if self.cluster_id:
msg += f'\n ClusterID: {self.cluster_id}'
return msg
ProblemLocation = typing.Union[ClusterPathLocation, DeviceTypePathLocation]
# ProblemSeverity is not using StrEnum, but rather Enum, since StrEnum only
# appeared in 3.11. To make it JSON serializable easily, multiple inheritance
# from `str` is used. See https://stackoverflow.com/a/51976841.
class ProblemSeverity(str, Enum):
NOTE = "NOTE"
WARNING = "WARNING"
ERROR = "ERROR"
@dataclass
class ProblemNotice:
test_name: str
location: ProblemLocation
severity: ProblemSeverity
problem: str
spec_location: str = ""
def __str__(self):
return (f'\nProblem: {str(self.severity)}'
f'\n test_name: {self.test_name}'
f'\n location: {str(self.location)}'
f'\n problem: {self.problem}'
f'\n spec_location: {self.spec_location}\n')
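# Illustrative construction of a ProblemNotice (all field values are
# examples only), as a test might record a spec-conformance concern:
#
#   ProblemNotice(test_name="TC-EXAMPLE-1.1",
#                 location=AttributePathLocation(endpoint_id=0,
#                                                cluster_id=0x0028,
#                                                attribute_id=0x0000),
#                 severity=ProblemSeverity.WARNING,
#                 problem="Attribute value outside expected range",
#                 spec_location="Core spec, section reference here")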
@dataclass
class SetupPayloadInfo:
filter_type: discovery.FilterType = discovery.FilterType.LONG_DISCRIMINATOR
filter_value: int = 0
passcode: int = 0
class MatterStackState:
def __init__(self, config: MatterTestConfig):
self._logger = logger
self._config = config
if not hasattr(builtins, "chipStack"):
chip.native.Init(bluetoothAdapter=config.ble_interface_id)
if config.storage_path is None:
raise ValueError("Must have configured a MatterTestConfig.storage_path")
self._init_stack(already_initialized=False, persistentStoragePath=config.storage_path)
self._we_initialized_the_stack = True
else:
self._init_stack(already_initialized=True)
self._we_initialized_the_stack = False
def _init_stack(self, already_initialized: bool, **kwargs):
if already_initialized:
self._chip_stack = builtins.chipStack
self._logger.warn(
"Re-using existing ChipStack object found in current interpreter: "
"storage path %s will be ignored!" % (self._config.storage_path)
)
# TODO: Warn that storage will not follow what we set in config
else:
self._chip_stack = ChipStack(**kwargs)
builtins.chipStack = self._chip_stack
chip.logging.RedirectToPythonLogging()
self._storage = self._chip_stack.GetStorageManager()
self._certificate_authority_manager = chip.CertificateAuthority.CertificateAuthorityManager(chipStack=self._chip_stack)
self._certificate_authority_manager.LoadAuthoritiesFromStorage()
if (len(self._certificate_authority_manager.activeCaList) == 0):
self._logger.warn(
"Didn't find any CertificateAuthorities in storage -- creating a new CertificateAuthority + FabricAdmin...")
ca = self._certificate_authority_manager.NewCertificateAuthority(caIndex=self._config.root_of_trust_index)
ca.maximizeCertChains = self._config.maximize_cert_chains
ca.NewFabricAdmin(vendorId=0xFFF1, fabricId=self._config.fabric_id)
elif (len(self._certificate_authority_manager.activeCaList[0].adminList) == 0):
self._logger.warn("Didn't find any FabricAdmins in storage -- creating a new one...")
self._certificate_authority_manager.activeCaList[0].NewFabricAdmin(vendorId=0xFFF1, fabricId=self._config.fabric_id)
# TODO: support getting access to chip-tool credentials issuer's data
def Shutdown(self):
if self._we_initialized_the_stack:
# Unfortunately, all the below are singleton and possibly
# managed elsewhere so we have to be careful not to touch unless
# we initialized ourselves.
self._certificate_authority_manager.Shutdown()
global_chip_stack = builtins.chipStack
global_chip_stack.Shutdown()
@property
def certificate_authorities(self):
return self._certificate_authority_manager.activeCaList
@property
def certificate_authority_manager(self):
return self._certificate_authority_manager
@property
def storage(self) -> PersistentStorage:
return self._storage
@property
def stack(self) -> ChipStack:
return builtins.chipStack
def bytes_from_hex(hex: str) -> bytes:
    """Converts any `hex` string representation, including `01:ab:cd`, to bytes.
    Handles any whitespace, including newlines, which are all stripped.
    """
return unhexlify("".join(hex.replace(":", "").replace(" ", "").split()))
def hex_from_bytes(b: bytes) -> str:
"""Converts a bytes object `b` into a hex string (reverse of bytes_from_hex)"""
return hexlify(b).decode("utf-8")
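# Round-trip example for the two helpers above: each input below decodes to
# the same three bytes, and hex_from_bytes inverts the conversion.
#
#   assert bytes_from_hex("01:ab:cd") == b"\x01\xab\xcd"
#   assert bytes_from_hex("01 ab cd") == b"\x01\xab\xcd"
#   assert hex_from_bytes(b"\x01\xab\xcd") == "01abcd"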
@dataclass
class TestStep:
test_plan_number: typing.Union[int, str]
description: str
expectation: str = ""
is_commissioning: bool = False
def __str__(self):
return f'{self.test_plan_number}: {self.description}\tExpected outcome: {self.expectation}'
@dataclass
class TestInfo:
function: str
desc: str
steps: list[TestStep]
pics: list[str]
class MatterBaseTest(base_test.BaseTestClass):
def __init__(self, *args):
super().__init__(*args)
# List of accumulated problems across all tests
self.problems = []
self.is_commissioning = False
# The named pipe name must be set in the derived classes
self.app_pipe = None
def get_test_steps(self, test: str) -> list[TestStep]:
        ''' Retrieves the test step list for the given test.
            Test steps are defined in the function called steps_<functionname>,
            e.g. for test test_TC_TEST_1_1, the steps are in a function called
            steps_TC_TEST_1_1.
            Tests that implement a steps_ function should call each step in
            order using self.step(number), where number is the test_plan_number
            from each TestStep; see the sketch after these helpers.
        '''
steps = self.get_defined_test_steps(test)
return [TestStep(1, "Run entire test")] if steps is None else steps
def get_defined_test_steps(self, test: str) -> list[TestStep]:
steps_name = f'steps_{test.removeprefix("test_")}'
try:
fn = getattr(self, steps_name)
return fn()
except AttributeError:
return None
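    # A sketch of the steps_ convention described above (test name
    # hypothetical): a test defines its plan steps, then advances through
    # them in order with self.step().
    #
    #   def steps_TC_EXAMPLE_1_1(self) -> list[TestStep]:
    #       return [TestStep(1, "Commission DUT", is_commissioning=True),
    #               TestStep(2, "Read an attribute", "Value is in range")]
    #
    #   async def test_TC_EXAMPLE_1_1(self):
    #       self.step(1)
    #       ...
    #       self.step(2)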
def get_test_pics(self, test: str) -> list[str]:
        ''' Retrieves a list of top-level PICS that should be checked before running this test.
            An empty list means the test will always be run.
            PICS are defined in a function called pics_<functionname>,
            e.g. for test test_TC_TEST_1_1, the PICS are in a function called
            pics_TC_TEST_1_1.
        '''
pics = self._get_defined_pics(test)
return [] if pics is None else pics
    def _get_defined_pics(self, test: str) -> list[str]:
steps_name = f'pics_{test.removeprefix("test_")}'
try:
fn = getattr(self, steps_name)
return fn()
except AttributeError:
return None
def get_test_desc(self, test: str) -> str:
        ''' Returns a description of this test.
            The test description is defined in the function called desc_<functionname>,
            e.g. for test test_TC_TEST_1_1, the description is in a function called
            desc_TC_TEST_1_1.
Format:
<Test plan reference> [<test plan number>] <test plan name>
ex:
133.1.1. [TC-ACL-1.1] Global attributes
'''
desc_name = f'desc_{test.removeprefix("test_")}'
try: