Implement TC-SC-3.6 as a Python test (#21719)
* Implement TC-SC-3.6 as a Python test
- Test TC-SC-3.6 was not feasible with chip-tool and manual
intervention due to the complexity of validating that the
correct sessions were established, establishing all of them,
and ensuring all subscriptions fire as intended.
This PR:
- Adds a version of TC-SC-3.6 in Python
- Updates the Python code with minor improvements needed during
the development of the test
- **Touches no C++ SDK code**
Testing done:
- Ran the test as it could be done by an end-user, passed on Linux
and on ESP32 (after some changes to resources in the examples, not
included in this PR)
To run:
- Build all-clusters app Linux:
- `scripts/examples/gn_build_example.sh examples/all-clusters-app/linux out/debug/standalone chip_config_network_layer_ble=false`
- In a shell, run: `clear && rm -f kvs1 && out/debug/standalone/chip-all-clusters-app --discriminator 2118 --KVS kvs1`
- Build the Python environment, activate it, then run the test
- `./scripts/build_python.sh -m platform -i separate`
- `. ./out/python_env/bin/activate`
- Run the test: `rm -f admin_storage.json && python src/python_testing/TC_SC_3_6.py -m on-network -d 2118 -p 20202021`
* Apply review comments
diff --git a/src/controller/python/chip-device-ctrl.py b/src/controller/python/chip-device-ctrl.py
index c94514b..5b567b3 100755
--- a/src/controller/python/chip-device-ctrl.py
+++ b/src/controller/python/chip-device-ctrl.py
@@ -367,7 +367,7 @@
setup-payload generate [options]
Options:
- -vr Version
+ -vr Version
-vi Vendor ID
-pi Product ID
-cf Custom Flow [Standard = 0, UserActionRequired = 1, Custom = 2]
@@ -971,7 +971,7 @@
open-commissioning-window <nodeid> [options]
Options:
- -t Timeout (in seconds)
+ -t Timeout (in seconds)
-o Option [TokenWithRandomPIN = 1, TokenWithProvidedPIN = 2]
-d Discriminator Value
-i Iteration
@@ -1024,7 +1024,7 @@
return
compressed_fabricid = self.devCtrl.GetCompressedFabricId()
- raw_fabricid = self.devCtrl.GetFabricId()
+ raw_fabricid = self.devCtrl.fabricId
except exceptions.ChipStackException as ex:
print("An exception occurred during reading FabricID:")
print(str(ex))
diff --git a/src/controller/python/chip/ChipDeviceCtrl.py b/src/controller/python/chip/ChipDeviceCtrl.py
index 6c36350..30064a1 100644
--- a/src/controller/python/chip/ChipDeviceCtrl.py
+++ b/src/controller/python/chip/ChipDeviceCtrl.py
@@ -40,11 +40,11 @@
from .clusters import Objects as GeneratedObjects
from .clusters.CHIPClusters import *
from . import clusters as Clusters
+from .FabricAdmin import FabricAdmin
import enum
import threading
import typing
import builtins
-import ipdb
import ctypes
import copy
@@ -157,7 +157,7 @@
class ChipDeviceController():
activeList = set()
- def __init__(self, opCredsContext: ctypes.c_void_p, fabricId: int, nodeId: int, adminVendorId: int, paaTrustStorePath: str = "", useTestCommissioner: bool = False):
+ def __init__(self, opCredsContext: ctypes.c_void_p, fabricId: int, nodeId: int, adminVendorId: int, paaTrustStorePath: str = "", useTestCommissioner: bool = False, fabricAdmin: FabricAdmin = None, name: str = None):
self.state = DCState.NOT_INITIALIZED
self.devCtrl = None
self._ChipStack = builtins.chipStack
@@ -174,19 +174,23 @@
opCredsContext), pointer(devCtrl), fabricId, nodeId, adminVendorId, ctypes.c_char_p(None if len(paaTrustStorePath) == 0 else str.encode(paaTrustStorePath)), useTestCommissioner)
)
- self.nodeId = nodeId
-
if res != 0:
raise self._ChipStack.ErrorToException(res)
self.devCtrl = devCtrl
+ self._fabricAdmin = fabricAdmin
+ self._fabricId = fabricId
+ self._nodeId = nodeId
+ self._adminIndex = fabricAdmin.adminIndex
+
+ if name is None:
+ self._name = "adminIndex(%x)/fabricId(0x%016X)/nodeId(0x%016X)" % (fabricAdmin.adminIndex, fabricId, nodeId)
+ else:
+ self._name = name
self._Cluster = ChipClusters(builtins.chipStack)
self._Cluster.InitLib(self._dmLib)
- def GetNodeId(self):
- return self.nodeId
-
def HandleCommissioningComplete(nodeid, err):
if err != 0:
print("Failed to commission: {}".format(err))
@@ -198,7 +202,7 @@
self._ChipStack.commissioningCompleteEvent.set()
self._ChipStack.completeEvent.set()
- def HandleKeyExchangeComplete(err):
+ def HandlePASEEstablishmentComplete(err):
if err != 0:
print("Failed to establish secure session to device: {}".format(err))
self._ChipStack.callbackRes = self._ChipStack.ErrorToException(
@@ -207,7 +211,7 @@
print("Established secure session with Device")
if self.state != DCState.COMMISSIONING:
- # During Commissioning, HandleKeyExchangeComplete will also be called,
+ # During Commissioning, HandlePASEEstablishmentComplete will also be called,
# in this case the async operation should be marked as finished by
# HandleCommissioningComplete instead this function.
self.state = DCState.IDLE
@@ -218,10 +222,10 @@
if err != 0:
HandleCommissioningComplete(0, err)
- self.cbHandleKeyExchangeCompleteFunct = _DevicePairingDelegate_OnPairingCompleteFunct(
- HandleKeyExchangeComplete)
+ self.cbHandlePASEEstablishmentCompleteFunct = _DevicePairingDelegate_OnPairingCompleteFunct(
+ HandlePASEEstablishmentComplete)
self._dmLib.pychip_ScriptDevicePairingDelegate_SetKeyExchangeCallback(
- self.devCtrl, self.cbHandleKeyExchangeCompleteFunct)
+ self.devCtrl, self.cbHandlePASEEstablishmentCompleteFunct)
self.cbHandleCommissioningCompleteFunct = _DevicePairingDelegate_OnCommissioningCompleteFunct(
HandleCommissioningComplete)
@@ -231,8 +235,40 @@
self.state = DCState.IDLE
self.isActive = True
+ # Validate that the FabricID/NodeID derived from the NOC chain match what was requested
+ self._fabricId = self.GetFabricIdInternal()
+ assert self._fabricId == fabricId
+ self._nodeId = self.GetNodeIdInternal()
+ assert self._nodeId == nodeId
+
ChipDeviceController.activeList.add(self)
+ @property
+ def fabricAdmin(self) -> FabricAdmin:
+ return self._fabricAdmin
+
+ @property
+ def nodeId(self) -> int:
+ self.CheckIsActive()
+ return self._nodeId
+
+ @property
+ def fabricId(self) -> int:
+ self.CheckIsActive()
+ return self._fabricId
+
+ @property
+ def adminIndex(self) -> int:
+ return self._adminIndex
+
+ @property
+ def name(self) -> str:
+ return self._name
+
+ @name.setter
+ def name(self, new_name: str):
+ self._name = new_name
+
def Shutdown(self):
''' Shuts down this controller and reclaims any used resources, including the bound
C++ constructor instance in the SDK.
@@ -447,6 +483,7 @@
return self._ChipStack.commissioningEventRes == 0
def CommissionIP(self, ipaddr: str, setupPinCode: int, nodeid: int):
+ """ DEPRECATED, DO NOT USE! Use `CommissionOnNetwork` or `CommissionWithCode` """
self.CheckIsActive()
# IP connection will run through full commissioning, so we need to wait
@@ -614,7 +651,8 @@
else:
raise self._ChipStack.ErrorToException(res)
- def GetFabricId(self):
+ def GetFabricIdInternal(self):
+ """Get the fabric ID from the object. Only used to validate cached value from property."""
self.CheckIsActive()
fabricid = c_uint64(0)
@@ -629,7 +667,8 @@
else:
raise self._ChipStack.ErrorToException(res)
- def GetNodeId(self):
+ def GetNodeIdInternal(self) -> int:
+ """Get the node ID from the object. Only used to validate cached value from property."""
self.CheckIsActive()
nodeid = c_uint64(0)
diff --git a/src/controller/python/chip/FabricAdmin.py b/src/controller/python/chip/FabricAdmin.py
index 4af0e38..d441758 100644
--- a/src/controller/python/chip/FabricAdmin.py
+++ b/src/controller/python/chip/FabricAdmin.py
@@ -23,7 +23,6 @@
from typing import *
from ctypes import *
from rich.pretty import pprint
-import ipdb
import json
import logging
import builtins
@@ -102,7 +101,7 @@
raise ValueError(
f"Invalid VendorID ({vendorId}) provided!")
- self.vendorId = vendorId
+ self._vendorId = vendorId
self._fabricId = fabricId
if (adminIndex is None):
@@ -160,7 +159,7 @@
f"Allocating new controller with FabricId: 0x{self._fabricId:016X}, NodeId: 0x{nodeId:016X}")
controller = ChipDeviceCtrl.ChipDeviceController(
- self.closure, self._fabricId, nodeId, self.vendorId, paaTrustStorePath, useTestCommissioner)
+ self.closure, self._fabricId, nodeId, self.vendorId, paaTrustStorePath, useTestCommissioner, fabricAdmin=self)
return controller
def ShutdownAll():
@@ -200,3 +199,15 @@
def __del__(self):
self.Shutdown(False)
+
+ @property
+ def vendorId(self) -> int:
+ return self._vendorId
+
+ @property
+ def fabricId(self) -> int:
+ return self._fabricId
+
+ @property
+ def adminIndex(self) -> int:
+ return self._adminIndex
diff --git a/src/controller/python/chip/clusters/Attribute.py b/src/controller/python/chip/clusters/Attribute.py
index db29b86..fc2ec8e 100644
--- a/src/controller/python/chip/clusters/Attribute.py
+++ b/src/controller/python/chip/clusters/Attribute.py
@@ -465,7 +465,7 @@
class SubscriptionTransaction:
- def __init__(self, transaction: 'AsyncReadTransaction', subscriptionId, devCtrl):
+ def __init__(self, transaction: AsyncReadTransaction, subscriptionId, devCtrl):
self._onResubscriptionAttemptedCb = DefaultResubscriptionAttemptedCallback
self._onAttributeChangeCb = DefaultAttributeChangeCallback
self._onEventChangeCb = DefaultEventChangeCallback
@@ -760,9 +760,9 @@
if not self._future.done():
if self._resultError:
if self._subscription_handler:
- self._subscription_handler.OnErrorCb(chipError, self._subscription_handler)
+ self._subscription_handler.OnErrorCb(self._resultError, self._subscription_handler)
else:
- self._future.set_exception(chip.exceptions.ChipStackError(chipError))
+ self._future.set_exception(chip.exceptions.ChipStackError(self._resultError))
else:
self._future.set_result(AsyncReadTransaction.ReadResponse(
attributes=self._cache.attributeCache, events=self._events))
diff --git a/src/controller/python/chip/storage/__init__.py b/src/controller/python/chip/storage/__init__.py
index 521fd15..362abda 100644
--- a/src/controller/python/chip/storage/__init__.py
+++ b/src/controller/python/chip/storage/__init__.py
@@ -23,7 +23,6 @@
from typing import *
from ctypes import *
from rich.pretty import pprint
-import ipdb
import json
import logging
import base64
diff --git a/src/controller/python/chip/utils/CommissioningBuildingBlocks.py b/src/controller/python/chip/utils/CommissioningBuildingBlocks.py
index 69e3082..ae4da4a 100644
--- a/src/controller/python/chip/utils/CommissioningBuildingBlocks.py
+++ b/src/controller/python/chip/utils/CommissioningBuildingBlocks.py
@@ -66,7 +66,7 @@
# Step 1: Wipe the subject from all existing ACLs.
for acl in currentAcls:
if (acl.subjects != NullValue):
- acl.subjects = [subject for subject in acl.subjects if subject != grantedCtrl.GetNodeId()]
+ acl.subjects = [subject for subject in acl.subjects if subject != grantedCtrl.nodeId]
if (privilege):
addedPrivilege = False
@@ -75,8 +75,8 @@
# the existing privilege in that entry matches our desired privilege.
for acl in currentAcls:
if acl.privilege == privilege:
- if grantedCtrl.GetNodeId() not in acl.subjects:
- acl.subjects.append(grantedCtrl.GetNodeId())
+ if grantedCtrl.nodeId not in acl.subjects:
+ acl.subjects.append(grantedCtrl.nodeId)
addedPrivilege = True
# Step 3: If there isn't an existing entry to add to, make a new one.
@@ -86,7 +86,7 @@
f"Cannot add another ACL entry to grant privilege to existing count of {currentAcls} ACLs -- will exceed minimas!")
currentAcls.append(Clusters.AccessControl.Structs.AccessControlEntry(privilege=privilege, authMode=Clusters.AccessControl.Enums.AuthMode.kCase,
- subjects=[grantedCtrl.GetNodeId()]))
+ subjects=[grantedCtrl.nodeId]))
# Step 4: Prune ACLs which have empty subjects.
currentAcls = [acl for acl in currentAcls if acl.subjects != NullValue and len(acl.subjects) != 0]
@@ -115,24 +115,21 @@
async def AddNOCForNewFabricFromExisting(commissionerDevCtrl, newFabricDevCtrl, existingNodeId, newNodeId):
- ''' Perform sequence to commission new frabric using existing commissioned fabric.
+ ''' Perform sequence to commission new fabric using existing commissioned fabric.
Args:
commissionerDevCtrl (ChipDeviceController): Already commissioned device controller used
to commission a new fabric on `newFabricDevCtrl`.
newFabricDevCtrl (ChipDeviceController): New device controller which is used for the new
fabric we are establishing.
- existingNodeId (int): Node ID of the server we are establishing a CASE session on the
- existing fabric that we will used to perform AddNOC.
- newNodeId (int): Node ID that we would like to server to used on the new fabric being
- added.
+ existingNodeId (int): Node ID of the target where an AddNOC needs to be done for a new fabric.
+ newNodeId (int): Node ID to use for the target node on the new fabric.
Return:
bool: True if successful, False otherwise.
'''
-
- resp = await commissionerDevCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(60), timedRequestTimeoutMs=1000)
+ resp = await commissionerDevCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(60))
if resp.errorCode is not generalCommissioning.Enums.CommissioningError.kOk:
return False
@@ -141,20 +138,20 @@
chainForAddNOC = newFabricDevCtrl.IssueNOCChain(csrForAddNOC, newNodeId)
if chainForAddNOC.rcacBytes is None or chainForAddNOC.icacBytes is None or chainForAddNOC.nocBytes is None or chainForAddNOC.ipkBytes is None:
# Expiring the failsafe timer in an attempt to clean up.
- await commissionerDevCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0), timedRequestTimeoutMs=1000)
+ await commissionerDevCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0))
return False
await commissionerDevCtrl.SendCommand(existingNodeId, 0, opCreds.Commands.AddTrustedRootCertificate(chainForAddNOC.rcacBytes))
- resp = await commissionerDevCtrl.SendCommand(existingNodeId, 0, opCreds.Commands.AddNOC(chainForAddNOC.nocBytes, chainForAddNOC.icacBytes, chainForAddNOC.ipkBytes, newFabricDevCtrl.GetNodeId(), 0xFFF1))
+ resp = await commissionerDevCtrl.SendCommand(existingNodeId, 0, opCreds.Commands.AddNOC(chainForAddNOC.nocBytes, chainForAddNOC.icacBytes, chainForAddNOC.ipkBytes, newFabricDevCtrl.nodeId, 0xFFF1))
if resp.statusCode is not opCreds.Enums.OperationalCertStatus.kSuccess:
# Expiring the failsafe timer in an attempt to clean up.
- await commissionerDevCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0), timedRequestTimeoutMs=1000)
+ await commissionerDevCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0))
return False
resp = await newFabricDevCtrl.SendCommand(newNodeId, 0, generalCommissioning.Commands.CommissioningComplete())
if resp.errorCode is not generalCommissioning.Enums.CommissioningError.kOk:
# Expiring the failsafe timer in an attempt to clean up.
- await commissionerDevCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0), timedRequestTimeoutMs=1000)
+ await commissionerDevCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0))
return False
if not await _IsNodeInFabricList(newFabricDevCtrl, newNodeId):
@@ -179,20 +176,20 @@
bool: True if successful, False otherwise.
"""
- resp = await devCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(600), timedRequestTimeoutMs=1000)
+ resp = await devCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(600))
if resp.errorCode is not generalCommissioning.Enums.CommissioningError.kOk:
return False
csrForUpdateNOC = await devCtrl.SendCommand(
existingNodeId, 0, opCreds.Commands.CSRRequest(CSRNonce=os.urandom(32), isForUpdateNOC=True))
chainForUpdateNOC = devCtrl.IssueNOCChain(csrForUpdateNOC, newNodeId)
if chainForUpdateNOC.rcacBytes is None or chainForUpdateNOC.icacBytes is None or chainForUpdateNOC.nocBytes is None or chainForUpdateNOC.ipkBytes is None:
- await devCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0), timedRequestTimeoutMs=1000)
+ await devCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0))
return False
resp = await devCtrl.SendCommand(existingNodeId, 0, opCreds.Commands.UpdateNOC(chainForUpdateNOC.nocBytes, chainForUpdateNOC.icacBytes))
if resp.statusCode is not opCreds.Enums.OperationalCertStatus.kSuccess:
# Expiring the failsafe timer in an attempt to clean up.
- await devCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0), timedRequestTimeoutMs=1000)
+ await devCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0))
return False
# Forget our session since the peer deleted it
@@ -201,7 +198,7 @@
resp = await devCtrl.SendCommand(newNodeId, 0, generalCommissioning.Commands.CommissioningComplete())
if resp.errorCode is not generalCommissioning.Enums.CommissioningError.kOk:
# Expiring the failsafe timer in an attempt to clean up.
- await devCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0), timedRequestTimeoutMs=1000)
+ await devCtrl.SendCommand(existingNodeId, 0, generalCommissioning.Commands.ArmFailSafe(0))
return False
if not await _IsNodeInFabricList(devCtrl, newNodeId):
diff --git a/src/python_testing/TC_SC_3_6.py b/src/python_testing/TC_SC_3_6.py
new file mode 100644
index 0000000..ede817f
--- /dev/null
+++ b/src/python_testing/TC_SC_3_6.py
@@ -0,0 +1,247 @@
+#
+# Copyright (c) 2022 Project CHIP Authors
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from matter_testing_support import MatterBaseTest, default_matter_test_main, async_test_body
+import chip.clusters as Clusters
+import chip.FabricAdmin
+import logging
+from mobly import asserts
+from chip.utils import CommissioningBuildingBlocks
+from chip.clusters.Attribute import TypedAttributePath, SubscriptionTransaction
+import queue
+import asyncio
+from threading import Event
+import time
+
+# TODO: Overall, we need to add validation that session IDs have not changed throughout to be agnostic
+# to some internal behavior assumptions of the SDK we are making, relative to the write that
+# triggers the subscriptions not re-opening a new CASE session
+#
+
+
+class AttributeChangeAccumulator:
+ def __init__(self, name: str, expected_attribute: Clusters.ClusterAttributeDescriptor, output: queue.Queue):
+ self._name = name
+ self._output = output
+ self._expected_attribute = expected_attribute
+
+ def __call__(self, path: TypedAttributePath, transaction: SubscriptionTransaction):
+ if path.AttributeType == self._expected_attribute:
+ data = transaction.GetAttribute(path)
+
+ value = {
+ 'name': self._name,
+ 'endpoint': path.Path.EndpointId,
+ 'attribute': path.AttributeType,
+ 'value': data
+ }
+ logging.info("Got subscription report on client %s for %s: %s" % (self.name, path.AttributeType, data))
+ self._output.put(value)
+
+ @property
+ def name(self) -> str:
+ return self._name
+
+
+class ResubscriptionCatcher:
+ def __init__(self, name):
+ self._name = name
+ self._got_resubscription_event = Event()
+
+ def __call__(self, transaction: SubscriptionTransaction, terminationError, nextResubscribeIntervalMsec):
+ self._got_resubscription_event.set()
+ logging.info("Got resubscription on client %s" % self.name)
+
+ @property
+ def name(self) -> str:
+ return self._name
+
+ @property
+ def caught_resubscription(self) -> bool:
+ return self._got_resubscription_event.is_set()
+
+
+class TC_SC_3_6(MatterBaseTest):
+ def setup_class(self):
+ self._subscriptions = []
+
+ def teardown_class(self):
+ logging.info("Teardown: shutting down all subscriptions to avoid racy callbacks")
+ for subscription in self._subscriptions:
+ subscription.Shutdown()
+
+ @async_test_body
+ async def test_TC_SC_3_6(self):
+ dev_ctrl = self.default_controller
+
+ # Get overrides for debugging the test
+ num_fabrics_to_commission = self.user_params.get("num_fabrics_to_commission", 5)
+ num_controllers_per_fabric = self.user_params.get("num_controllers_per_fabric", 3)
+ # Immediate reporting
+ min_report_interval_sec = self.user_params.get("min_report_interval_sec", 0)
+ # 10 minutes max reporting interval --> We don't care about keep-alives per-se and
+ # want to avoid resubscriptions
+ max_report_interval_sec = self.user_params.get("max_report_interval_sec", 10 * 60)
+ # Time to wait after changing NodeLabel for subscriptions to all hit. This is dependent
+ # on MRP params of subscriber and on actual min_report_interval.
+ # TODO: Determine the correct max value depending on target. Test plan doesn't say!
+ timeout_delay_sec = self.user_params.get("timeout_delay_sec", max_report_interval_sec * 2)
+
+ BEFORE_LABEL = "Before Subscriptions"
+ AFTER_LABEL = "After Subscriptions"
+
+ # Generate list of all clients names
+ all_names = []
+ for fabric_idx in range(num_fabrics_to_commission):
+ for controller_idx in range(num_controllers_per_fabric):
+ all_names.append("RD%d%s" % (fabric_idx + 1, chr(ord('A') + controller_idx)))
+ logging.info("Client names that will be used: %s" % all_names)
+ client_list = []
+
+ logging.info("Pre-conditions: validate CapabilityMinima.CaseSessionsPerFabric >= 3")
+
+ capability_minima = await self.read_single_attribute(dev_ctrl, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.Basic.Attributes.CapabilityMinima)
+ asserts.assert_greater_equal(capability_minima.caseSessionsPerFabric, 3)
+
+ logging.info("Pre-conditions: use existing fabric to configure new fabrics so that total is %d fabrics" %
+ num_fabrics_to_commission)
+
+ # Generate Node IDs for the additional controllers, starting at 200 and stepping by 100 (200, 300, ...)
+ node_ids = [200 + (i * 100) for i in range(num_controllers_per_fabric - 1)]
+
+ # Prepare clients for first fabric, that includes the default controller
+ dev_ctrl.name = all_names.pop(0)
+ client_list.append(dev_ctrl)
+
+ if num_controllers_per_fabric > 1:
+ new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(fabricAdmin=dev_ctrl.fabricAdmin, adminDevCtrl=dev_ctrl, controllerNodeIds=node_ids, privilege=Clusters.AccessControl.Enums.Privilege.kAdminister, targetNodeId=self.dut_node_id)
+ for controller in new_controllers:
+ controller.name = all_names.pop(0)
+ client_list.extend(new_controllers)
+
+ # Prepare clients for subsequent fabrics
+ for i in range(num_fabrics_to_commission - 1):
+ admin_index = 2 + i
+ logging.info("Commissioning fabric %d/%d" % (admin_index, num_fabrics_to_commission))
+ new_fabric_admin = chip.FabricAdmin.FabricAdmin(vendorId=0xFFF1, adminIndex=admin_index)
+ new_admin_ctrl = new_fabric_admin.NewController(nodeId=dev_ctrl.nodeId)
+ new_admin_ctrl.name = all_names.pop(0)
+ client_list.append(new_admin_ctrl)
+ await CommissioningBuildingBlocks.AddNOCForNewFabricFromExisting(commissionerDevCtrl=dev_ctrl, newFabricDevCtrl=new_admin_ctrl, existingNodeId=self.dut_node_id, newNodeId=self.dut_node_id)
+
+ if num_controllers_per_fabric > 1:
+ new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(fabricAdmin=new_fabric_admin, adminDevCtrl=new_admin_ctrl,
+ controllerNodeIds=node_ids, privilege=Clusters.AccessControl.Enums.Privilege.kAdminister, targetNodeId=self.dut_node_id)
+ for controller in new_controllers:
+ controller.name = all_names.pop(0)
+
+ client_list.extend(new_controllers)
+
+ asserts.assert_equal(len(client_list), num_fabrics_to_commission *
+ num_controllers_per_fabric, "Must have the right number of clients")
+
+ # Before subscribing, set the NodeLabel to "Before Subscriptions"
+ logging.info("Pre-conditions: writing initial value of NodeLabel, so that we can control for change of attribute detection")
+ await client_list[0].WriteAttribute(self.dut_node_id, [(0, Clusters.Basic.Attributes.NodeLabel(value=BEFORE_LABEL))])
+
+ # Subscribe with all clients to NodeLabel attribute
+ sub_handlers = []
+ resub_catchers = []
+ output_queue = queue.Queue()
+
+ logging.info("Step 1 (first part): Establish subscription with all %d clients" % len(client_list))
+ for sub_idx, client in enumerate(client_list):
+ logging.info("Establishing subscription %d/%d from controller node %s" % (sub_idx + 1, len(client_list), client.name))
+
+ sub = await client.ReadAttribute(nodeid=self.dut_node_id, attributes=[(0, Clusters.Basic.Attributes.NodeLabel)],
+ reportInterval=(min_report_interval_sec, max_report_interval_sec), keepSubscriptions=False)
+ self._subscriptions.append(sub)
+
+ attribute_handler = AttributeChangeAccumulator(
+ name=client.name, expected_attribute=Clusters.Basic.Attributes.NodeLabel, output=output_queue)
+ sub.SetAttributeUpdateCallback(attribute_handler)
+ sub_handlers.append(attribute_handler)
+
+ # TODO: Replace resubscription catcher with API to disable re-subscription on failure
+ resub_catcher = ResubscriptionCatcher(name=client.name)
+ sub.SetResubscriptionAttemptedCallback(resub_catcher)
+ resub_catchers.append(resub_catcher)
+
+ asserts.assert_equal(len(self._subscriptions), len(client_list), "Must have the right number of subscriptions")
+
+ # Trigger a change on NodeLabel
+ logging.info(
+ "Step 1 (second part): Change attribute with one client, await all attributes changed within time")
+ await asyncio.sleep(1)
+ await client_list[0].WriteAttribute(self.dut_node_id, [(0, Clusters.Basic.Attributes.NodeLabel(value=AFTER_LABEL))])
+
+ all_changes = {client.name: False for client in client_list}
+
+ # Await a stabilization delay in increments to let the event loops run
+ start_time = time.time()
+ elapsed = 0
+ time_remaining = timeout_delay_sec
+
+ while time_remaining > 0:
+ try:
+ item = output_queue.get(block=True, timeout=time_remaining)
+ client_name, endpoint, attribute, value = item['name'], item['endpoint'], item['attribute'], item['value']
+
+ # Record arrival of an expected subscription change when seen
+ if endpoint == 0 and attribute == Clusters.Basic.Attributes.NodeLabel and value == AFTER_LABEL:
+ if not all_changes[client_name]:
+ logging.info("Got expected attribute change for client %s" % client_name)
+ all_changes[client_name] = True
+
+ # We are done waiting when we have accumulated all results
+ if all(all_changes.values()):
+ logging.info("All clients have reported, done waiting.")
+ break
+ except queue.Empty:
+ # No error, we update timeouts and keep going
+ pass
+
+ elapsed = time.time() - start_time
+ time_remaining = timeout_delay_sec - elapsed
+
+ logging.info("Validation of results")
+ failed = False
+
+ for catcher in resub_catchers:
+ if catcher.caught_resubscription:
+ logging.error("Client %s saw a resubscription" % catcher.name)
+ failed = True
+ else:
+ logging.info("Client %s correctly did not see a resubscription" % catcher.name)
+
+ all_reports_gotten = all(all_changes.values())
+ if not all_reports_gotten:
+ logging.error("Missing reports from the following clients: %s" %
+ ", ".join([name for name, value in all_changes.items() if value is False]))
+ failed = True
+ else:
+ logging.info("Got successful reports from all clients, meaning all concurrent CASE sessions worked")
+
+ # Determine final result
+ if failed:
+ asserts.fail("Failed test!")
+
+ # Pass is implicit if not failed
+
+
+if __name__ == "__main__":
+ default_matter_test_main()
diff --git a/src/python_testing/hello_test.py b/src/python_testing/hello_test.py
index 51a380b..d6bde7d 100644
--- a/src/python_testing/hello_test.py
+++ b/src/python_testing/hello_test.py
@@ -1,3 +1,20 @@
+#
+# Copyright (c) 2022 Project CHIP Authors
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
from matter_testing_support import MatterBaseTest, default_matter_test_main, async_test_body
from chip.interaction_model import Status
import chip.clusters as Clusters
diff --git a/src/python_testing/matter_testing_support.py b/src/python_testing/matter_testing_support.py
index 660f1c8..9ac5f8e 100644
--- a/src/python_testing/matter_testing_support.py
+++ b/src/python_testing/matter_testing_support.py
@@ -121,6 +121,7 @@
storage_path: pathlib.Path = None
logs_path: pathlib.Path = None
paa_trust_store_path: pathlib.Path = None
+ ble_interface_id: int = None
admin_vendor_id: int = _DEFAULT_ADMIN_VENDOR_ID
global_test_params: dict = field(default_factory=dict)
@@ -130,6 +131,7 @@
commissioning_method: str = None
discriminator: int = None
setup_passcode: int = None
+ commissionee_ip_address_just_for_testing: str = None
qr_code_content: str = None
manual_code: str = None
@@ -158,7 +160,7 @@
self._fabric_admins = []
if not hasattr(builtins, "chipStack"):
- chip.native.Init()
+ chip.native.Init(bluetoothAdapter=config.ble_interface_id)
if config.storage_path is None:
raise ValueError("Must have configured a MatterTestConfig.storage_path")
self._init_stack(already_initialized=False, persistentStoragePath=config.storage_path)
@@ -468,6 +470,11 @@
print("error: missing --thread-dataset-hex <DATASET_HEX> for --commissioning-method ble-thread!")
return False
config.thread_operational_dataset = args.thread_dataset_hex
+ elif config.commissioning_method == "on-network-ip":
+ if args.ip_addr is None:
+ print("error: missing --ip-addr <IP_ADDRESS> for --commissioning-method on-network-ip")
+ return False
+ config.commissionee_ip_address_just_for_testing = args.ip_addr
return True
@@ -482,6 +489,7 @@
config.storage_path = pathlib.Path(_DEFAULT_STORAGE_PATH) if args.storage_path is None else args.storage_path
config.logs_path = pathlib.Path(_DEFAULT_LOG_PATH) if args.logs_path is None else args.logs_path
config.paa_trust_store_path = args.paa_trust_store_path
+ config.ble_interface_id = args.ble_interface_id
config.controller_node_id = args.controller_node_id
@@ -521,6 +529,8 @@
paa_path_default = get_default_paa_trust_store(pathlib.Path.cwd())
basic_group.add_argument('--paa-trust-store-path', action="store", type=pathlib.Path, metavar="PATH", default=paa_path_default,
help="PAA trust store path (default: %s)" % str(paa_path_default))
+ basic_group.add_argument('--ble-interface-id', action="store", type=int,
+ metavar="INTERFACE_ID", help="ID of BLE adapter (from hciconfig)")
basic_group.add_argument('-N', '--controller-node-id', type=int_decimal_or_hex,
metavar='NODE_ID',
default=_DEFAULT_CONTROLLER_NODE_ID,
@@ -533,7 +543,7 @@
commission_group.add_argument('-m', '--commissioning-method', type=str,
metavar='METHOD_NAME',
- choices=["on-network", "ble-wifi", "ble-thread"],
+ choices=["on-network", "ble-wifi", "ble-thread", "on-network-ip"],
help='Name of commissioning method to use')
commission_group.add_argument('-d', '--discriminator', type=int_decimal_or_hex,
metavar='LONG_DISCRIMINATOR',
@@ -541,6 +551,9 @@
commission_group.add_argument('-p', '--passcode', type=int_decimal_or_hex,
metavar='PASSCODE',
help='PAKE passcode to use')
+ commission_group.add_argument('-i', '--ip-addr', type=str,
+ metavar='RAW_IP_ADDRESS',
+ help='IP address to use (only for method "on-network-ip"). ONLY FOR LOCAL TESTING!')
commission_group.add_argument('--wifi-ssid', type=str,
metavar='SSID',
@@ -634,6 +647,9 @@
return dev_ctrl.CommissionWiFi(conf.discriminator, conf.setup_passcode, conf.dut_node_id, conf.wifi_ssid, conf.wifi_passphrase)
elif conf.commissioning_method == "ble-thread":
return dev_ctrl.CommissionThread(conf.discriminator, conf.setup_passcode, conf.dut_node_id, conf.thread_operational_dataset)
+ elif conf.commissioning_method == "on-network-ip":
+ logging.warning("==== USING A DIRECT IP COMMISSIONING METHOD NOT SUPPORTED IN THE LONG TERM ====")
+ return dev_ctrl.CommissionIP(ipaddr=conf.commissionee_ip_address_just_for_testing, setupPinCode=conf.setup_passcode, nodeid=conf.dut_node_id)
else:
raise ValueError("Invalid commissioning method %s!" % conf.commissioning_method)