| /* |
| * |
| * Copyright (c) 2025 Project CHIP Authors |
| * All rights reserved. |
| * |
| * Licensed under the Apache License, Version 2.0 (the "License"); |
| * you may not use this file except in compliance with the License. |
| * You may obtain a copy of the License at |
| * |
| * http://www.apache.org/licenses/LICENSE-2.0 |
| * |
| * Unless required by applicable law or agreed to in writing, software |
| * distributed under the License is distributed on an "AS IS" BASIS, |
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| * See the License for the specific language governing permissions and |
| * limitations under the License. |
| */ |
| #pragma once |
| |
| #include <app/StatusResponse.h> |
| #include <app/reporting/reporting.h> |
| #include <app/server-cluster/DefaultServerCluster.h> |
| #include <clusters/CameraAvStreamManagement/Attributes.h> |
| #include <clusters/CameraAvStreamManagement/Commands.h> |
| |
| #include <app/SafeAttributePersistenceProvider.h> |
| #include <lib/core/CHIPPersistentStorageDelegate.h> |
| #include <lib/support/TypeTraits.h> |
| #include <optional> |
| #include <protocols/interaction_model/StatusCode.h> |
| #include <vector> |
| |
| namespace chip { |
| namespace app { |
| namespace Clusters { |
| namespace CameraAvStreamManagement { |
| |
| using VideoStreamStruct = Structs::VideoStreamStruct::Type; |
| using AudioStreamStruct = Structs::AudioStreamStruct::Type; |
| using SnapshotStreamStruct = Structs::SnapshotStreamStruct::Type; |
| using AudioCapabilitiesStruct = Structs::AudioCapabilitiesStruct::Type; |
| using VideoSensorParamsStruct = Structs::VideoSensorParamsStruct::Type; |
| using SnapshotCapabilitiesStruct = Structs::SnapshotCapabilitiesStruct::Type; |
| using VideoResolutionStruct = Structs::VideoResolutionStruct::Type; |
| using RateDistortionTradeOffStruct = Structs::RateDistortionTradeOffPointsStruct::Type; |
| using StreamUsageEnum = Globals::StreamUsageEnum; |
| |
| constexpr uint8_t kMaxSpeakerLevel = 254; |
| constexpr uint8_t kMaxMicrophoneLevel = 254; |
| constexpr uint16_t kMaxImageRotationDegrees = 359; |
| constexpr uint8_t kMaxChannelCount = 8; |
| constexpr uint8_t kMaxImageQualityMetric = 100; |
| constexpr uint16_t kMaxKeyFrameIntervalMaxValue = 65500; |
| // Conservative room for other fields (resolution + codec) in |
| // capture snapshot response. TODO: Make a tighter bound. |
| constexpr size_t kMaxSnapshotImageSize = kMaxLargeSecureSduLengthBytes - 100; |
| |
| constexpr size_t kViewportStructMaxSerializedSize = |
| TLV::EstimateStructOverhead(sizeof(uint16_t), sizeof(uint16_t), sizeof(uint16_t), sizeof(uint16_t)); |
| |
| // The number of possible values of StreamUsageEnum. |
| constexpr size_t kNumOfStreamUsageTypes = 4; |
| |
| // StreamUsageEnum + Anonymous tag (1 byte). |
| // Assumes min-size encoding (1 byte) for the integer. |
| constexpr size_t kStreamUsageTlvSize = sizeof(Globals::StreamUsageEnum) + 1; |
| |
| // 1 control byte + end-of-array marker |
| constexpr size_t kArrayTlvOverhead = 2; |
| |
| constexpr size_t kStreamUsagePrioritiesTlvSize = kArrayTlvOverhead + kStreamUsageTlvSize * kNumOfStreamUsageTypes; |
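| // Worked example (assuming StreamUsageEnum is backed by a 1-byte integer, so kStreamUsageTlvSize == 2): |
| //   kStreamUsagePrioritiesTlvSize = kArrayTlvOverhead + kStreamUsageTlvSize * kNumOfStreamUsageTypes |
| //                                 = 2 + 2 * 4 = 10 bytes. |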
| |
| // Calculate VideoResolutionStruct TLV encoding size |
| constexpr size_t kVideoResolutionStructMaxSerializedSize = TLV::EstimateStructOverhead(sizeof(uint16_t), sizeof(uint16_t)); |
| |
| // Calculate VideoStreamStruct TLV encoding size |
| constexpr size_t kMaxOneVideoStreamStructSerializedSize = |
| TLV::EstimateStructOverhead(sizeof(uint16_t), // videoStreamID |
| sizeof(Globals::StreamUsageEnum), // streamUsage |
| sizeof(VideoCodecEnum), // videoCodec |
| sizeof(uint16_t), // minFrameRate |
| sizeof(uint16_t), // maxFrameRate |
| kVideoResolutionStructMaxSerializedSize, // minResolution |
| kVideoResolutionStructMaxSerializedSize, // maxResolution |
| sizeof(uint32_t), // minBitRate |
| sizeof(uint32_t), // maxBitRate |
| sizeof(uint16_t), // keyFrameInterval |
| sizeof(bool), // watermarkEnabled (Optional<bool>) |
| sizeof(bool), // OSDEnabled (Optional<bool>) |
| sizeof(uint8_t) // referenceCount |
| ); |
| constexpr size_t kMaxAllocatedVideoStreamsSerializedSize = |
| kArrayTlvOverhead + (CHIP_CONFIG_MAX_NUM_CAMERA_VIDEO_STREAMS * kMaxOneVideoStreamStructSerializedSize); |
| |
| // Calculate SnapshotStreamStruct TLV encoding size |
| constexpr size_t kMaxOneSnapshotStructSerializedSize = |
| TLV::EstimateStructOverhead(sizeof(uint16_t), // snapshotStreamID |
| sizeof(ImageCodecEnum), // imageCodec |
| sizeof(uint16_t), // frameRate |
| kVideoResolutionStructMaxSerializedSize, // minResolution |
| kVideoResolutionStructMaxSerializedSize, // maxResolution |
| sizeof(uint8_t), // quality |
| sizeof(uint8_t), // referenceCount |
| sizeof(bool), // encodedPixels |
| sizeof(bool), // hardwareEncoder |
| sizeof(bool), // watermarkEnabled (Optional<bool>) |
| sizeof(bool) // OSDEnabled (Optional<bool>) |
| ); |
| // Max size for the TLV-encoded array of SnapshotStreamStruct |
| constexpr size_t kMaxAllocatedSnapshotStreamsSerializedSize = |
| kArrayTlvOverhead + (CHIP_CONFIG_MAX_NUM_CAMERA_SNAPSHOT_STREAMS * kMaxOneSnapshotStructSerializedSize); |
| |
| // Calculate AudioStreamStruct TLV encoding size |
| constexpr size_t kMaxOneAudioStreamStructSerializedSize = |
| TLV::EstimateStructOverhead(sizeof(uint16_t), // audioStreamID |
| sizeof(Globals::StreamUsageEnum), // streamUsage |
| sizeof(AudioCodecEnum), // audioCodec |
| sizeof(uint8_t), // channelCount |
| sizeof(uint32_t), // sampleRate |
| sizeof(uint32_t), // bitRate |
| sizeof(uint8_t), // bitDepth |
| sizeof(uint8_t) // referenceCount |
| ); |
| // Max size for the TLV-encoded array of AudioStreamStruct |
| constexpr size_t kMaxAllocatedAudioStreamsSerializedSize = |
| kArrayTlvOverhead + (CHIP_CONFIG_MAX_NUM_CAMERA_AUDIO_STREAMS * kMaxOneAudioStreamStructSerializedSize); |
| |
| enum class StreamAllocationAction |
| { |
| kNewAllocation, // Fresh stream allocation - always start |
| kModification, // Existing stream with parameter changes - restart if active |
| kReuse // Reusing existing stream without changes - no action needed |
| }; |
| |
| enum class StreamType |
| { |
| kAudio, |
| kVideo, |
| kSnapshot |
| }; |
| |
| // Forward declaration for the StreamTraits helper struct. |
| template <AttributeId TAttributeId> |
| struct StreamTraits; |
| |
| class CameraAVStreamManagementCluster; |
| |
| // ImageSnapshot response data for a CaptureSnapshot command. |
| struct ImageSnapshot |
| { |
| std::vector<uint8_t> data; // Buffer to hold the image data |
| VideoResolutionStruct imageRes; // Image resolution |
| ImageCodecEnum imageCodec; // Image codec used |
| }; |
| |
| /** @brief |
| * Defines the interface for implementing application-specific logic for the CameraAvStreamManagement Cluster, |
| * in particular command handling and loading of the allocated streams. |
| */ |
| class CameraAVStreamManagementDelegate |
| { |
| public: |
| CameraAVStreamManagementDelegate() = default; |
| |
| virtual ~CameraAVStreamManagementDelegate() = default; |
| |
| /** |
| * @brief Handle Command Delegate for Video stream allocation with the provided parameter list. |
| * |
| * @param[in] allocateArgs Structure with parameters for video stream allocation. |
| * The videoStreamID and referenceCount fields in the struct |
| * must be ignored by the callee. |
| * |
| * @param[out] outStreamID Indicates the ID of the allocated Video Stream. |
| * |
| * @return Success if the allocation is successful and a VideoStreamID was |
| * produced; otherwise, the command SHALL be rejected with an appropriate |
| * error. |
| */ |
| virtual Protocols::InteractionModel::Status VideoStreamAllocate(const VideoStreamStruct & allocateArgs, |
| uint16_t & outStreamID) = 0; |
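| |
| // Illustrative sketch only: a delegate reserves whatever camera resources the request needs and |
| // reports back a stream ID of its choosing. ReserveEncoderFor() and mNextVideoStreamId are |
| // hypothetical application-side helpers, not part of this API. |
| // |
| //   Protocols::InteractionModel::Status MyCameraDelegate::VideoStreamAllocate(const VideoStreamStruct & allocateArgs, |
| //                                                                             uint16_t & outStreamID) |
| //   { |
| //       if (!ReserveEncoderFor(allocateArgs)) |
| //       { |
| //           return Protocols::InteractionModel::Status::ResourceExhausted; |
| //       } |
| //       outStreamID = mNextVideoStreamId++; |
| //       return Protocols::InteractionModel::Status::Success; |
| //   } |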
| |
| /** |
| * @brief Called after the server has finalized video stream allocation and narrowed parameters. |
| * This is where the actual video stream should be started using the final allocated parameters. |
| * |
| * @param allocatedStream The finalized video stream with narrowed parameters from the server. |
| * @param action Action indicating how to handle the stream: new allocation, modification, or reuse. |
| */ |
| virtual void OnVideoStreamAllocated(const VideoStreamStruct & allocatedStream, StreamAllocationAction action) = 0; |
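| |
| // Illustrative sketch only: one way an application delegate might react to the StreamAllocationAction. |
| // StartVideoEncoder(), RestartVideoEncoder() and IsEncoderRunning() are hypothetical application hooks. |
| // |
| //   void MyCameraDelegate::OnVideoStreamAllocated(const VideoStreamStruct & allocatedStream, |
| //                                                 StreamAllocationAction action) |
| //   { |
| //       switch (action) |
| //       { |
| //       case StreamAllocationAction::kNewAllocation: |
| //           StartVideoEncoder(allocatedStream); |
| //           break; |
| //       case StreamAllocationAction::kModification: |
| //           if (IsEncoderRunning(allocatedStream.videoStreamID)) |
| //           { |
| //               RestartVideoEncoder(allocatedStream); |
| //           } |
| //           break; |
| //       case StreamAllocationAction::kReuse: |
| //           // Stream is unchanged; nothing to start or restart. |
| //           break; |
| //       } |
| //   } |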
| |
| /** |
| * @brief Handle Command Delegate for Video stream modification. |
| * |
| * @param streamID Indicates the streamID of the video stream to modify. |
| * |
| * @param waterMarkEnabled Indicates whether a watermark should be applied to the video stream. |
| * Defaults to false if the feature is unsupported. |
| * |
| * @param osdEnabled Indicates whether the on-screen display should be applied to the video stream. |
| * Defaults to false if the feature is unsupported. |
| * |
| * @return Success if the stream modification is successful; otherwise, the command SHALL be rejected with an appropriate |
| * error. |
| */ |
| virtual Protocols::InteractionModel::Status VideoStreamModify(const uint16_t streamID, const Optional<bool> waterMarkEnabled, |
| const Optional<bool> osdEnabled) = 0; |
| |
| /** |
| * @brief Handle Command Delegate for Video stream deallocation for the |
| * provided streamID. |
| * |
| * @param streamID Indicates the streamID to deallocate. |
| * |
| * @return Success if the stream was found and deallocated; otherwise, an appropriate error. |
| * |
| */ |
| virtual Protocols::InteractionModel::Status VideoStreamDeallocate(const uint16_t streamID) = 0; |
| |
| /** |
| * @brief Handle Command Delegate for Audio stream allocation. |
| * |
| * @param[in] allocateArgs Structure with parameters for audio stream allocation. |
| * The audioStreamID and referenceCount fields in the struct |
| * must be ignored by the callee. |
| * |
| * @param[out] outStreamID Indicates the ID of the allocated Audio Stream. |
| * |
| * @return Success if the allocation is successful and an AudioStreamID was |
| * produced; otherwise, the command SHALL be rejected with an appropriate |
| * error. |
| */ |
| virtual Protocols::InteractionModel::Status AudioStreamAllocate(const AudioStreamStruct & allocateArgs, |
| uint16_t & outStreamID) = 0; |
| |
| /** |
| * @brief Handle Command Delegate for Audio stream deallocation. |
| * |
| * @param streamID Indicates the streamID to deallocate. |
| * |
| * @return Success if the stream was found and deallocated; otherwise, an appropriate error. |
| */ |
| virtual Protocols::InteractionModel::Status AudioStreamDeallocate(const uint16_t streamID) = 0; |
| |
| struct SnapshotStreamAllocateArgs |
| { |
| ImageCodecEnum imageCodec; |
| uint16_t maxFrameRate; |
| Structs::VideoResolutionStruct::Type minResolution; |
| Structs::VideoResolutionStruct::Type maxResolution; |
| uint8_t quality; |
| bool encodedPixels; |
| bool hardwareEncoder; |
| Optional<bool> watermarkEnabled; |
| Optional<bool> OSDEnabled; |
| }; |
| |
| /** |
| * @brief Handle Command Delegate for Snapshot stream allocation. |
| * |
| * @param[in] allocateArgs Structure with parameters for snapshot stream allocation. |
| * The snapshotStreamID and referenceCount fields in the struct |
| * must be ignored by the callee. |
| * |
| * @param[out] outStreamID Indicates the ID of the allocated Snapshot Stream. |
| * |
| * @return Success if the allocation is successful and a SnapshotStreamID was |
| * produced; otherwise, the command SHALL be rejected with an appropriate |
| * error. |
| */ |
| virtual Protocols::InteractionModel::Status SnapshotStreamAllocate(const SnapshotStreamAllocateArgs & allocateArgs, |
| uint16_t & outStreamID) = 0; |
| |
| /** |
| * @brief Handle Command Delegate for Snapshot stream modification. |
| * |
| * @param streamID Indicates the streamID of the snapshot stream to modify. |
| * |
| * @param waterMarkEnabled Indicates whether a watermark should be applied to the snapshot stream. |
| * Defaults to false if the feature is unsupported. |
| * |
| * @param osdEnabled Indicates whether the on-screen display should be applied to the snapshot stream. |
| * Defaults to false if the feature is unsupported. |
| * |
| * @return Success if the stream modification is successful; otherwise, the command SHALL be rejected with an appropriate |
| * error. |
| */ |
| virtual Protocols::InteractionModel::Status SnapshotStreamModify(const uint16_t streamID, const Optional<bool> waterMarkEnabled, |
| const Optional<bool> osdEnabled) = 0; |
| |
| /** |
| * @brief Handle Command Delegate for Snapshot stream deallocation. |
| * |
| * @param streamID Indicates the streamID to deallocate. |
| * |
| * @return Success if the stream was found and deallocated; otherwise, an appropriate error. |
| */ |
| virtual Protocols::InteractionModel::Status SnapshotStreamDeallocate(const uint16_t streamID) = 0; |
| |
| /** |
| * @brief Delegate callback for notifying a change in StreamUsagePriorities. |
| * |
| */ |
| virtual void OnStreamUsagePrioritiesChanged() = 0; |
| |
| /** |
| * @brief Delegate callback for notifying a change in an attribute. |
| * |
| */ |
| virtual void OnAttributeChanged(AttributeId attributeId) = 0; |
| |
| /** |
| * @brief Handle Command Delegate for CaptureSnapshot. |
| * |
| * @param streamID Indicates the streamID representing the snapshot stream. |
| * |
| * @param resolution Indicates the preferred resolution of the snapshot image. |
| * |
| * @param[out] outImageSnapshot The captured image data, along with its resolution and codec. |
| * |
| * @return Success if the processing of the Command is successful; otherwise, the command SHALL be rejected with an |
| * appropriate error. |
| */ |
| virtual Protocols::InteractionModel::Status CaptureSnapshot(const DataModel::Nullable<uint16_t> streamID, |
| const VideoResolutionStruct & resolution, |
| ImageSnapshot & outImageSnapshot) = 0; |
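| |
| // Illustrative sketch only: a delegate might service CaptureSnapshot by copying a freshly captured |
| // image into the provided ImageSnapshot. GetLatestEncodedFrame() and mSnapshotImageCodec are |
| // hypothetical application-side helpers. |
| // |
| //   Protocols::InteractionModel::Status |
| //   MyCameraDelegate::CaptureSnapshot(const DataModel::Nullable<uint16_t> streamID, |
| //                                     const VideoResolutionStruct & resolution, |
| //                                     ImageSnapshot & outImageSnapshot) |
| //   { |
| //       std::vector<uint8_t> frame = GetLatestEncodedFrame(streamID, resolution); |
| //       if (frame.empty() || frame.size() > kMaxSnapshotImageSize) |
| //       { |
| //           return Protocols::InteractionModel::Status::Failure; |
| //       } |
| //       outImageSnapshot.data       = std::move(frame); |
| //       outImageSnapshot.imageRes   = resolution; |
| //       outImageSnapshot.imageCodec = mSnapshotImageCodec; // codec recorded at stream allocation |
| //       return Protocols::InteractionModel::Status::Success; |
| //   } |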
| |
| /** |
| * @brief Callback into the delegate once persistent attributes managed by |
| * the Cluster have been loaded from Storage. |
| */ |
| virtual CHIP_ERROR PersistentAttributesLoadedCallback() = 0; |
| |
| /** |
| * @brief Called by transports when they start using the corresponding audio and video streams. |
| * |
| */ |
| virtual CHIP_ERROR OnTransportAcquireAudioVideoStreams(uint16_t audioStreamID, uint16_t videoStreamID) = 0; |
| |
| /** |
| * @brief Called by transports when they release the corresponding audio and video streams. |
| * |
| */ |
| virtual CHIP_ERROR OnTransportReleaseAudioVideoStreams(uint16_t audioStreamID, uint16_t videoStreamID) = 0; |
| |
| /** |
| * @brief Provides read-only access to the list of currently allocated video streams. |
| * This allows other components (like PushAVStreamTransportManager) to query |
| * allocated stream parameters (e.g., for bandwidth calculation) without directly |
| * accessing the CameraAVStreamManagementCluster instance. |
| * |
| * @return A const reference to the vector of allocated video stream structures. |
| */ |
| virtual const std::vector<VideoStreamStruct> & GetAllocatedVideoStreams() const = 0; |
| |
| /** |
| * @brief Provides read-only access to the list of currently allocated audio streams. |
| * This allows other components (like PushAVStreamTransportManager) to query |
| * allocated stream parameters (e.g., for bandwidth calculation) without directly |
| * accessing the CameraAVStreamManagementCluster instance. |
| * |
| * @return A const reference to the vector of allocated audio stream structures. |
| */ |
| virtual const std::vector<AudioStreamStruct> & GetAllocatedAudioStreams() const = 0; |
| |
| private: |
| friend class CameraAVStreamManagementCluster; |
| |
| CameraAVStreamManagementCluster * mCameraAVStreamManagementCluster = nullptr; |
| |
| /** |
| * This method is used by the SDK to ensure the delegate points to the server instance it's associated with. |
| * When a server instance is created or destroyed, this method will be called to set and clear, respectively, |
| * the pointer to the server instance. |
| * |
| * @param aCameraAVStreamManagementCluster A pointer to the CameraAVStreamManagementCluster object related to this delegate |
| * object. |
| */ |
| void SetCameraAVStreamManagementCluster(CameraAVStreamManagementCluster * aCameraAVStreamManagementCluster) |
| { |
| mCameraAVStreamManagementCluster = aCameraAVStreamManagementCluster; |
| } |
| |
| protected: |
| CameraAVStreamManagementCluster * GetCameraAVStreamManagementCluster() const { return mCameraAVStreamManagementCluster; } |
| }; |
| |
| enum class OptionalAttribute : uint32_t |
| { |
| kHardPrivacyModeOn = 0x0001, |
| kNightVisionIllum = 0x0002, |
| kMicrophoneAGCEnabled = 0x0004, |
| kImageRotation = 0x0008, |
| kImageFlipHorizontal = 0x0010, |
| kImageFlipVertical = 0x0020, |
| kStatusLightEnabled = 0x0040, |
| kStatusLightBrightness = 0x0080, |
| }; |
| |
| class CameraAVStreamManagementCluster : public DefaultServerCluster |
| { |
| public: |
| /** |
| * @brief Creates a Camera AV Stream Management cluster instance. The Init() function needs to be called for this instance |
| * to be registered and called by the interaction model at the appropriate times. |
| * |
| * @param aDelegate A reference to the delegate to be used by this server. |
| * Note: the caller must ensure that the delegate lives throughout the instance's |
| * lifetime. |
| * |
| * @param aEndpointId The endpoint on which this cluster exists. This must match the zap configuration. |
| * @param aFeatures The bitflags value that identifies which features are supported by this instance. |
| * @param aOptionalAttrs The bitflags value that identifies the optional attributes supported by this |
| * instance. |
| * @param aMaxConcurrentEncoders The maximum number of video encoders supported by the camera. |
| * @param aMaxEncodedPixelRate The maximum data rate (encoded pixels/sec) supported by the camera. |
| * @param aVideoSensorParams The set of video sensor parameters for the camera. |
| * @param aNightVisionUsesInfrared Indicates whether night vision mode uses infrared. |
| * @param aMinViewPort Indicates the minimum resolution (width/height) in pixels allowed for the camera viewport. |
| * @param aRateDistortionTradeOffPoints Indicates the list of rate distortion trade-off points for supported hardware |
| * encoders. |
| * @param aMaxContentBufferSize The maximum size of the content buffer containing data for all streams, including |
| * pre-roll. |
| * @param aMicrophoneCapabilities Indicates the audio capabilities of the microphone in terms of the codec used, |
| * supported sample rates and the number of channels. |
| * @param aSpkrCapabilities Indicates the audio capabilities of the speaker in terms of the codec used, |
| * supported sample rates and the number of channels. |
| * @param aTwoWayTalkSupport Indicates the type of two-way talk support the device has, e.g., half-duplex, |
| * full-duplex, etc. |
| * @param aSnapshotCapabilities Indicates the set of snapshot capabilities supported by the device, e.g., the image |
| * codec, the resolution and the maximum frame rate. |
| * @param aMaxNetworkBandwidth Indicates the maximum network bandwidth (in bps) that the device would consume |
| * for the transmission of its media streams. |
| * @param aSupportedStreamUsages Indicates the stream usages supported by the device. |
| * @param aStreamUsagePriorities Indicates the priority ranking of the supported stream usages. |
| * |
| */ |
| CameraAVStreamManagementCluster(CameraAVStreamManagementDelegate & aDelegate, EndpointId aEndpointId, |
| const BitFlags<Feature> aFeatures, const BitFlags<OptionalAttribute> aOptionalAttrs, |
| uint8_t aMaxConcurrentEncoders, uint32_t aMaxEncodedPixelRate, |
| const VideoSensorParamsStruct & aVideoSensorParams, bool aNightVisionUsesInfrared, |
| const VideoResolutionStruct & aMinViewPort, |
| const std::vector<RateDistortionTradeOffStruct> & aRateDistortionTradeOffPoints, |
| uint32_t aMaxContentBufferSize, const AudioCapabilitiesStruct & aMicrophoneCapabilities, |
| const AudioCapabilitiesStruct & aSpkrCapabilities, TwoWayTalkSupportTypeEnum aTwoWayTalkSupport, |
| const std::vector<SnapshotCapabilitiesStruct> & aSnapshotCapabilities, |
| uint32_t aMaxNetworkBandwidth, |
| const std::vector<Globals::StreamUsageEnum> & aSupportedStreamUsages, |
| const std::vector<Globals::StreamUsageEnum> & aStreamUsagePriorities); |
| |
| ~CameraAVStreamManagementCluster() override; |
| |
| /** |
| * @brief Initialise the Camera AV Stream Management server instance. |
| * This function must be called after constructing a CameraAVStreamManagementCluster object. |
| * @return Returns an error if the given endpoint and cluster ID have not been enabled in zap or if the |
| * CommandHandler or AttributeHandler registration fails; otherwise returns CHIP_NO_ERROR. |
| * This method also validates the feature settings and returns CHIP_ERROR_INVALID_ARGUMENT if they are invalid. |
| */ |
| CHIP_ERROR Init(); |
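| |
| // Illustrative sketch only: typical construction and registration. MyCameraDelegate and all the |
| // lower-case capability variables are application-provided placeholders. |
| // |
| //   MyCameraDelegate delegate; |
| //   BitFlags<Feature> features;                // set the Feature bits this product supports |
| //   BitFlags<OptionalAttribute> optionalAttrs; // e.g. optionalAttrs.Set(OptionalAttribute::kNightVisionIllum); |
| // |
| //   CameraAVStreamManagementCluster cluster(delegate, /* endpoint */ 1, features, optionalAttrs, |
| //                                           maxConcurrentEncoders, maxEncodedPixelRate, videoSensorParams, |
| //                                           nightVisionUsesInfrared, minViewPort, rateDistortionTradeOffPoints, |
| //                                           maxContentBufferSize, micCapabilities, spkrCapabilities, |
| //                                           twoWayTalkSupport, snapshotCapabilities, maxNetworkBandwidth, |
| //                                           supportedStreamUsages, streamUsagePriorities); |
| //   ReturnErrorOnFailure(cluster.Init()); |
| // |
| // Both the delegate and the cluster object must outlive the registration (see the delegate lifetime |
| // note on the constructor above). |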
| |
| // Server cluster implementation |
| DataModel::ActionReturnStatus ReadAttribute(const DataModel::ReadAttributeRequest & request, |
| AttributeValueEncoder & encoder) override; |
| |
| DataModel::ActionReturnStatus WriteAttribute(const DataModel::WriteAttributeRequest & request, |
| AttributeValueDecoder & decoder) override; |
| |
| std::optional<DataModel::ActionReturnStatus> InvokeCommand(const DataModel::InvokeRequest & request, |
| TLV::TLVReader & input_arguments, CommandHandler * handler) override; |
| |
| CHIP_ERROR Attributes(const ConcreteClusterPath & path, ReadOnlyBufferBuilder<DataModel::AttributeEntry> & builder) override; |
| |
| CHIP_ERROR AcceptedCommands(const ConcreteClusterPath & path, |
| ReadOnlyBufferBuilder<DataModel::AcceptedCommandEntry> & builder) override; |
| |
| CHIP_ERROR GeneratedCommands(const ConcreteClusterPath & path, ReadOnlyBufferBuilder<CommandId> & builder) override; |
| |
| bool HasFeature(Feature feature) const; |
| |
| bool SupportsOptAttr(OptionalAttribute aOptionalAttr) const; |
| |
| bool IsLocalVideoRecordingEnabled() const; |
| |
| // Attribute Setters |
| CHIP_ERROR SetCurrentFrameRate(uint16_t aCurrentFrameRate); |
| |
| CHIP_ERROR SetHDRModeEnabled(bool aHDRModeEnabled); |
| |
| CHIP_ERROR SetSoftRecordingPrivacyModeEnabled(bool aSoftRecordingPrivacyModeEnabled); |
| |
| CHIP_ERROR SetSoftLivestreamPrivacyModeEnabled(bool aSoftLivestreamPrivacyModeEnabled); |
| |
| CHIP_ERROR SetHardPrivacyModeOn(bool aHardPrivacyModeOn); |
| |
| CHIP_ERROR SetNightVisionUsesInfrared(bool aNightVisionUsesInfrared); |
| |
| CHIP_ERROR SetNightVision(TriStateAutoEnum aNightVision); |
| |
| CHIP_ERROR SetNightVisionIllum(TriStateAutoEnum aNightVisionIllum); |
| |
| CHIP_ERROR SetViewport(const Globals::Structs::ViewportStruct::Type & aViewport); |
| |
| CHIP_ERROR SetSpeakerMuted(bool aSpeakerMuted); |
| |
| CHIP_ERROR SetSpeakerVolumeLevel(uint8_t aSpeakerVolumeLevel); |
| |
| CHIP_ERROR SetSpeakerMaxLevel(uint8_t aSpeakerMaxLevel); |
| |
| CHIP_ERROR SetSpeakerMinLevel(uint8_t aSpeakerMinLevel); |
| |
| CHIP_ERROR SetMicrophoneMuted(bool aMicrophoneMuted); |
| |
| CHIP_ERROR SetMicrophoneVolumeLevel(uint8_t aMicrophoneVolumeLevel); |
| |
| CHIP_ERROR SetMicrophoneMaxLevel(uint8_t aMicrophoneMaxLevel); |
| |
| CHIP_ERROR SetMicrophoneMinLevel(uint8_t aMicrophoneMinLevel); |
| |
| CHIP_ERROR SetMicrophoneAGCEnabled(bool aMicrophoneAGCEnabled); |
| |
| CHIP_ERROR SetImageRotation(uint16_t aImageRotation); |
| |
| CHIP_ERROR SetImageFlipHorizontal(bool aImageFlipHorizontal); |
| |
| CHIP_ERROR SetImageFlipVertical(bool aImageFlipVertical); |
| |
| CHIP_ERROR SetLocalVideoRecordingEnabled(bool aLocalVideoRecordingEnabled); |
| |
| CHIP_ERROR SetLocalSnapshotRecordingEnabled(bool aLocalSnapshotRecordingEnabled); |
| |
| CHIP_ERROR SetStatusLightEnabled(bool aStatusLightEnabled); |
| |
| CHIP_ERROR SetStatusLightBrightness(Globals::ThreeLevelAutoEnum aStatusLightBrightness); |
| |
| // Attribute Getters |
| uint8_t GetMaxConcurrentEncoders() const { return mMaxConcurrentEncoders; } |
| |
| uint32_t GetMaxEncodedPixelRate() const { return mMaxEncodedPixelRate; } |
| |
| const VideoSensorParamsStruct & GetVideoSensorParams() const { return mVideoSensorParams; } |
| |
| bool GetNightVisionUsesInfrared() const { return mNightVisionUsesInfrared; } |
| |
| const VideoResolutionStruct & GetMinViewportResolution() const { return mMinViewPortResolution; } |
| |
| const std::vector<RateDistortionTradeOffStruct> & GetRateDistortionTradeOffPoints() const |
| { |
| return mRateDistortionTradeOffPointsList; |
| } |
| |
| uint32_t GetMaxContentBufferSize() const { return mMaxContentBufferSize; } |
| |
| const AudioCapabilitiesStruct & GetMicrophoneCapabilities() const { return mMicrophoneCapabilities; } |
| |
| const AudioCapabilitiesStruct & GetSpeakerCapabilities() const { return mSpeakerCapabilities; } |
| |
| TwoWayTalkSupportTypeEnum GetTwoWayTalkSupport() const { return mTwoWayTalkSupport; } |
| |
| const std::vector<SnapshotCapabilitiesStruct> & GetSnapshotCapabilities() const { return mSnapshotCapabilitiesList; } |
| |
| uint32_t GetMaxNetworkBandwidth() const { return mMaxNetworkBandwidth; } |
| |
| uint16_t GetCurrentFrameRate() const { return mCurrentFrameRate; } |
| |
| bool GetHDRModeEnabled() const { return mHDRModeEnabled; } |
| |
| const std::vector<Globals::StreamUsageEnum> & GetSupportedStreamUsages() const { return mSupportedStreamUsages; } |
| |
| const std::vector<VideoStreamStruct> & GetAllocatedVideoStreams() const { return mAllocatedVideoStreams; } |
| |
| const std::vector<AudioStreamStruct> & GetAllocatedAudioStreams() const { return mAllocatedAudioStreams; } |
| |
| const std::vector<SnapshotStreamStruct> & GetAllocatedSnapshotStreams() const { return mAllocatedSnapshotStreams; } |
| |
| const std::vector<Globals::StreamUsageEnum> & GetStreamUsagePriorities() const { return mStreamUsagePriorities; } |
| |
| bool GetSoftRecordingPrivacyModeEnabled() const { return mSoftRecordingPrivacyModeEnabled; } |
| |
| bool GetSoftLivestreamPrivacyModeEnabled() const { return mSoftLivestreamPrivacyModeEnabled; } |
| |
| bool GetHardPrivacyModeOn() const { return mHardPrivacyModeOn; } |
| |
| TriStateAutoEnum GetNightVision() const { return mNightVision; } |
| |
| TriStateAutoEnum GetNightVisionIllum() const { return mNightVisionIllum; } |
| |
| const Globals::Structs::ViewportStruct::Type & GetViewport() const { return mViewport; } |
| |
| bool GetSpeakerMuted() const { return mSpeakerMuted; } |
| |
| uint8_t GetSpeakerVolumeLevel() const { return mSpeakerVolumeLevel; } |
| |
| uint8_t GetSpeakerMaxLevel() const { return mSpeakerMaxLevel; } |
| |
| uint8_t GetSpeakerMinLevel() const { return mSpeakerMinLevel; } |
| |
| bool GetMicrophoneMuted() const { return mMicrophoneMuted; } |
| |
| uint8_t GetMicrophoneVolumeLevel() const { return mMicrophoneVolumeLevel; } |
| |
| uint8_t GetMicrophoneMaxLevel() const { return mMicrophoneMaxLevel; } |
| |
| uint8_t GetMicrophoneMinLevel() const { return mMicrophoneMinLevel; } |
| |
| bool IsMicrophoneAGCEnabled() const { return mMicrophoneAGCEnabled; } |
| |
| uint16_t GetImageRotation() const { return mImageRotation; } |
| |
| bool GetImageFlipHorizontal() const { return mImageFlipHorizontal; } |
| |
| bool GetImageFlipVertical() const { return mImageFlipVertical; } |
| |
| bool GetLocalVideoRecordingEnabled() const { return mLocalVideoRecordingEnabled; } |
| |
| bool GetLocalSnapshotRecordingEnabled() const { return mLocalSnapshotRecordingEnabled; } |
| |
| bool GetStatusLightEnabled() const { return mStatusLightEnabled; } |
| |
| Globals::ThreeLevelAutoEnum GetStatusLightBrightness() const { return mStatusLightBrightness; } |
| |
| // Add/Remove Management functions for streams |
| |
| CHIP_ERROR SetStreamUsagePriorities(const std::vector<Globals::StreamUsageEnum> & newPriorities); |
| |
| /** |
| * Called during the processing of an AllocateVideoStream request to find |
| * an existing stream that can be reused given the requested stream args. |
| * |
| * The handler of the request iterates through the currently allocated video |
| * streams to check whether the allocation request parameters fall within the |
| * ranges of an allocated stream, so that the latter can be reused. |
| * |
| * @param requestedArgs Parameters in the allocation request. |
| * |
| * @return std::nullopt if there is no such stream, else the ID of the |
| * reusable stream. |
| */ |
| std::optional<uint16_t> GetReusableVideoStreamId(const VideoStreamStruct & requestedArgs) const; |
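| |
| // Worked example of the reuse check (values are illustrative): if a stream is already allocated with |
| // frame rates 10..60 and resolutions 640x480..1920x1080, a request for 15..30 fps at |
| // 1280x720..1920x1080 falls within those ranges, so the existing stream can be reused: |
| // |
| //   if (auto reusableId = GetReusableVideoStreamId(requestedArgs); reusableId.has_value()) |
| //   { |
| //       // Report *reusableId to the client instead of allocating a new stream. |
| //   } |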
| |
| CHIP_ERROR AddVideoStream(const VideoStreamStruct & videoStream); |
| |
| CHIP_ERROR UpdateVideoStreamRangeParams(VideoStreamStruct & videoStreamToUpdate, const VideoStreamStruct & videoStream, |
| bool & wasModified); |
| |
| CHIP_ERROR RemoveVideoStream(uint16_t videoStreamId); |
| |
| CHIP_ERROR AddAudioStream(const AudioStreamStruct & audioStream); |
| |
| CHIP_ERROR RemoveAudioStream(uint16_t audioStreamId); |
| |
| /** |
| * Called during the processing of an AllocateSnapshotStream request to find |
| * an existing stream that can be reused given the requested stream args. |
| * |
| * The handler of the request iterates through the currently allocated snapshot |
| * streams to check whether the allocation request parameters fall within the |
| * ranges of an allocated stream, so that the latter can be reused. |
| * |
| * @param requestedArgs Parameters in the allocation request. |
| * |
| * @return std::nullopt if there is no such stream, else the ID of the |
| * reusable stream. |
| */ |
| std::optional<uint16_t> |
| GetReusableSnapshotStreamId(const CameraAVStreamManagementDelegate::SnapshotStreamAllocateArgs & requestedArgs) const; |
| |
| CHIP_ERROR AddSnapshotStream(const SnapshotStreamStruct & snapshotStream); |
| |
| CHIP_ERROR UpdateSnapshotStreamRangeParams(SnapshotStreamStruct & snapshotStreamToUpdate, |
| const CameraAVStreamManagementDelegate::SnapshotStreamAllocateArgs & snapshotStream); |
| |
| CHIP_ERROR RemoveSnapshotStream(uint16_t snapshotStreamId); |
| |
| CHIP_ERROR UpdateVideoStreamRefCount(uint16_t videoStreamId, bool shouldIncrement); |
| |
| CHIP_ERROR UpdateAudioStreamRefCount(uint16_t audioStreamId, bool shouldIncrement); |
| |
| CHIP_ERROR UpdateSnapshotStreamRefCount(uint16_t snapshotStreamId, bool shouldIncrement); |
| |
| constexpr const char * StreamTypeToString(StreamType type) |
| { |
| switch (type) |
| { |
| case StreamType::kVideo: |
| return "Video"; |
| case StreamType::kAudio: |
| return "Audio"; |
| case StreamType::kSnapshot: |
| return "Snapshot"; |
| default: |
| return "Unknown"; |
| } |
| } |
| |
| bool IsResourceAvailableForStreamAllocation(uint32_t candidateEncodedPixelRate, bool encoderRequired); |
| |
| private: |
| template <AttributeId TAttributeId> |
| CHIP_ERROR PersistAndNotify(); |
| |
| // Declared friend so that it can access the private stream vector members |
| // from CameraAVStreamManagementCluster. |
| template <AttributeId TAttributeId> |
| friend struct StreamTraits; |
| |
| CameraAVStreamManagementDelegate & mDelegate; |
| const BitFlags<Feature> mEnabledFeatures; |
| const BitFlags<OptionalAttribute> mOptionalAttrs; |
| |
| // Attributes |
| const uint8_t mMaxConcurrentEncoders; |
| const uint32_t mMaxEncodedPixelRate; |
| const VideoSensorParamsStruct mVideoSensorParams; |
| const bool mNightVisionUsesInfrared; |
| const VideoResolutionStruct mMinViewPortResolution; |
| const std::vector<RateDistortionTradeOffStruct> mRateDistortionTradeOffPointsList; |
| const uint32_t mMaxContentBufferSize; |
| const AudioCapabilitiesStruct mMicrophoneCapabilities; |
| const AudioCapabilitiesStruct mSpeakerCapabilities; |
| const TwoWayTalkSupportTypeEnum mTwoWayTalkSupport; |
| const std::vector<SnapshotCapabilitiesStruct> mSnapshotCapabilitiesList; |
| const uint32_t mMaxNetworkBandwidth; |
| |
| uint16_t mCurrentFrameRate = 0; |
| bool mHDRModeEnabled = false; |
| bool mSoftRecordingPrivacyModeEnabled = false; |
| bool mSoftLivestreamPrivacyModeEnabled = false; |
| bool mHardPrivacyModeOn = false; |
| TriStateAutoEnum mNightVision = TriStateAutoEnum::kOn; |
| TriStateAutoEnum mNightVisionIllum = TriStateAutoEnum::kOn; |
| Globals::Structs::ViewportStruct::Type mViewport = { 0, 0, 0, 0 }; |
| bool mSpeakerMuted = false; |
| uint8_t mSpeakerVolumeLevel = 0; |
| uint8_t mSpeakerMaxLevel = kMaxSpeakerLevel; |
| uint8_t mSpeakerMinLevel = 0; |
| bool mMicrophoneMuted = false; |
| uint8_t mMicrophoneVolumeLevel = 0; |
| uint8_t mMicrophoneMaxLevel = kMaxMicrophoneLevel; |
| uint8_t mMicrophoneMinLevel = 0; |
| bool mMicrophoneAGCEnabled = false; |
| uint16_t mImageRotation = 0; |
| bool mImageFlipHorizontal = false; |
| bool mImageFlipVertical = false; |
| bool mLocalVideoRecordingEnabled = false; |
| bool mLocalSnapshotRecordingEnabled = false; |
| bool mStatusLightEnabled = false; |
| |
| Globals::ThreeLevelAutoEnum mStatusLightBrightness = Globals::ThreeLevelAutoEnum::kMedium; |
| |
| // Managed lists |
| std::vector<Globals::StreamUsageEnum> mSupportedStreamUsages; |
| |
| std::vector<Globals::StreamUsageEnum> mStreamUsagePriorities; |
| std::vector<VideoStreamStruct> mAllocatedVideoStreams; |
| std::vector<AudioStreamStruct> mAllocatedAudioStreams; |
| std::vector<SnapshotStreamStruct> mAllocatedSnapshotStreams; |
| |
| // Utility function to set and persist attributes |
| template <typename T> |
| CHIP_ERROR SetAttributeIfDifferent(T & currentValue, const T & newValue, AttributeId attributeId, bool shouldPersist = true) |
| { |
| if (currentValue != newValue) |
| { |
| currentValue = newValue; |
| auto path = ConcreteAttributePath(mPath.mEndpointId, CameraAvStreamManagement::Id, attributeId); |
| if (shouldPersist) |
| { |
| ReturnErrorOnFailure(GetSafeAttributePersistenceProvider()->WriteScalarValue(path, currentValue)); |
| } |
| mDelegate.OnAttributeChanged(attributeId); |
| NotifyAttributeChanged(attributeId); |
| } |
| return CHIP_NO_ERROR; |
| } |
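| |
| // Illustrative usage (a sketch of how the public setters above might use this helper; |
| // Attributes::SpeakerMuted::Id is the generated attribute ID from Attributes.h): |
| // |
| //   CHIP_ERROR CameraAVStreamManagementCluster::SetSpeakerMuted(bool aSpeakerMuted) |
| //   { |
| //       return SetAttributeIfDifferent(mSpeakerMuted, aSpeakerMuted, Attributes::SpeakerMuted::Id); |
| //   } |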
| |
| template <typename StreamContainer, typename IdGetter> |
| Protocols::InteractionModel::Status ValidateStreamForModifyOrDeallocateImpl(StreamContainer & streams, uint16_t streamID, |
| StreamType streamType, IdGetter id_getter, |
| bool isDeallocate) |
| { |
| auto it = std::find_if(streams.begin(), streams.end(), [&](const auto & stream) { return id_getter(stream) == streamID; }); |
| |
| if (it == streams.end()) |
| { |
| ChipLogError(Zcl, "CameraAVStreamMgmt[ep=%d]: %s stream with ID: %u not found", mPath.mEndpointId, |
| StreamTypeToString(streamType), streamID); |
| return Protocols::InteractionModel::Status::NotFound; |
| } |
| |
| if (isDeallocate && it->referenceCount > 0) |
| { |
| ChipLogError(Zcl, "CameraAVStreamMgmt[ep=%d]: %s stream with ID: %u still in use", mPath.mEndpointId, |
| StreamTypeToString(streamType), streamID); |
| return Protocols::InteractionModel::Status::InvalidInState; |
| } |
| |
| using StreamValueType = typename StreamContainer::value_type; |
| if constexpr (std::is_same_v<StreamValueType, VideoStreamStruct> || std::is_same_v<StreamValueType, AudioStreamStruct>) |
| { |
| if (it->streamUsage == Globals::StreamUsageEnum::kInternal) |
| { |
| ChipLogError(Zcl, "CameraAVStreamMgmt[ep=%d]: %s stream with ID: %u is Internal", mPath.mEndpointId, |
| StreamTypeToString(streamType), streamID); |
| return Protocols::InteractionModel::Status::DynamicConstraintError; |
| } |
| } |
| |
| // For SnapshotStreamModify, check the SnapshotCapabilitiesStruct entry |
| // corresponding to this stream for requiresHardwareEncoder. |
| if constexpr (std::is_same_v<StreamValueType, SnapshotStreamStruct>) |
| { |
| if (!isDeallocate) |
| { |
| auto snCapabIt = |
| std::find_if(mSnapshotCapabilitiesList.begin(), mSnapshotCapabilitiesList.end(), [&](const auto & capability) { |
| return capability.imageCodec == it->imageCodec && capability.maxFrameRate >= it->frameRate && |
| capability.resolution.width >= it->minResolution.width && |
| capability.resolution.height >= it->minResolution.height && |
| capability.resolution.width <= it->maxResolution.width && |
| capability.resolution.height <= it->maxResolution.height; |
| }); |
| if (snCapabIt != mSnapshotCapabilitiesList.end()) |
| { |
| if (snCapabIt->requiresHardwareEncoder.HasValue() && !snCapabIt->requiresHardwareEncoder.Value()) |
| { |
| ChipLogError( |
| Zcl, |
| "CameraAVStreamMgmt[ep=%d]: Snapshot stream with ID: %u based off an underlying video stream and " |
| "not modifiable", |
| mPath.mEndpointId, streamID); |
| return Protocols::InteractionModel::Status::InvalidInState; |
| } |
| } |
| else |
| { |
| return Protocols::InteractionModel::Status::InvalidInState; |
| } |
| } |
| } |
| |
| return Protocols::InteractionModel::Status::Success; |
| } |
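| |
| // Illustrative usage (sketch): the per-type validators declared below are expected to forward to this |
| // helper with a lambda that extracts the stream ID from the corresponding struct, e.g.: |
| // |
| //   Protocols::InteractionModel::Status |
| //   CameraAVStreamManagementCluster::ValidateVideoStreamForModifyOrDeallocate(const uint16_t videoStreamID, |
| //                                                                             bool isDeallocate) |
| //   { |
| //       return ValidateStreamForModifyOrDeallocateImpl( |
| //           mAllocatedVideoStreams, videoStreamID, StreamType::kVideo, |
| //           [](const auto & stream) { return stream.videoStreamID; }, isDeallocate); |
| //   } |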
| |
| bool IsBitDepthValid(uint8_t bitDepth) { return (bitDepth == 8 || bitDepth == 16 || bitDepth == 24 || bitDepth == 32); } |
| |
| /** |
| * Helper function that loads all the persistent attributes from the KVS. |
| */ |
| void LoadPersistentAttributes(); |
| |
| // Helpers to read list items via delegate APIs |
| CHIP_ERROR ReadAndEncodeRateDistortionTradeOffPoints(const AttributeValueEncoder::ListEncodeHelper & encoder); |
| CHIP_ERROR ReadAndEncodeSnapshotCapabilities(const AttributeValueEncoder::ListEncodeHelper & encoder); |
| |
| CHIP_ERROR ReadAndEncodeSupportedStreamUsages(const AttributeValueEncoder::ListEncodeHelper & encoder); |
| |
| CHIP_ERROR ReadAndEncodeAllocatedVideoStreams(const AttributeValueEncoder::ListEncodeHelper & encoder); |
| CHIP_ERROR ReadAndEncodeAllocatedAudioStreams(const AttributeValueEncoder::ListEncodeHelper & encoder); |
| CHIP_ERROR ReadAndEncodeAllocatedSnapshotStreams(const AttributeValueEncoder::ListEncodeHelper & encoder); |
| |
| CHIP_ERROR ReadAndEncodeStreamUsagePriorities(const AttributeValueEncoder::ListEncodeHelper & encoder); |
| |
| CHIP_ERROR StoreViewport(const Globals::Structs::ViewportStruct::Type & viewport); |
| CHIP_ERROR LoadViewport(Globals::Structs::ViewportStruct::Type & viewport); |
| |
| CHIP_ERROR StoreStreamUsagePriorities(); |
| CHIP_ERROR LoadStreamUsagePriorities(); |
| |
| template <AttributeId attributeId> |
| CHIP_ERROR StoreAllocatedStreams(); |
| |
| /** |
| * @brief |
| * A templatized function that loads the allocated streams of a certain type from persistent storage. |
| * |
| * @tparam attributeId The attribute Id of the allocated stream list. |
| * @return CHIP_ERROR CHIP_NO_ERROR on success, otherwise another CHIP_ERROR. |
| */ |
| template <AttributeId attributeId> |
| CHIP_ERROR LoadAllocatedStreams(); |
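| |
| // Illustrative usage (sketch): LoadPersistentAttributes() is expected to instantiate this for each |
| // stream list using the generated attribute IDs from Attributes.h, e.g.: |
| // |
| //   LoadAllocatedStreams<Attributes::AllocatedVideoStreams::Id>(); |
| //   LoadAllocatedStreams<Attributes::AllocatedAudioStreams::Id>(); |
| //   LoadAllocatedStreams<Attributes::AllocatedSnapshotStreams::Id>(); |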
| |
| void ModifyVideoStream(const uint16_t streamID, const Optional<bool> waterMarkEnabled, const Optional<bool> osdEnabled); |
| |
| void ModifySnapshotStream(const uint16_t streamID, const Optional<bool> waterMarkEnabled, const Optional<bool> osdEnabled); |
| |
| bool StreamPrioritiesHasDuplicates(const std::vector<Globals::StreamUsageEnum> & aStreamUsagePriorities); |
| |
| std::optional<DataModel::ActionReturnStatus> |
| HandleVideoStreamAllocate(CommandHandler & handler, const ConcreteCommandPath & commandPath, |
| const Commands::VideoStreamAllocate::DecodableType & req); |
| |
| std::optional<DataModel::ActionReturnStatus> HandleVideoStreamModify(const Commands::VideoStreamModify::DecodableType & req); |
| |
| std::optional<DataModel::ActionReturnStatus> |
| HandleVideoStreamDeallocate(const Commands::VideoStreamDeallocate::DecodableType & req); |
| |
| std::optional<DataModel::ActionReturnStatus> |
| HandleAudioStreamAllocate(CommandHandler & handler, const ConcreteCommandPath & commandPath, |
| const Commands::AudioStreamAllocate::DecodableType & req); |
| |
| std::optional<DataModel::ActionReturnStatus> |
| HandleAudioStreamDeallocate(const Commands::AudioStreamDeallocate::DecodableType & req); |
| |
| std::optional<DataModel::ActionReturnStatus> |
| HandleSnapshotStreamAllocate(CommandHandler & handler, const ConcreteCommandPath & commandPath, |
| const Commands::SnapshotStreamAllocate::DecodableType & req); |
| |
| std::optional<DataModel::ActionReturnStatus> |
| HandleSnapshotStreamModify(const Commands::SnapshotStreamModify::DecodableType & req); |
| |
| std::optional<DataModel::ActionReturnStatus> |
| HandleSnapshotStreamDeallocate(const Commands::SnapshotStreamDeallocate::DecodableType & req); |
| |
| std::optional<DataModel::ActionReturnStatus> |
| HandleSetStreamPriorities(const Commands::SetStreamPriorities::DecodableType & req); |
| |
| std::optional<DataModel::ActionReturnStatus> HandleCaptureSnapshot(CommandHandler & handler, |
| const ConcreteCommandPath & commandPath, |
| const Commands::CaptureSnapshot::DecodableType & req); |
| |
| bool CheckSnapshotStreamsAvailability(); |
| |
| bool ValidateSnapshotStreamId(const DataModel::Nullable<uint16_t> & snapshotStreamID); |
| |
| Protocols::InteractionModel::Status ValidateVideoStreamForModifyOrDeallocate(const uint16_t videoStreamID, bool isDeallocate); |
| |
| Protocols::InteractionModel::Status ValidateAudioStreamForDeallocate(const uint16_t audioStreamID); |
| |
| Protocols::InteractionModel::Status ValidateSnapshotStreamForModifyOrDeallocate(const uint16_t snapshotStreamID, |
| bool isDeallocate); |
| }; |
| |
| } // namespace CameraAvStreamManagement |
| } // namespace Clusters |
| } // namespace app |
| } // namespace chip |