diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 54c40cf7d..5fc106eb3 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -34,7 +34,7 @@ _Explain how this change can be tested manually, if applicable._ - [ ] Changelog is updated with client-facing changes - [ ] New code is covered by unit tests - [ ] Comparison screenshots added for visual changes -- [ ] Affected documentation updated (Docusaurus, tutorial, CMS) +- [ ] Affected documentation updated (tutorial, CMS) ### 🎁 Meme diff --git a/.github/workflows/docusaurus-prod.yml b/.github/workflows/docusaurus-prod.yml deleted file mode 100644 index 158042164..000000000 --- a/.github/workflows/docusaurus-prod.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: Publish Docusaurus to prod - -on: - release: - types: [published] - - workflow_dispatch: - -jobs: - push_docusaurus: - name: Publish docusaurus docs - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4.1.1 - - uses: actions/setup-node@v3 - with: - node-version: 16 - - name: push - uses: GetStream/push-stream-chat-docusaurus-action@main - with: - target-branch: 'main' - cli-target-branch: 'production' - destination-repository-name: 'stream-video-docusaurus' - source-directory: 'docusaurus' - env: - DOCUSAURUS_GH_TOKEN: ${{ secrets.DOCUSAURUS_GH_TOKEN }} diff --git a/.github/workflows/docusaurus-staging.yml b/.github/workflows/docusaurus-staging.yml deleted file mode 100644 index dde88115a..000000000 --- a/.github/workflows/docusaurus-staging.yml +++ /dev/null @@ -1,27 +0,0 @@ -name: Publish Docusaurus to staging - -on: - push: - branches: - - develop - - workflow_dispatch: - -jobs: - push_docusaurus: - name: Publish docusaurus docs - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4.1.1 - - uses: actions/setup-node@v3 - with: - node-version: 16 - - name: push - uses: GetStream/push-stream-chat-docusaurus-action@main - with: - target-branch: 'staging' - cli-target-branch: 
'staging' - destination-repository-name: 'stream-video-docusaurus' - source-directory: 'docusaurus' - env: - DOCUSAURUS_GH_TOKEN: ${{ secrets.DOCUSAURUS_GH_TOKEN }} diff --git a/.github/workflows/smoke-checks.yml b/.github/workflows/smoke-checks.yml index af34b7bb1..c9014f8ac 100644 --- a/.github/workflows/smoke-checks.yml +++ b/.github/workflows/smoke-checks.yml @@ -5,7 +5,6 @@ on: branches: - '**' paths-ignore: - - 'docusaurus/**' - 'README.md' - 'CHANGELOG.md' diff --git a/.github/workflows/vale-doc-lint.yml b/.github/workflows/vale-doc-lint.yml deleted file mode 100644 index 1e6d5cf2f..000000000 --- a/.github/workflows/vale-doc-lint.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: Vale - -on: - pull_request: - paths: - - 'docusaurus/**' - - '.github/workflows/vale-doc-lint.yml' - - workflow_dispatch: - -jobs: - vale: - name: Check Docusaurus docs - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4.1.1 - - uses: errata-ai/vale-action@v2.1.0 - with: - # added, diff_context, file, nofilter - filter_mode: nofilter - # github-pr-check, github-pr-review, github-check - reporter: github-pr-check - fail_on_error: true - files: docusaurus - env: - # Required, set by GitHub actions automatically: - # https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret - GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} diff --git a/.gitignore b/.gitignore index 60500fe5e..fe3ec144f 100644 --- a/.gitignore +++ b/.gitignore @@ -84,7 +84,6 @@ vendor/bundle/ Example/Carthage/.env Example/Carthage/fastlane/report.xml Sample/Cocoapods/Podfile.lock -docusaurus/.env reports/ .scannerwork/ push_payload.json diff --git a/.styles/Google/meta.json b/.styles/Google/meta.json deleted file mode 100644 index a5da2a848..000000000 --- a/.styles/Google/meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "feed": "https://github.com/errata-ai/Google/releases.atom", - "vale_version": ">=1.0.0" -} diff --git a/.styles/Vocab/Base/accept.txt 
b/.styles/Vocab/Base/accept.txt index a3f33e374..6d56f214d 100644 --- a/.styles/Vocab/Base/accept.txt +++ b/.styles/Vocab/Base/accept.txt @@ -6,7 +6,6 @@ Boolean CocoaPods cooldown Cooldown -Docusaurus encodable Giphy Objective-C diff --git a/.vale.ini b/.vale.ini deleted file mode 100644 index c251b1564..000000000 --- a/.vale.ini +++ /dev/null @@ -1,12 +0,0 @@ -StylesPath = .styles - -MinAlertLevel = error -Vocab = Base - -Packages = Google - -[*.md] -BasedOnStyles = Vale, Google -TokenIgnores = ("integrationguide"), ^<[ ]{0}img(.*)+[ ]{0}/>$, <[ ]{0}img(.*\n)+/> - - diff --git a/Scripts/addImagesToDocumentation.sh b/Scripts/addImagesToDocumentation.sh deleted file mode 100644 index a382f5ae9..000000000 --- a/Scripts/addImagesToDocumentation.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -# Move to project root directory -scriptDir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -cd "$scriptDir/../" - -DOCUMENTATION_FOLDER=$1 -PATH_TO_SNAPSHOTS="UISDKdocumentation/__Snapshots__" -PATH_TO_ASSETS="assets" -PATH_TO_DOCUSAURUS="docusaurus/docs/iOS" - -# Let's iterate through snapshots we have and add them to the given components: -for UI_SNAPSHOT in ${PATH_TO_SNAPSHOTS}/*;do - - echo "Here's your snapshot Folder to be iterated to add image to documentation: 🚀" - echo $UI_SNAPSHOT - # Get component name for later processing and finding corresponding file in markdown. - STRIPPED_PATH=`basename $UI_SNAPSHOT` - COMPONENT_NAME=${STRIPPED_PATH%_*_*} - DOCUMENTATION_FILE=`find $DOCUMENTATION_FOLDER -name "$COMPONENT_NAME.md"` - - # Let's use just light variation of the screenshot, we can support dark mode later. - FINAL_SNAPSHOT=`ls $UI_SNAPSHOT | grep light` - - # Check if the file already contains snapshot line, if yes, continue the cycle and generate it for next one. s - tail -1 "$DOCUMENTATION_FILE" | grep "$FINAL_SNAPSHOT" - if [ $? -eq 0 ];then - echo "There is already line containing the snapshot for $COMPONENT_NAME, skipping adding of documentation." 
- continue - fi - - echo "Copying $COMPONENT_NAME image to docusaurus/docs/iOS/assets/" - pwd - cp "$UI_SNAPSHOT/$FINAL_SNAPSHOT" "$PATH_TO_DOCUSAURUS/$PATH_TO_ASSETS/$FINAL_SNAPSHOT" - - echo "Adding snapshot of $COMPONENT_NAME to documentation..." - # Docusaurus works only with relative paths, so we move to docusaurus root folder (iOS) and generate relative path for the - # snapshot aka ../../assets/Snapshot-light.png when the directory is /ui-components/Folder/Snapshot.md - pushd "$PATH_TO_DOCUSAURUS" - RELATIVE_PATH_INSIDE_DOCUSAURUS=`dirname ${DOCUMENTATION_FILE##*iOS/}` - DESIRED_PATH=`realpath --relative-to="$RELATIVE_PATH_INSIDE_DOCUSAURUS" "$PATH_TO_ASSETS"` - popd - - # Simple create image annotation and paste it to the first line of the file. - SNAPSHOT_ANNOTATION_TEXT="![$COMPONENT_NAME]($DESIRED_PATH/$FINAL_SNAPSHOT)" - echo -e "$SNAPSHOT_ANNOTATION_TEXT\n$(cat $DOCUMENTATION_FILE)" > $DOCUMENTATION_FILE - - if [ $? -eq 0 ];then - echo "Successfully added documentation snapshot to $DOCUMENTATION_FILE" - fi -done diff --git a/Scripts/createPropFiles.sh b/Scripts/createPropFiles.sh deleted file mode 100755 index df47618b6..000000000 --- a/Scripts/createPropFiles.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -pushd docusaurus/docs/iOS/common-content/reference-docs - -find . -name "*.md" ! 
-name '*-properties.md' -type f -exec sh -c 'N="${0%.*}-properties.md"; awk "/## Properties/{p=1}p" {} | awk "NR>2 {print last} {last=\$0}" > $N' {} \; - -popd diff --git a/Scripts/deleteDuplicates.sh b/Scripts/deleteDuplicates.sh deleted file mode 100644 index 0cbf5d222..000000000 --- a/Scripts/deleteDuplicates.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash - -# Move to project root directory -scriptDir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -cd "$scriptDir/../" - -# Let's delete the typealiases :) -pushd "docusaurus/docs/iOS" - -echo "_Button.md -_View.md -_Control.md -_CollectionReusableView.md -_CollectionViewCell.md -_NavigationBar.md -_ViewController.md" > /tmp/reservedFilesNotToRename.txt - -# We find the files which are duplicate (For example _ChatChannelNamer and ChatChannelNamer), name without underscore in here is typealias. -find . -type f -name "_*" | sed 's/_//g' > /tmp/typealiasDuplicates.txt - -while read TYPEALIAS_FILE; do - rm -rf $TYPEALIAS_FILE - echo "REMOVED $TYPEALIAS_FILE typealias" -done < /tmp/typealiasDuplicates.txt - -# What lefts is to remove underscores from files, so let's iterate over files with underscore and rename them to no underscore -# Except for /tmp/reservedFilesNotToRename.txt :) -for file in `find . -type f -name "_*"`; do - FILE_NAME=`basename $file` - cat /tmp/reservedFilesNotToRename.txt | grep "$FILE_NAME" - if [ $? 
-eq 0 ];then - echo "NOT RENAMING: $FILE_NAME" - continue - fi - /bin/mv "$file" "${file/_}" -done; - -popd - - diff --git a/Scripts/generateDocumentation.sh b/Scripts/generateDocumentation.sh deleted file mode 100644 index dc357d8e9..000000000 --- a/Scripts/generateDocumentation.sh +++ /dev/null @@ -1,79 +0,0 @@ -#!/bin/bash -set -ex -TARGET=$1 - -# Move to project root directory -scriptDir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -cd "$scriptDir/../" - -if [[ "$TARGET" = "StreamChat" ]];then - TARGET_DIRECTORY="Sources/StreamChat" -elif [[ "$TARGET" = "StreamChatUI" ]]; then - TARGET_DIRECTORY="Sources/StreamChatUI" -else - echo "Please specify target to generate docs for (StreamChat or StreamChatUI)" - exit 1 -fi - -OUTPUT_DIRECTORY="docusaurus/docs/iOS/CommonContent" - -swift-doc generate $TARGET_DIRECTORY -n $TARGET -o "$OUTPUT_DIRECTORY/$TARGET_DIRECTORY" - -pushd $OUTPUT_DIRECTORY - -# Delete emissions which cause docusaurus not compiling., we probably want to rename Home.md to name it contains it in order to create brief overview of the component. -find . -type f -name '_Sidebar.md' -delete -find . -type f -name 'Home.md' -delete -find . -type f -name '_Footer.md' -delete - -popd - -# cleanup the duplicate files by comparing what is not in the Sources directory. -bash Scripts/deleteDuplicates.sh "$OUTPUT_DIRECTORY/$TARGET_DIRECTORY" "$TARGET_DIRECTORY" - -# Delete first lines in files -find "$OUTPUT_DIRECTORY/$TARGET_DIRECTORY" -type f -exec sed -i '' '1d' {} + - -# Add snapshots to UI elements - Skipping this for now -# if [[ "$TARGET" = "StreamChatUI" ]]; then -# bash Scripts/addImagesToDocumentation.sh "$OUTPUT_DIRECTORY/Sources/StreamChatUI" -# fi - -pushd docusaurus/docs/iOS/ -# Let's go to output directory one more time and add MDX headers. 
-# --- -# id: ${classname} -# header: ClassName -# slug: lowecased path -# --- -# sed is cool and everything but having it on macOS hurts -find CommonContent -type f > /tmp/allFiles.txt - -while read FILEPATH; do - FILENAME=`basename $FILEPATH` - #echo "Adding ID to: $FILEPATH" - CLASSNAME="${FILENAME%.md}" - LOWERCASED=$(echo $CLASSNAME | tr '[:upper:]' '[:lower:]') - PATH_WITHOUT_FILE=`dirname $FILEPATH` - - #Docusaurus needs path for the ID... - FINAL_PATH=$(echo "/$PATH_WITHOUT_FILE/$LOWERCASED" | sed 's#/#\\/#g') - - TITLESTRING="id: $LOWERCASED" - FIRSTLINE=`head -1 "$FILEPATH"` - - # Got nothing better right now: - if [ "$TITLESTRING" == "---" ]; then - echo "Found id, title and slug marks, skipping" - continue - fi - -echo "FINAL_PATH:" -FINAL_PATH=$(echo $FINAL_PATH | tr '[:upper:]' '[:lower:]') -echo $FINAL_PATH - -sed -i '' "1s/^/---\ntitle: $CLASSNAME\n---\n/" $FILEPATH -done "Add Packages..." -- Paste the URL https://github.com/GetStream/stream-video-swift.git -- In the option "Dependency Rule" choose "Branch," in the single text input next to it, enter "main" - -![Screenshot shows how to add the SPM dependency](../assets/spm.png) - -- Choose "Add Package" and wait for the dialog to complete. -- Select `StreamVideo` and `StreamVideoSwiftUI` (if you use SwiftUI, otherwise also select `StreamVideoUIKit`). - -![Screenshot shows selection of dependencies](../assets/spm_select.png) - -
➤ Do you want to use XCFrameworks? (Click to read more) -

- -:::caution -Our XCFrameworks are built with **Swift 5.9**. In order to use them you need **Xcode 15** or above -::: - -You can learn more about [our Module Stable XCFrameworks here](#xcframeworks) - -- Use the URL https://github.com/getstream/stream-video-swift-spm.git - -

-
- -### Permissions - -Making a video call requires the usage of the camera and the microphone of the device. Therefore, you need to request permissions to use them in your app. In order to do this, you will need to add the following keys and values in your `Info.plist` file. - -`Privacy - Camera Usage Description` - "Your_app_name requires camera access in order to capture and transmit video" -`Privacy - Microphone Usage Description` - "Your_app_name requires microphone access in order to capture and transmit audio" - -![Screenshot shows permissions in the .plist file](../assets/permissions.png) - -### Push notifications - -Stream video support both Remote Notifications and VoIP notifications. See the [Push Notification](../../advanced/push-notifications) section for more details. - -## XCFrameworks - -In an effort to have [**Module Stability**](https://www.swift.org/blog/library-evolution/), we distribute **pre-built XCFrameworks**. - -:::info -Our XCFrameworks are built with Swift 5.9. In order to use them you need Xcode 15 or above. -::: - -An **XCFramework** is a package that contains binaries for multiple architectures/platforms, but only the particular slice of that package required for your architecture/platform will be used. - -**Benefits of XCFrameworks:** -- Conveniently import a single package -- Supports all platforms and architectures -- No more fat binaries. No more architectures stripping - -:::info -In order to manually integrate StreamVideo XCFrameworks, you need to add the additional dependencies to your project. 
- -```swift -.package(name: "StreamWebRTC", url: "https://github.com/GetStream/stream-video-swift-webrtc.git", .exact("114.5735.8")), -.package(url: "https://github.com/apple/swift-protobuf.git", from: "1.18.0") -``` -::: diff --git a/docusaurus/docs/iOS/01-basics/03-quickstart.mdx b/docusaurus/docs/iOS/01-basics/03-quickstart.mdx deleted file mode 100644 index 5188f9971..000000000 --- a/docusaurus/docs/iOS/01-basics/03-quickstart.mdx +++ /dev/null @@ -1,263 +0,0 @@ ---- -title: Quickstart -description: For when you're in a hurry and want to quickly get up and running ---- - -### Introduction - -`StreamVideo` is a highly customizable SDK that facilitates adding calling (audio and video) support to your apps. The SDK consists of three parts: - -- low-level client - responsible for establishing calls, built on top of WebRTC. -- SwiftUI SDK - SwiftUI components for different types of call flows. -- UIKit SDK - UIKit wrapper over the SwiftUI components, for easier usage in UIKit based apps. - -In this tutorial, we will build a video calling app that shows how you can integrate the SDK in few simple steps. - -### Creating a project - -To get started with the `StreamVideo` SDK, open Xcode and create a new project. - -- Create a new Swift project in Xcode -- Choose iOS from the list of platforms -- Choose the "App" template -- Use VideoDemoSwiftUI for the project name -- Select "SwiftUI" in the Interface option - -![Screenshot shows how to create a project in Xcode](../assets/new_project.png) - -We are going to use the Swift Package Manager to fetch the SDK. - -- In Xcode, go to File -> "Add Packages..." -- Paste the URL https://github.com/GetStream/stream-video-swift.git -- In the option "Dependency Rule" choose "Branch" and in the single text input next to it, enter "main" - -![Screenshot shows how to add the SPM dependency](../assets/spm.png) - -- Choose "Add Package" and wait for the dialog to complete. 
-- Select `StreamVideo` and `StreamVideoSwiftUI` (if you use SwiftUI, otherwise also select `StreamVideoUIKit`). - -![Screenshot shows selection of dependencies](../assets/spm_select.png) - -You now have an empty project for your video calling app with the `StreamVideo` SDK as a dependency. Let's get started by displaying some content. - -### Setting up the StreamVideoUI object - -`StreamVideoUI` is the main access point to our SwiftUI SDK. It's created with the following values: - -- `apiKey` - your unique API key that's available in your dashboard. -- `user` - the `UserInfo` struct that contains information about the currently logged in user. -- `token` - the current user's `Token`. -- `tokenProvider` - called when a token is expired. We strongly recommend that you use token that expires (for security reasons), and provide a way for the SDK to fetch a new token when the current one expires, with the `tokenProvider` closure. - -Depending on your app architecture, you can keep the `StreamVideoUI` in a place where the lifecycle is tied to the lifecycle of the currently logged in user. - -In this example, for simplicity, we will add it in the SwiftUI `App` file, as a `@State` variable, and set it up on `init`. In your app, you should setup the `StreamVideoUI` object after you login your user. - -Open up the file `VideoDemoSwiftUIApp` in your Xcode project and add the following contents to it: - -```swift -import SwiftUI -import StreamVideo -import StreamVideoSwiftUI - -@main -struct VideoDemoSwiftUIApp: App { - - @State var streamVideo: StreamVideoUI? - - init() { - setupStreamVideo(with: "key1", userCredentials: .demoUser) - } - - private func setupStreamVideo( - with apiKey: String, - userCredentials: UserCredentials - ) { - streamVideo = StreamVideoUI( - apiKey: apiKey, - user: userCredentials.user, - token: userCredentials.token, - tokenProvider: { result in - // Call your networking service to generate a new token here. 
- // When finished, call the result handler with either .success or .failure. - result(.success(userCredentials.token)) - } - ) - } - - var body: some Scene { - WindowGroup { - ContentView() - } - } -} -``` - -In this example, we're using a hardcoded demo user, with a token that never expires: - -```swift -struct UserCredentials { - let user: User - let token: UserToken -} - -extension UserCredentials { - static let demoUser = UserCredentials( - user: User( - id: "testuser", - name: "Test User", - imageURL: URL(string: "https://vignette.wikia.nocookie.net/starwars/images/2/20/LukeTLJ.jpg")!, - customData: [:] - ), - token: UserToken(rawValue: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdHJlYW0tdmlkZW8tZ29AdjAuMS4wIiwic3ViIjoidXNlci90ZXN0dXNlciIsImlhdCI6MTY2NjY5ODczMSwidXNlcl9pZCI6InRlc3R1c2VyIn0.h4lnaF6OFYaNPjeK8uFkKirR5kHtj1vAKuipq3A5nM0") - ) -} -``` - -With this, our `StreamVideoUI` object is setup, and the UI components are ready to be used inside your app. - -Let's see an example on how to invoke a call. The UI would be simple - just a text field to enter the call id and a button to start the call. - -Add the following code in the `ContentView` file in Xcode. - -```swift -import StreamVideo -import StreamVideoSwiftUI -import SwiftUI - -struct ContentView: View { - - @Injected(\.streamVideo) var streamVideo - - @StateObject var callViewModel = CallViewModel() - @State var callId = "" - - var body: some View { - VStack { - TextField("Insert a call id", text: $callId) - .textFieldStyle(.roundedBorder) - .padding() - - Button { - resignFirstResponder() - callViewModel.startCall( - callType: "default", - callId: callId, - members: [/* Your list of participants goes here. 
*/] - ) - } label: { - Text("Start a call") - } - } - .padding() - .modifier(CallModifier(viewModel: callViewModel)) - } -} -``` - -Here, you need to create the `CallViewModel`, which deals with the call related state and provides access to features like muting audio/video, changing the camera, starting / stopping calls etc. - -In the example, we're also setting a `CallModifier` to the view. With this modifier, the calling support is added to your view. The modifier handles everything from reporting incoming / outgoing calls to showing the appropriate UI based on the call state. - -:::note -The way we retrieve the `streamVideo` object here is through the `@Injected` property wrapper (read more on [this page](../../guides/dependency-injection).) -::: - -### UI Customizations - -#### Appearance - -When you create the `StreamVideoUI` object, you can optionally provide your own version of the `Appearance` class, that will allow you to customize things like fonts, colors, icons and sounds used in the SDK. - -For example, let's change the default hang up icon. For this, we would need to create a new `Images` class, and modify its `hangup` property. Then, we need to initialize the `Appearance` class with the updated `Images` and pass it to the `StreamVideoUI` object. - -```swift -var images = Images() -images.hangup = Image(systemName: "phone.down") -let appearance = Appearance(images: images) -streamVideo = StreamVideoUI( - apiKey: "your_api_key", - user: userCredentials.userInfo, - token: userCredentials.token, - videoConfig: VideoConfig(), - tokenProvider: { result in - result(.success(user.token)) - }, - appearance: appearance -) -``` - -If you want to learn about all the possible appearance customizations (fonts, colors, icons and sounds), please check the following [page](../../ui-components/overview). - -#### View Customizations - -The SwiftUI SDK allows complete view swapping of some of its components. 
This means you can, for example, create your own (different) outgoing call view and inject it in the slot of the default one. For most of the views, the SDK doesn't require anything else than the view to conform to the standard SwiftUI `View` protocol and return a view from the `body` variable. - -To abstract away the creation of the views, a protocol called `ViewFactory` is used in the SDK. This protocol defines the swappable views of the video experience. There are default implementations for all the views used in the SDK. If you want to customize a view, you will need to provide your own implementation of the `ViewFactory`, but you will need to implement only the view you want to swap. - -For example, let's customize the outgoing call view and attach a text overlay to it. For this, we will need to implement the `makeOutgoingCallView(viewModel: CallViewModel) -> some View` in the `ViewFactory`: - -```swift -class CustomViewFactory: ViewFactory { - - func makeOutgoingCallView(viewModel: CallViewModel) -> some View { - // Here you can also provide your own custom view. - // In this example, we are re-using the standard one, while also adding an overlay. - let view = DefaultViewFactory.shared.makeOutgoingCallView(viewModel: viewModel) - return view.overlay( - Text("Custom text overlay") - ) - } -} -``` - -Next, when you attach the `CallModifier` to your hosting view, you need to inject the newly created `CustomViewFactory`. The SDK will use the views you have provided in your custom implementation, while it will default back to the ones from the SDK in the slots where you haven't provided any implementation. - -In order to inject the `ViewFactory`, you will need to update the `CallModifier` initializer. 
- -```swift - @StateObject var callViewModel = CallViewModel() - @State var callId = "" - - var body: some View { - VStack { - TextField("Insert a call id", text: $callId) - .textFieldStyle(.roundedBorder) - .padding() - - Button { - resignFirstResponder() - callViewModel.startCall( - callType: "default", - callId: callId, - members: [/* Your list of participants goes here. */] - ) - } label: { - Text("Start a call") - } - } - .padding() - .modifier(CallModifier(viewFactory: CustomViewFactory(), viewModel: callViewModel)) - } -``` - -For the full list of supported view slots that can be swapped, please refer to this [page](../../guides/view-slots). - -### Permissions - -Making a video call requires the usage of the camera and the microphone of the device. Therefore, you need to request permissions to use them in your app. In order to do this, you will need to add the following keys and values in your `Info.plist` file. - -`Privacy - Camera Usage Description` - "Your_app_name requires camera access in order to capture and transmit video" -`Privacy - Microphone Usage Description` - "Your_app_name requires microphone access in order to capture and transmit audio" - -![Screenshot shows permissions in the .plist file](../assets/permissions.png) - -:::note -You should replace "Your_app_name" (or also use your custom strings instead). -::: - -With this, you should be able to test a video call between two devices. - -:::note -The call id should be the same on the different devices that are part of the call. 
-::: diff --git a/docusaurus/docs/iOS/01-basics/_category_.json b/docusaurus/docs/iOS/01-basics/_category_.json deleted file mode 100644 index 7f07e2d3a..000000000 --- a/docusaurus/docs/iOS/01-basics/_category_.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "label": "Basics" -} diff --git a/docusaurus/docs/iOS/03-guides/01-client-auth.mdx b/docusaurus/docs/iOS/03-guides/01-client-auth.mdx deleted file mode 100644 index 66fb23aae..000000000 --- a/docusaurus/docs/iOS/03-guides/01-client-auth.mdx +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: Client & Authentication -description: How to setup the client and authenticate ---- - -import { TokenSnippet } from '../../../shared/_tokenSnippet.jsx'; - -## Client & Auth - -Before joining a call, it is necessary to set up the video client. Here's a basic example: - -```swift -let streamVideo = StreamVideo( - apiKey: apiKey, - user: user, - token: token, - tokenProvider: { _ in } -) -``` - -- The API Key can be found in your dashboard. -- The user can be either authenticated, anonymous or guest. -- Note: You can store custom data on the user object, if required. - -Typically, you'll want to initialize the client in your app's AppDelegate or SceneDelegate or in a dependency injection module. - -### Generating a token - -Tokens need to be generated server side. You can use our server side SDKs to quickly add support for this. Typically you integrate this into the part of your codebase where you login or register users. The tokens provide a way to authenticate a user or give access to a specific set of calls. - -Here's a valid user and token to help you get started on the client side, before integrating with your backend API. - - - -### Different types of users - -Authenticated users are users that have an account on your app. -Guest users are temporary user accounts. You can use it to temporarily give someone a name and image when joining a call. -Anonymous users are users that are not authenticated. 
It's common to use this for watching a livestream or similar where you aren't authenticated. - -This example shows the client setup for a guest user: - -```swift -let streamVideo = StreamVideo( - apiKey: apiKey, - user: .guest("guest"), - token: token, - tokenProvider: { _ in } -) -``` - -And here's an example for an anonymous user: - -```swift -let streamVideo = StreamVideo( - apiKey: apiKey, - user: .anonymous, - token: token, - tokenProvider: { _ in } -) -``` - -Anonymous users don't establish a web socket connection, therefore they won't receive any events. They are just able to watch a livestream or join a call. - -The token for an anonymous user should contain the `call_cids` field, which is an array of the call `cid`'s that the user is allowed to join. - -Call `cid` consists of the call type and the call id, in the following format: `callType:callId`. - -Here's an example JWT token payload for an anonymous user: - -```swift -{ - "iss": "@stream-io/dashboard", - "iat": 1726406693, - "exp": 1726493093, - "user_id": "!anon", - "role": "viewer", - "call_cids": [ - "livestream:123" - ] -} -``` - -If you try to join a call that is not listed in the array, you will receive an error. - -### Client options - -Here's a more complete example of the client options: - -```swift -let streamVideo = StreamVideo( - apiKey: apiKey, - user: user, - token: token, - videoConfig: VideoConfig(), - pushNotificationsConfig: .default, - tokenProvider: { _ in } -) -``` - -The full list of supported options is: - -| Option | Description | Default | -| ------ | ----------- | ------- | -| `apiKey` | The API key to use. Found in the dashboard | - | -| `user` | The user object. You can store custom data on the user. | - | -| `token` | The JWT token to use for authentication. | - | -| `videoConfig` | A `VideoConfig` instance representing the current video config. | `VideoConfig()` | -| `pushNotificationsConfig` | Config for push notifications. 
| `.default` | -| `tokenProvider` | A function to call if the token is expired or invalid. | - | diff --git a/docusaurus/docs/iOS/03-guides/02-joining-creating-calls.mdx b/docusaurus/docs/iOS/03-guides/02-joining-creating-calls.mdx deleted file mode 100644 index 3452e47a8..000000000 --- a/docusaurus/docs/iOS/03-guides/02-joining-creating-calls.mdx +++ /dev/null @@ -1,198 +0,0 @@ ---- -title: Joining & Creating Calls -description: An overview of how to create calls and join them ---- - -### Creating a call - -You create a call by specifying a "call type" and a call id. - -```swift -let call = streamVideo.call(callType: "default", callId: "123") -let result = try await call.create() -``` - -The **call type** controls which features are enabled, and sets up permissions. - -For the call id there are a few things to note: - -- You can reuse the same call multiple times. -- If you have a unique id for the call we recommend passing that as the id. -- If you don't have a unique id you can leave it empty and we'll generate one for you. - -As an example, if you're building a telemedicine app calls will be connected to an appointment. Using your own appointment id as the **call id** makes it easy to find the call later. - -### Joining a call - -Joining a call sets up the realtime communication for audio and video. - -```swift -let call = streamVideo.call(callType: "default", callId: "123") -let result = try await call.join() -``` - -### Create and join a call - -For convenience, you can create and join a call in a single operation. One of the flags you can provide there is `create`. -Set this to `true` if you want to enable creating new call. Set it to `false` if you only want to join an existing call. - -```swift -try await call.join(create: true) -``` - -### Leave call - -To leave a call, you can use the `leave` method: - -```swift -call.leave() -``` - -### End call - -Ending a call requires a [special permission](../permissions-and-moderation). 
This action terminates the call for everyone. - -```typescript -try await call.end() -``` - -Only users with special permission can join an ended call. - -### Call CRUD - -Basic CRUD operations are available on the call object - -```swift -// create -let call = streamVideo.call(callType: "default", callId: "123") -let result = try await call.create() - -// update -let custom: [String: RawJSON] = ["secret": .string("secret")] -let updateResult = try await call.update(custom: custom) - -// get -let getResult = try await call.get() -``` - -### Call Create Options - -Here's a more complete example that shows how to create a call with members and custom data that starts tomorrow and is limited to our team: - -```swift -let members = ["thierry", "tommaso"] -let call = streamVideo.call(callType: "default", callId: UUID().uuidString) - -let result = try await call.create( - memberIds: members, - custom: ["color": .string("red")], - startsAt: Calendar.current.date(byAdding: .day, value: 1, to: Date()), - team: "stream", - ring: true, - notify: false -) -``` - -Members are permanently associated with a call. It allows you to: - -- Restrict the ability to join a call only to members -- Send a push notification to members when the call starts - -#### Backstage setup - -The backstage feature makes it easy to build a use-case where you and your co-hosts can setup your camera before going live. Only after you call call.goLive() the regular users be allowed to join the livestream. - -However, you can also specify a `joinAheadTimeSeconds`, which allows regular users to join the livestream before it is live, in the specified join time before the stream starts. 
- -Here's an example how to do that: - -```swift -let call = streamVideo.call(callType: "livestream", callId: callId) -let backstageRequest = BackstageSettingsRequest( - enabled: true, - joinAheadTimeSeconds: 300 -) -try await call.create( - members: [.init(userId: "test")], - startsAt: Date().addingTimeInterval(500), - backstage: backstageRequest -) -try await call.join() -``` - -In the code snippet above, we are creating a call that starts 500 seconds from now. We are also enabling backstage mode, with a `joinAheadTimeSeconds` of 300 seconds. That means that regular users will be able to join the call 200 seconds from now. - -The following options are supported when creating a call: - -| Option | Description | Default | -| ------ | ----------- | ------- | -| `memberIds` | A list of users ids to add as members to this call. | `nil` | -| `members` | A list of members to add to this call. You can specify the role and custom data on these members. | `nil` | -| `custom` | Any custom data you want to store. | `nil` | -| `startsAt` | When the call will start. Used for calls scheduled in the future, livestreams, audio rooms etc. | `nil` | -| `team` | Restrict the access to this call to a specific team. | `nil` | -| `ring` | If you want the call to ring for each member. | `false` | -| `notify` | If you want the call to nofiy each member by sending push notification. | `false` | -| `maxDuration` | If you want to specify a max duration of the call, in seconds. | `nil` | -| `maxParticipants` | If you want to specify the max number of participants in the call. | `nil` | -| `backstage` | If you want to specify backstage setup for the call. | `nil` | - -### Querying Members - -You can query the members of the call. This is helpful if you have thousands of members in a call and want to paginate. 
- -```swift -let filters: [String: RawJSON] = ["user_id": .string("jaewoong")] -let response = try await call.queryMembers( - filters: filters, - sort: [SortParamRequest.descending("created_at")], - limit: 5 -) -``` - -You can filter the member list on these fields, and sort on the selected fields. Note that you can also query on custom data for the member or the user. - -| Option | Description | Sorting Supported | -| ------ | ----------- | ------- | -| `user_id` | The user's id. | Yes | -| `role` | The member's role. | No | -| `custom` | The custom data on the member. | No | -| `created_at` | When the member was created. | Yes | -| `updated_at` | When the member was last updated. | No | - -## Restricting access - -You can restrict access to a call by tweaking the [Call Type](../configuring-call-types/) permissions and roles. -A typical use case is to restrict access to a call to a specific set of users -> call members. - -#### Step 1: Set up the roles and permissions - -On our [dashboard](https://dashboard.getstream.io/), navigate to the **Video & Audio -> Roles & Permissions** section and select the appropriate role and scope. -In this example, we will use `my-call-type` scope. - -By default, all users unless specified otherwise, have the `user` role. - -We start by removing the `JoinCall` permission from the `user` role for the `my-call-type` scope. -It will prevent regular users from joining a call of this type. - -![Revoke JoinCall for user role](../assets/user-revoke-joincall.png) - -Next, let's ensure that the `call_member` role has the `JoinCall` permission for the `my-call-type` scope. -It will allow users with the `call_member` role to join a call of this type. - -![Grant JoinCall for call_member role](../assets/call_member-grant-joincall.png) - -Once this is set, we can proceed with setting up a `call` instance. 
- -#### Step 2: Set up the call - -```swift -let call = streamVideo.call(callType: "my-call-type", callId: "my-call-id") -try await call.create(members: [.init(role: "call_member", userId: "alice")]) - -// and if necessary, to grant access to more users -try await call.addMembers(members: [.init(role: "call_member", userId: "charlie")]) - -// or, to remove access from some users -try await call.removeMembers(ids: ["charlie"]) -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/03-guides/03-call-and-participant-state.mdx b/docusaurus/docs/iOS/03-guides/03-call-and-participant-state.mdx deleted file mode 100644 index ced3ccce4..000000000 --- a/docusaurus/docs/iOS/03-guides/03-call-and-participant-state.mdx +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: Call & Participant State -description: How the state is exposed ---- - -### Video Call State - -When you join a call, we'll automatically expose observable objects in 3 different places: - -```swift -let clientState = streamVideo.state -let callState = call.state -let participants = call.state.participants -``` - -### Call State - -The call state is kept in sync (updates are received) only when you join the call. - -Here's an example of how you can access the call state: - -```swift -let call = streamVideo.call(callType: "default", callId: "mycall") -let joinResult = try await call.join(create: true) -// state is now available at -let state = call.state -``` - -The following fields are available on the call: - -| Attribute | Description| -| --------- | ---------- | -| `participants` | The list of call participants. | -| `localParticipant` | Shortcut to your own participant state. | -| `remoteParticipants` | The list of call participants other than yourself. | -| `activeSpeakers` | The list of participants who are currently speaking. | -| `dominantSpeaker` | The dominant speaker. | -| `members` | The list of call members. | -| `screenSharingSession` | If someone is screensharing, it will be available here. 
| -| `recordingState` | if the call is being recorded or not. | -| `blockedUserIds` | The user ids who are blocked from this call. | -| `settings` | The settings for this call. | -| `ownCapabilities` | Which actions you have permission to do. | -| `capabilitiesByRole` | What different roles (user, admin, moderator etc.) are allowed to do. | -| `backstage` | If a call is in backstage mode or not. | -| `broadcasting` | If a call is broadcasting (to HLS) or not. | -| `createdAt` | When the call was created. | -| `updatedAt` | When the call was updated. | -| `startsAt` | When the call is scheduled to start. | -| `endedAt` | When the call ended. | -| `endedBy` | User who ended the call. | -| `custom` | Custom data on the call. | -| `team` | Team that the call is restricted to. Default to null. | -| `createdBy` | Who created the call. | -| `ingress` | If there is an active ingress session to this call. IE if you're sending RTMP into the call | -| `transcribing` | a boolean indicating if transciptions are active or or not for this call. | -| `egress` | contains URL for playlist of recording. | -| `session` | the session associated with the call. | -| `reconnectionStatus` | whether the call is reconnecting. | -| `participantCount` | number of participants connected to the call. | -| `duration` | The duration of the call. | -| `statsReport` | Returns stats of the call, updated every 5 seconds. | - -### Participant State - -The `CallParticipant` is the most essential component used to render a participant in a call. It contains all of the information to render a participant, such as audio & video tracks, availabilities of audio & video, the screen sharing session, reactions, and etc. Here's how you can subscribe to participants updates: - -```swift -// all participants -let cancellable = call.state.$participants.sink { participants in - // .. -} -``` - -Filtering of the participants is also supported. 
You can get all the participants with the role "host", with the following code: - -```swift -var hosts: [CallParticipant] { - call.state.participants.filter { $0.roles.contains("host") } -} -``` - -When you join a call with many participants, maximum of 250 participants are returned in the join response. The list of participants is updated dynamically when there are join call events. - -The participants that are publishing video, audio or screensharing are prioritized over the other participants in the list. - -The total number of participants is updated realtime via health check events. This value is available from the call state's `participantCount` property. - -You can get the current user with the following code: - -```swift -let localParticipant: CallParticipant? = call.state.localParticipant -``` - -The following fields are available on the participant: - -| Attribute | Description| -| --------- | ---------- | -| `user` | The `User` object for the participant. | -| `id` | The unique call id of the participant. | -| `roles` | The user's roles in the call. | -| `hasVideo` | Returns whether the participant has video. | -| `hasAudio` | Returns whether the participant has audio. | -| `isScreensharing` | Returns whether the participant is screenSharing. | -| `track` | Returns the participant's video track. | -| `trackSize` | Returns the size of the track for the participant. | -| `screenshareTrack` | Returns the screensharing track for the participant. | -| `showTrack` | Returns whether the track should be shown. | -| `isSpeaking` | Returns whether the participant is speaking. | -| `isDominantSpeaker` | Returns whether the participant is a dominant speaker. | -| `sessionId` | Returns whether the participant is speaking. | -| `connectionQuality` | The participant's connection quality. | -| `joinedAt` | Returns the date when the user joined the call. | -| `pin` | Holds pinning information. | -| `isPinned` | Returns whether the user is pinned. 
| -| `audioLevel` | The audio level for the user. | -| `audioLevels` | A list of the last 10 audio levels. Convenient for audio visualizations. | - -### Participants Sorting - -If you want to change the default sorting of the participants, you can use the `Call` object's method `updateParticipantsSorting`. - -Here's an example that will sort participants alphabetically, by their name: - -```swift -let nameComparator: StreamSortComparator = { - comparison($0, $1, keyPath: \.name) -} -let call = streamVideo.call(callType: callType, callId: callId) -call.updateParticipantsSorting(with: [nameComparator]) -``` - -### Client State - -```swift -// client state is available on the client object -let state = streamVideo.state -``` - -And contains these fields: - -| Attribute | Description| -| --------- | ---------- | -| `user` | The user you're currently authenticated as. | -| `connection` | The connection state. See ConnectionState. | -| `activeCall` | The call you've currently joined. | -| `ringingCall` | Contains the call if you're calling someone or someone is calling you. | \ No newline at end of file diff --git a/docusaurus/docs/iOS/03-guides/04-camera-and-microphone.mdx b/docusaurus/docs/iOS/03-guides/04-camera-and-microphone.mdx deleted file mode 100644 index b396c0cdc..000000000 --- a/docusaurus/docs/iOS/03-guides/04-camera-and-microphone.mdx +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: Camera & Microphone -description: Docs on the media manager ---- - -The SDK does its best to make working with the camera and microphone easy. 
-We expose the following objects on the call: - -```swift -let call = streamVideo.call(callType: "default", callId: "123") -let camera = call.camera -let microphone = call.microphone -let speaker = call.speaker -``` - -### Camera Manager - -The following methods are available on the camera manager: - -```swift -try await call.camera.enable() // enable the camera -try await call.camera.disable() // disable the camera -try await call.camera.flip() // switch between front and back camera -``` - -The camera manager also exposes these observables: - -```swift -call.camera.direction // front/back -call.camera.status // enabled/ disabled. -``` - -### Microphone Manager - -The microphone manager supports changing the mic state: - -```swift -try await call.microphone.enable() // enable the microphone -try await call.microphone.disable() // disable the microphone -``` - -You can get the microphone status like this: - -```swift -call.microphone.status // enabled/ disabled. -``` - -#### Noise Cancellation - -Check our [Noise Cancellation guide](./noise-cancellation). - -### Speaker Manager - -The speaker allows you to enable/disable the speaker phone. - -```swift -try await call.speaker.enableSpeakerPhone() -try await call.speaker.disableSpeakerPhone() -``` - -Additionally, you can enable/disable the audio output on the device. - -```swift -try await call.speaker.enableAudioOutput() -try await call.speaker.disableAudioOutput() -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/03-guides/04-noise-cancellation.mdx b/docusaurus/docs/iOS/03-guides/04-noise-cancellation.mdx deleted file mode 100644 index 09c21579f..000000000 --- a/docusaurus/docs/iOS/03-guides/04-noise-cancellation.mdx +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: Noise Cancellation -description: Documentation on integrating noise cancellation. ---- - -Noise Cancellation capabilities of our [iOS Video SDK](https://getstream.io/video/sdk/ios) can be enabled by installing our NoiseCancellation package. 
Under the hood, this package uses the technology developed by [krisp.ai](https://krisp.ai). - -## Installation - -### Add the SDK to your project - -To add StreamVideo SDK, open Xcode and follow these steps: - -- In Xcode, go to File -> "Add Packages..." -- Paste the URL https://github.com/GetStream/stream-video-noise-cancellation-swift -- In the option "Dependency Rule" choose "Up to the Next Major Version," in the single text input next to it. The field will be populated automatically with the latest version. - -- Choose "Add Package" and wait for the dialog to complete. -- Select `StreamVideoNoiseCancellation` and add it in your project. - -### Integration - -Our iOS SDK provides a utility component that makes the integration smoother. - -- `NoiseCancellationProcessor`: the object that comes from the `StreamVideoNoiseCancellation` and performs the noise cancellation operation. -- `NoiseCancellationFilter`: an object that conforms to `StreamVideo.AudioFilter` and performs all tasks required for the noise cancellation session (e.g request start/stop). - - -```swift -// Firstly you initalize the processor. -let processor = NoiseCancellationProcessor() - -// Secondly you instantiate the NoiseCancellationFilter. You can use any name, but it needs to be unique compared to other AudioFilters you may be using. -let noiseCancellationFilter = NoiseCancellationFilter( - name: "noise-cancellation", - initialize: processor.initialize, - process: processor.process, - release: processor.release -) -``` - -Once you are able to create a `NoiseCancellationFilter` you can rely on `Call`'s API and state to toggle the filter status and also observe the features availability. - -#### Feature availability - -`Call.state.settings` contains the `noiseCancellation` configuration. The configuration contains a mode property that you look into, to determine the feature's availability: - -- `.available` -The featue has been enabled on the dashboard and it's available for the call. 
In this case, you are free to present any noise cancellation toggle UI in your application. - -:::important -Even though the feature may be enabled for your call, you should note that NoiseCancellation is a very performance-heavy process. For that reason, it's recommended to only allow the feature on devices that support Apple's neuralEngine. - -You can easily check if the current device has neuralEngine support by using `StreamVideo.isHardwareAccelerationAvailable` on your streamVideo initialized instance. - -For more info you can refer to our [UI docs](../../ui-cookbook/noise-cancellation). about Noise Cancellation. -::: - -- `.disabled` -The feature hasn't been enabled on the dashboard or the feature isn't available for the call. In this case, you should hide any noise cancellation toggle UI in your application. - -- `.autoOn` -Similar to `.available` with the difference that if possible, the StreamVideo SDK will enable the filter automatically, when the user join the call. - -:::note -The requirements for `.autoOn` to work properly are: -- A `VideoConfig.noiseCancellationFilter` value when you initialise StreamVideo -- Device has support for Apple's neuralEngine -::: - -#### Activate/Deactivate the filter - -The `NoiseCancellationFilter` is an object conforming to the `AudioFilter` protocol. That means, you can manage it in the same manner as any other audioFilter, as described [here](../../advanced/apply-video-filters).) - -In order to support `.autoOn` though, the StreamVideo SDK requires us to provide it with a `NoiseCancellationFilter` instance. The instance you provide, will be used whenever the noise cancellation mode is `.autoOn`. - -You can easily provide the `NoiseCancellationFilter` instance every time you initialize `StreamVideo`, like below: - -```swift -// Create the NoiseCancellationFilter like the example above. - -// Then you create VideoConfig instance that includes our NoiseCancellationFilter. 
-let videoConfig = VideoConfig(noiseCancellationFilter: noiseCancellationFilter) - -// Finally, you create the StreamVideo instance by passing in our videoConfig. -let streamVideo = StreamVideo( - apiKey: apiKey, - user: user, - token: token, - videoConfig: videoConfig, - tokenProvider: { _ in } -) -``` diff --git a/docusaurus/docs/iOS/03-guides/05-call-types.mdx b/docusaurus/docs/iOS/03-guides/05-call-types.mdx deleted file mode 100644 index 6a290e4df..000000000 --- a/docusaurus/docs/iOS/03-guides/05-call-types.mdx +++ /dev/null @@ -1,20 +0,0 @@ ---- -id: configuring-call-types -title: Call Types ---- - -import CallTypesPage from '../../../shared/video/_call-types.mdx'; -import WithExternalLinks from '../../../shared/video/_withExternalLinks'; - - - - \ No newline at end of file diff --git a/docusaurus/docs/iOS/03-guides/06-querying-calls.mdx b/docusaurus/docs/iOS/03-guides/06-querying-calls.mdx deleted file mode 100644 index 324eb454b..000000000 --- a/docusaurus/docs/iOS/03-guides/06-querying-calls.mdx +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: Querying Calls -description: How to query calls ---- - -The `StreamVideo` SDK allows you to query calls and watch them. This allows you to build apps that display feeds of calls with real-time updates (without joining them), similar to Clubhouse. - -You can query calls based on built-in fields as well as any custom field you add to the calls. Multiple filters can be combined using AND, OR logical operators, each filter can use its comparison (equality, inequality, greater than, greater or equal, etc.). - -### Client - -You can query calls by using the client directly. By using the `queryCalls(filters:sort:limit:watch:)` method we will fetch the first page. The result will be an array of calls and a cursor to the next page (if one is available). Finally, we can use the next page cursor to fetch the next page. 
- -```swift -let filters: [String: RawJSON] = ["ended_at": .nil] -let sort = [SortParamRequest.descending("created_at")] -let limit = 10 - -// Fetch the first page of calls -let (firstPageCalls, secondPageCursor) = try await streamVideo.queryCalls( - filters: filters, - sort: sort, - limit: limit -) - -// Use the cursor we received from the previous call to fetch the second page -let (secondPageCalls, _) = try await streamVideo.queryCalls(next: secondPageCursor) -``` - -### CallsController - -In order to query calls, you need to create an instance of the `CallsController`, via the `StreamVideo` object's method `makeCallsController`: - -```swift -private lazy var callsController: CallsController = { - let sortParam = CallSortParam(direction: .descending, field: .createdAt) - let filters: [String: RawJSON] = ["type": .dictionary(["$eq": .string("audio_room")])] - let callsQuery = CallsQuery(sortParams: [sortParam], filters: filters, watch: true) - return streamVideo.makeCallsController(callsQuery: callsQuery) -}() -``` - -The controller requires the `CallsQuery` parameter, which provides sorting and filtering information for the query, as well as whether the calls should be watched. - -#### Sort Parameters - -The `CallSortParam` model contains two properties - `direction` and `field`. The `direction` can be `ascending` and `descending`, while the `field` can be one of the following values: - -```swift -/// The sort field for the call start time. -static let startsAt: Self = "starts_at" -/// The sort field for the call creation time. -static let createdAt: Self = "created_at" -/// The sort field for the call update time. -static let updatedAt: Self = "updated_at" -/// The sort field for the call end time. -static let endedAt: Self = "ended_at" -/// The sort field for the call type. -static let type: Self = "type" -/// The sort field for the call id. -static let id: Self = "id" -/// The sort field for the call cid. 
-static let cid: Self = "cid" -``` - -You can provide an array of `CallSortParam`'s in order to have sorting by multiple fields. - -#### Filters - -The `StreamVideo` API supports MongoDB style queries to make it easier to fetch the required data. For example, if you want to query the channels that are of type `audio_room`, you would need to write the following filter: - -```swift -let filters: [String: RawJSON] = ["type": .dictionary(["$eq": .string("audio_room")])] -``` - -You can find the supported operators [here](https://getstream.io/chat/docs/ios-swift/query_syntax_operators/?language=swift&q=filter). - -#### Pagination - -You can encapsulate the querying calls login into an object `CallsViewModel` for simplicity and state management. -```swift -@MainActor -final class CallsViewModel: ObservableObject { - - @Injected(\.streamVideo) internal var streamVideo - - @Published internal var calls = [Call]() - - private var cancellables = Set() - - private lazy var callsController: CallsController = { - let sortParam = CallSortParam(direction: .descending, field: .createdAt) - let filters: [String: RawJSON] = ["type": .dictionary(["$eq": .string("audio_room")])] - let callsQuery = CallsQuery(sortParams: [sortParam], filters: filters, watch: true) - return streamVideo.makeCallsController(callsQuery: callsQuery) - }() - - init() { - subscribeToCallsUpdates() - loadCalls() - } - - func onCallAppear(_ call: Call) { - let index = calls.firstIndex { callData in - callData.cId == call.cId - } - guard let index else { return } - - if index < calls.count - 10 { - return - } - - loadCalls() - } - - func loadCalls() { - Task { - try await callsController.loadNextCalls() - } - } - - func subscribeToCallsUpdates() { - callsController.$calls.sink { calls in - DispatchQueue.main.async { - self.calls = calls - } - } - .store(in: &cancellables) - } -} -``` - -You can then fetch the next calls from the specified query, by calling the `loadNextCalls` method. 
For example, if you are doing pagination in SwiftUI, you can use the `onAppear` modifier on each entry, and based on its index fetch the next calls. - -First, in your SwiftUI view, you can call a method from your presentation layer (for example a view model), on the view item appearance: - -```swift -ScrollView { - LazyVStack { - ForEach(callsViewModel.calls, id: \.callId) { call in - CallView(viewFactory: viewFactory, viewModel: viewModel) - .padding(.vertical, 4) - } - } -} -``` - -Next, in your presentation layer, you can check based on the call index, if the next page should be fetched: - -```swift -func onCallAppear(_ call: CallData) { - let index = calls.firstIndex { callData in - callData.callCid == call.callCid - } - guard let index else { return } - - if index < calls.count - 10 { - return - } - - loadCalls() -} - -func loadCalls() { - Task { - try await callsController.loadNextCalls() - } -} -``` - -The `CallsController` automatically manages the cursors for the pagination. You only need to be careful not to call the `loadNextCalls` method before it's necessary (like in the example above). - -#### Watching Calls - -You are able to watch real-time updates of the calls. The `@Published` `calls` variable will provide all the updates to the calls, that you can use to update your UI. 
- -#### Cleanup - -When you are done watching channels, you should cleanup the controller (which will stop the WS updates): - -```swift -callsController.cleanUp() -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/03-guides/07-query-call-members.mdx b/docusaurus/docs/iOS/03-guides/07-query-call-members.mdx deleted file mode 100644 index c54c8d6c0..000000000 --- a/docusaurus/docs/iOS/03-guides/07-query-call-members.mdx +++ /dev/null @@ -1,52 +0,0 @@ ---- -id: querying-call-members -title: Querying Call Members -description: How to query call members ---- - -import FilterConditions from '../../../shared/_filter-operators.mdx'; -import CallMemberFilters from '../../../shared/video/_call-member-filters.mdx'; -import CallMemberSort from '../../../shared/video/_call-member-sort-fields.mdx'; - -When you create or join a call you get a list of call members, however this can return at most 25 members: - -```swift -// The maximum limit is 25 -// The default limit is 25 -try await call.get(membersLimit: 25) -``` - -To get the complete list of call members the Stream API allows you to query, filter and sort members of a call using a paginated list. 
- -## Examples - -Below are a few examples of how to use this API: - -```swift -// sorting and pagination -let sort = SortParamRequest(direction: 1, field: "user_id") -let result1 = try await call.queryMembers( - sort: [sort], - limit: 10 -) - -// loading the next page -if let next = result1.next { - let result2 = try await call.queryMembers(sort: [sort], limit: 10, next: next) -} - -// filtering -let result2 = try await call.queryMembers( - filters: ["role": .dictionary(["eq": "admin"])] -) -``` - -## Sort options - - - -## Filter options - - - - diff --git a/docusaurus/docs/iOS/03-guides/08-permissions-and-moderation.mdx b/docusaurus/docs/iOS/03-guides/08-permissions-and-moderation.mdx deleted file mode 100644 index 37500f677..000000000 --- a/docusaurus/docs/iOS/03-guides/08-permissions-and-moderation.mdx +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Permissions & Moderation -description: Explanation of call permissions and moderation features ---- - -## Permissions & Moderation - -In some types of calls, there's a requirement to moderate the behaviour of the participants. Examples include muting a participant, or ending the call for everyone. Those capabilities are usually reserved for the hosts of the call (users with elevated capabilities). They usually have additional moderation controls in their UI, that allow them to achieve these actions. - -The StreamVideo SDK has support for such capabilities, with the usage of the `Call`'s permissions features. - -### Requesting & Granting permission - -This example shows how to check if you have permissions to do something and ask for permission. Let's say that you've joined an audio room and want to speak - -```swift -// see if you currently have this permission. -let hasPermission = call.currentUserHasCapability(.sendAudio) - -// request the host to grant you this permission. 
-let response = try await call.request(permissions: [.sendAudio]) -``` - -Permission requests are exposed from `call.state` on the `permissionRequests` published variable. - -```swift -if let request = call.state.permissionRequests.first { - // reject it - request.reject() - - // grant it - try await call.grant(request: request) -} -``` - -You can also grant permissions directly using call.grantPermissions() method like the example below: - -```swift -try await call.grant(permissions: [.sendAudio], for: "thrierry") -``` - -You can request the following 3 permissions: `send-audio`, `send-video`, and `screenshare`. - -### Revoking permissions - -Similarly to granting permissions, the host, can also revoke permissions for any user in the call. - -```swift -// revoke user's with id "tommaso", the permission to send audio -let response = try await call.revoke(permissions: [.sendAudio], for: "tommaso") -``` - -### Moderation Capabilities - -```swift -// block a user -try await call.blockUser(with: "tommaso") - -// unblock a user -try await call.unblockUser(with: "tommaso") - -// remove a member from a call -try await call.removeMembers(ids: ["tommaso"]) -``` - -Alternatively you can also mute users. - -```swift -// mutes all users (audio and video are true by default) other than yourself -try await call.muteAllUsers() - -// mute user with id "tommaso" specifically -try await call.mute(userId: "tommaso") -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/03-guides/09-reactions.mdx b/docusaurus/docs/iOS/03-guides/09-reactions.mdx deleted file mode 100644 index 4216381fa..000000000 --- a/docusaurus/docs/iOS/03-guides/09-reactions.mdx +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Reactions -description: How reactions work ---- - -## Sending Reactions - -It's easy to add reactions to your call. 
- -```swift -let response = try await call.sendReaction(type: "fireworks") -``` - -You can also add custom data to the reaction and specify a specific emoji - -```swift -let response = try await call.sendReaction( - type: "raise-hand", - custom: ["mycustomfield": "hello"], - emojiCode: ":smile:" -) -``` - -## Listening to Reactions - -Here's an example that shows how to listen to reaction events: - -```swift -for await event in call.subscribe(for: CallReactionEvent.self) { - // handle reaction event -} -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/03-guides/10-custom-events.mdx b/docusaurus/docs/iOS/03-guides/10-custom-events.mdx deleted file mode 100644 index fb6a00e28..000000000 --- a/docusaurus/docs/iOS/03-guides/10-custom-events.mdx +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: Custom Events -description: How custom events work ---- - -## Sending Custom Events - -In case the reaction system isn't flexible enough we also support custom events. - -You can use custom events to send data between participants in the call. This is a realtime layer that you can broadcast your own events to. - -For example, if you are building a collaborative drawing app, you can send the coordinates to the other participants with the following code: - -```swift -let response = try await call.sendCustomEvent(["type": .string("draw"), "x": .number(10), "y": .number(20)]) -``` - -The data that can be passed with a custom event has a limit of 100KB. - -If you want to pass larger files: -- you can send URLs to those resources and download them from your location when the event is received. -- you can split the file into chunks of bytes and send them with separate events. -- if you are sending an image, you can resize it before you pass it in the event. 
- -Here's an example that shows how to resize an image and send it as a base 64 encoded string via custom event: - -```swift -func sendImageData(_ data: Data) async { - guard - let snapshot = UIImage(data: data), - let resizedImage = resize(image: snapshot, to: .init(width: 30, height: 30)), - let snapshotData = resizedImage.jpegData(compressionQuality: 0.8) - else { - return - } - - do { - try await call.sendCustomEvent([ - "snapshot": .string(snapshotData.base64EncodedString()) - ]) - } catch { - log.error("Failed to send image.", error: error) - } -} - -private func resize( - image: UIImage, - to targetSize: CGSize -) -> UIImage? { - guard - image.size.width > targetSize.width || image.size.height > targetSize.height - else { - return image - } - - let widthRatio = targetSize.width / image.size.width - let heightRatio = targetSize.height / image.size.height - - // Determine the scale factor that preserves aspect ratio - let scaleFactor = min(widthRatio, heightRatio) - - let scaledWidth = image.size.width * scaleFactor - let scaledHeight = image.size.height * scaleFactor - let targetRect = CGRect( - x: (targetSize.width - scaledWidth) / 2, - y: (targetSize.height - scaledHeight) / 2, - width: scaledWidth, - height: scaledHeight - ) - - // Create a new image context - UIGraphicsBeginImageContextWithOptions(targetSize, false, 0) - image.draw(in: targetRect) - - let newImage = UIGraphicsGetImageFromCurrentImageContext() - UIGraphicsEndImageContext() - - return newImage -} -``` - -## Listening to Custom Events - -Custom events are only delivered to clients that are watching the call. - -To receive custom events, you need to subscribe to the custom WebSocket event. - -```swift -for await event in call.subscribe(for: CustomVideoEvent.self) { - // read custom data - let customData = event.custom - // perform actions with the custom data. 
-} -``` - -The custom event has the following properties: - -- `callCid`: `String` - the type and call id that identifies the call -- `createdAt`: `Date` - when was the event created -- `custom`: `[String: RawJSON]` - any custom data you send via the `sendCustomEvent` method -- `user`: `UserResponse` - the user who sent the event \ No newline at end of file diff --git a/docusaurus/docs/iOS/03-guides/12-call-state.mdx b/docusaurus/docs/iOS/03-guides/12-call-state.mdx deleted file mode 100644 index 86a1de358..000000000 --- a/docusaurus/docs/iOS/03-guides/12-call-state.mdx +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: Calling state ---- - -If you are using our `CallViewModel`, the state of the call is managed for you and available as a `@Published` property called `callingState`. It can be used to show custom UI, such as incoming / outgoing call screens, depending on your use-case. If you are using our default UI components, you don't have to do any special handling about the `callingState`. - -The `CallingState` enumeration has the following possible values: - -- `idle` - There's no active call at the moment. In this case, your hosting view should be displayed. -- `lobby(LobbyInfo)` - The user is in the lobby before joining the call. -- `incoming(IncomingCall)` - There's an incoming call, therefore an incoming call screen needs to be displayed. -- `joining` - The user is joining a call. -- `outgoing` - The user rings someone, therefore an outgoing call needs to be displayed. -- `inCall` - The user is in a call. -- `reconnecting` - The user dropped the connection and now they are trying to reconnect. 
- -### Example handling - -If you want to build your own UI layer, here's an example how to react to the changes of the calling state in SwiftUI: - -```swift -public var body: some View { - ZStack { - if viewModel.callingState == .outgoing { - viewFactory.makeOutgoingCallView(viewModel: viewModel) - } else if viewModel.callingState == .inCall { - if !viewModel.participants.isEmpty { - if viewModel.isMinimized { - MinimizedCallView(viewModel: viewModel) - } else { - viewFactory.makeCallView(viewModel: viewModel) - } - } else { - WaitingLocalUserView(viewModel: viewModel, viewFactory: viewFactory) - } - } else if case let .incoming(callInfo) = viewModel.callingState { - viewFactory.makeIncomingCallView(viewModel: viewModel, callInfo: callInfo) - } - } - .onReceive(viewModel.$callingState) { _ in - if viewModel.callingState == .idle || viewModel.callingState == .inCall { - utils.callSoundsPlayer.stopOngoingSound() - } - } -} -``` - -Similarly, you can listen to the UI changes via the `@Published` property in UIKit: - -```swift -@MainActor -private func listenToIncomingCalls() { - callViewModel.$callingState.sink { [weak self] newState in - guard let self = self else { return } - if case .incoming(_) = newState, self == self.navigationController?.topViewController { - let next = CallViewController.make(with: self.callViewModel) - CallViewHelper.shared.add(callView: next.view) - } else if newState == .idle { - CallViewHelper.shared.removeCallView() - } - } - .store(in: &cancellables) -} -``` - -:::note -An example implementation of `CallViewHelper` can be seen below: - -```swift -class CallViewHelper { - - static let shared = CallViewHelper() - - private var callView: UIView? 
- - private init() {} - - func add(callView: UIView) { - guard self.callView == nil else { return } - guard let window = UIApplication.shared.windows.first else { - return - } - callView.isOpaque = false - callView.backgroundColor = UIColor.clear - self.callView = callView - window.addSubview(callView) - } - - func removeCallView() { - callView?.removeFromSuperview() - callView = nil - } -} -``` -::: - -### Call Settings - -The `CallViewModel` provides information about the current call settings, such as the camera position and whether there's an audio and video turned on. This is available as a `@Published` property called `callSettings`. - -If you are building a custom UI, you should use the values from this struct to show the corresponding call controls and camera (front or back). - -If you want to learn more about the call settings and how to use them, please check the following [page](../../ui-components/view-model). diff --git a/docusaurus/docs/iOS/03-guides/_category_.json b/docusaurus/docs/iOS/03-guides/_category_.json deleted file mode 100644 index 506e345f6..000000000 --- a/docusaurus/docs/iOS/03-guides/_category_.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "label": "Guides" -} diff --git a/docusaurus/docs/iOS/04-ui-components/01-overview.mdx b/docusaurus/docs/iOS/04-ui-components/01-overview.mdx deleted file mode 100644 index a461382f1..000000000 --- a/docusaurus/docs/iOS/04-ui-components/01-overview.mdx +++ /dev/null @@ -1,142 +0,0 @@ ---- -title: Overview -description: Overview of the UI components ---- - -## Introduction - -The StreamVideo SDK provides UI components to facilitate the integration of video capabilities into your apps. - -The UI components are provided in SwiftUI. If you use UIKit, we also provide UIKit wrappers, that can make it easier for you to integrate video in UIKit-based apps. 
- -## UI components vs Custom - -StreamVideo provides both ready made components to use directly in your app, as well as extension points that you can use to inject your own custom UI. If you only need the calling functionality to support your (custom built) UI, you can simply rely on our low-level client. - -Let's explore the different possibilities and how they would impact your app and the integration efforts. - -## Using only the low-level client - -If your app needs a completely custom UI and calling flow, you can use only our low-level client that implements the WebRTC protocol and communicates with our backend services. If you go with this approach, you can either use our stateful `CallViewModel` that allows you to observe the call state (list of participants, camera & microphone state, etc), or use our lower level `Call` object and implement your own presentation objects. - -Additionally, if you go with this approach, you can still use some components from our UI SDKs (if they fit your use-case), to facilitate your development. We have several examples for this in [our cookbook](../../ui-cookbook/overview). - -This approach would require some familiarity with our low-level client, and the highest development efforts compared to the other two options. On the other hand, it gives you maximum flexibility to customize the calling flow according to your needs. - -In any case, our view components are highly customizable and flexible for many video/audio calling cases, and they can save big development efforts. Therefore, we recommend that you consider the other two options below, before deciding on starting from scratch. - -## Mix & match - -The mix & match approach is ideal if you need one of the standard calling flows, but with a possibility to replace parts of the UI with your own implementation. Our UI SDK allows you to completely swap views with your own interface elements. 
- -For example, if you are building an app with incoming / outgoing calling screens, you can easily swap only those screens. For building your custom screens, you can still reuse our lower level components. - -This approach provides a nice balance between levels of customization and development efforts. Find examples and extension slots to get started in our docs [here](../view-slots). - -## Simple theming - -If you need a standard video calling experience that needs to match the rest of your app's look and feel, you can use our theming customizations. - -This is the fastest way to add calling support to your app, just setup our video client and attach our `CallModifier` to your hosting view. You can change the fonts, colors, icons, texts and sounds used in the SDK, by interacting with our `Appearance` class. - -## StreamVideoUI object - -The UI SDK provides a context provider object that allows simple access to functionalities exposed by the SDK, such as branding, presentation logic, icons, and the low-level video client. - -The `StreamVideoUI` object can be initialized in two ways. The first way is to implicitly create the low-level client `StreamVideo`, by only creating the `StreamVideoUI` object. - -```swift -let streamVideoUI = StreamVideoUI( - apiKey: "your_api_key", - user: user.userInfo, - token: user.token, - tokenProvider: { result in - result(.success(user.token)) - } -) -``` - -The other option is to first create the `StreamVideo` client (in case you want to keep an instance of it), and use that one to create the `StreamVideoUI` object. - -```swift -let streamVideo = StreamVideo( - apiKey: "your_api_key", - user: user.userInfo, - token: user.token, - tokenProvider: { result in - result(.success(user.token)) - } -) -let streamVideoUI = StreamVideoUI(streamVideo: streamVideo) -``` - -:::important -It's important to initialize the client early in your app's lifecycle, and as soon as your user is logged in. 
If you try to display a view without the `StreamVideoUI` object being created, you will receive a crash. -::: - -## Customization options - -### Appearance - -When you create the `StreamVideoUI` object, you can optionally provide your custom version of the `Appearance` class, which will allow you to customize things like fonts, colors, icons, and sounds used in the SDK. - -Find more details on how to do this on [this page](../video-theme). - -### Changing Views - -Apart from the basic theming customizations, you can also swap certain views, with your implementation. You can find more details on how to do that on this [page](../customizing-views). - -## Dependency Injection - -For injecting dependencies in the SwiftUI SDK, we are using an approach based on [this article](https://www.avanderlee.com/swift/dependency-injection/). It works similarly to the @Environment in SwiftUI, but it also allows access to the dependencies in non-view related code. - -When you initialize the SDK (by creating the `StreamVideoUI` object), all the dependencies are created too, and you can use them anywhere in your code. In order to access a particular type, you need to use the `@Injected(\.keyPath)` property wrapper: - -```swift -@Injected(\.streamVideo) var streamVideo -@Injected(\.fonts) var fonts -@Injected(\.colors) var colors -@Injected(\.images) var images -@Injected(\.sounds) var sounds -@Injected(\.utils) var utils -``` - -### Extending the DI with Custom Types - -In some cases, you might also need to extend our DI mechanism with your own types. For example, you may want to be able to access your custom types like this: - -```swift -@Injected(\.customType) var customType -``` - -In order to achieve this, you first need to define your own `InjectionKey`, and define it's `currentValue`, which basically creates the new instance of your type. 
- -```swift -class CustomType { - // your custom logic here -} - -struct CustomInjectionKey: InjectionKey { - static var currentValue: CustomType = CustomType() -} -``` - -Next, you need to extend our `InjectedValues` with your own custom type, by defining its getter and setter. - -```swift -extension InjectedValues { - /// Provides access to the `CustomType` instance in the views and view models. - var customType: CustomType { - get { - Self[CustomInjectionKey.self] - } - set { - Self[CustomInjectionKey.self] = newValue - } - } -} -``` - -With these few simple steps, you can now access your custom functionality in both your app code and in your custom implementations of the views used throughout the SDK. - -Additionally, DI entries can be accessed by using the `InjectedValues[\.]` syntax (for example `InjectedValues[\.customType]`). This approach can be useful in case you want to override our default injected values. \ No newline at end of file diff --git a/docusaurus/docs/iOS/04-ui-components/02-swiftui-vs-uikit.mdx b/docusaurus/docs/iOS/04-ui-components/02-swiftui-vs-uikit.mdx deleted file mode 100644 index 8cd342883..000000000 --- a/docusaurus/docs/iOS/04-ui-components/02-swiftui-vs-uikit.mdx +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: SwiftUI vs. UIKit ---- - -## Overview - -The SDK was developed with SwiftUI as the primary use-case in mind. However, we take the integration into UIKit-based applications very seriously. This is why we also offer an UIKit SDK that wraps the SwiftUI components so that you don't have to. - -Find explanations for integrating both of the SDK on this page. - -:::info -You can also have a direct view at the [SwiftUI SDK and the UIKit SDK](https://github.com/GetStream/stream-video-swift) on GitHub and see how to integrate them into your apps [on the Chat Integration page](../../advanced/chat-integration). 
-::: - -## SwiftUI SDK - -The simplest way to add calling support to your hosting view is to attach the `CallModifier`: - -```swift -struct CallView: View { - - @StateObject var viewModel: CallViewModel - - init() { - _viewModel = StateObject(wrappedValue: CallViewModel()) - } - - var body: some View { - HomeView(viewModel: viewModel) - .modifier(CallModifier(viewModel: viewModel)) - } -} -``` - -With this setup, the `CallViewModel` will listen to incoming call events and present the appropriate UI, based on the state. - -You can customize the look and feel of the screens presented in the calling flow, by implementing the corresponding methods in our `ViewFactory`. - -Most of our components are public, so you can use them as building blocks if you want to build your custom UI. - -All the texts, images, fonts and sounds used in the SDK are configurable via our `Appearance` class, to help you brand the views to be inline with your hosting app. - -## UIKit SDK - -The UIKit SDK provides UIKit wrappers around the SwiftUI views. Its main integration point is the `CallViewController` which you can easily push in your navigation stack, or add as a modal screen. - -```swift -private func didTapStartButton() { - let next = CallViewController(viewModel: callViewModel) - next.modalPresentationStyle = .fullScreen - next.startCall(callType: "default", callId: text, members: selectedParticipants) - self.navigationController?.present(next, animated: true) -} -``` - -The `CallViewController` is created with a `CallViewModel` - the same one used in our SwiftUI SDK. - -At the moment, all the customizations in the UIKit SDK, need to be done in SwiftUI. 
diff --git a/docusaurus/docs/iOS/04-ui-components/03-video-theme.mdx b/docusaurus/docs/iOS/04-ui-components/03-video-theme.mdx deleted file mode 100644 index a0f242b7d..000000000 --- a/docusaurus/docs/iOS/04-ui-components/03-video-theme.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Theme -description: How to use the VideoTheme ---- - -## Theming - -When you create the `StreamVideoUI` object, you can optionally provide your custom version of the `Appearance` class, which will allow you to customize things like fonts, colors, icons, and sounds used in the SDK. - -## Changing Colors - -If you want to change the colors, you can set your own values in the `Colors` class: - -```swift -let streamBlue = UIColor(red: 0, green: 108.0 / 255.0, blue: 255.0 / 255.0, alpha: 1) -var colors = Colors() -colors.tintColor = Color(streamBlue) -let appearance = Appearance(colors: colors) -let streamVideo = StreamVideoUI(streamVideo: streamVideo, appearance: appearance) -``` - -## Changing Images - -All of the images used in the SDK can be replaced with your custom ones. To customize the images, create a new instance of the `Images` class and update the images you want to change. For example, if you want to change the icon for hanging up, you just need to override the corresponding image property. - -```swift -var images = Images() -images.hangup = Image("your_custom_hangup_icon") -let appearance = Appearance(images: images) -let streamVideoUI = StreamVideoUI(streamVideo: streamVideo, appearance: appearance) -``` - -## Changing Fonts - -You can provide your font to match the style of the rest of your app. In the SDK, the default system font is used, with dynamic type support. To keep this support with your custom fonts, please follow Apple's guidelines about scaling fonts [automatically](https://developer.apple.com/documentation/uikit/uifont/scaling_fonts_automatically). - -The fonts used in the SDK can be customized via the `Fonts` struct, which is part of the `Appearance` class. 
So, for example, if we don't want to use the bold footnote font, we can easily override it with our non-bold version. - -```swift -var fonts = Fonts() -fonts.footnoteBold = Font.footnote -let appearance = Appearance(fonts: fonts) -let streamVideoUI = StreamVideoUI(streamVideo: streamVideo, appearance: appearance) -``` - -## Changing Sounds - -There are several sounds used throughout the video SDK, such as for incoming and outgoing calls. You can change these sounds with your custom ones, by changing the corresponding values in the `Sounds` class: - -```swift -let sounds = Sounds() -sounds.incomingCallSound = "your_custom_sound" -let appearance = Appearance(sounds: sounds) -let streamVideoUI = StreamVideoUI(streamVideo: streamVideo, appearance: appearance) -``` diff --git a/docusaurus/docs/iOS/04-ui-components/04-customizing-views.mdx b/docusaurus/docs/iOS/04-ui-components/04-customizing-views.mdx deleted file mode 100644 index ef24738ca..000000000 --- a/docusaurus/docs/iOS/04-ui-components/04-customizing-views.mdx +++ /dev/null @@ -1,166 +0,0 @@ ---- -title: View Customizations ---- - -## Injecting Your Views - -The SwiftUI SDK allows complete view swapping of some of its components. This means you can, for example, create your own (different) outgoing call view and inject it in the slot of the default one. For most of the views, the SDK doesn't require anything else than the view to conform to the standard SwiftUI `View` protocol and return a view from the `body` variable. You don't need to implement any other lifecycle related methods or additional protocol conformance. - -### How the View Swapping Works - -All the views that allow slots that your implementations can replace are generic over those views. This means that view type erasure (AnyView) is not used. The views contain default implementations, and in general, you don't have to deal with the generics part of the code. 
Using generics over type erasure allows SwiftUI to compute the diffing of the views faster and more accurately while boosting performance and correctness. - -### View Factory - -To abstract away the creation of the views, a protocol called `ViewFactory` is used in the SDK. This protocol defines the swappable views of the video experience. There are default implementations for all the views used in the SDK. If you want to customize a view, you will need to provide your own implementation of the `ViewFactory`, but you will need to implement only the view you want to swap. - -For example, if we want to change the outgoing call view, we will need to implement the `makeOutgoingCallView(viewModel: CallViewModel) -> OutgoingCallViewType` in the `ViewFactory`: - -```swift -class CustomViewFactory: ViewFactory { - - func makeOutgoingCallView(viewModel: CallViewModel) -> some View { - CustomOutgoingCallView(viewModel: viewModel) - } - -} -``` - -Next, when you attach the `CallModifier` to your hosting view, you need to inject the newly created `CustomViewFactory`. The SDK will use the views you have provided in your custom implementation, while it will default back to the ones from the SDK in the slots where you haven't provided any implementation. - -```swift -var body: some View { - YourHostingView() - .modifier(CallModifier(viewFactory: CustomViewFactory(), viewModel: viewModel)) -} -``` - -Here are all the slots available for customization in the SwiftUI SDK. 
- -### Outgoing Call View - -In order to swap the outgoing call view, we will need to implement the `makeOutgoingCallView(viewModel: CallViewModel) -> some View` in the `ViewFactory`: - -```swift - -class CustomViewFactory: ViewFactory { - - func makeOutgoingCallView(viewModel: CallViewModel) -> some View { - CustomOutgoingCallView(viewModel: viewModel) - } - -} -``` - -### Incoming Call View - -Similarly, the incoming call view can be replaced by implementing the `makeIncomingCallView(viewModel: CallViewModel, callInfo: IncomingCall) -> some View` in the `ViewFactory`: - -```swift -public func makeIncomingCallView(viewModel: CallViewModel, callInfo: IncomingCall) -> some View { - CustomIncomingCallView(callInfo: callInfo, viewModel: viewModel) -} -``` - -### Call View - -When the call state change to `.inCall`, the call view slot is shown. The default implementation provides several customizable parts, such as the video participants, the call controls (mute/unmute, hang up) and the top trailing view (which by default displays participants' info). - -In order to swap the default call view, you will need to implement the `makeCallView(viewModel: CallViewModel) -> some View`: - -```swift -public func makeCallView(viewModel: CallViewModel) -> some View { - CustomCallView(viewModel: viewModel) -} -``` - -Apart from the main call view, you can also swap its building blocks. - -#### Call Controls View - -The call controls view by default displays controls for hiding/showing the camera, muting/unmuting the microphone, changing the camera source (front/back) and hanging up. If you want to change these controls, you will need to implement the `makeCallControlsView(viewModel: CallViewModel) -> some View` method: - -```swift -func makeCallControlsView(viewModel: CallViewModel) -> some View { - CustomCallControlsView(viewModel: viewModel) -} -``` - -#### Video Participants View - -The video participants view slot presents the grid of users that are in the call. 
If you want to provide a different variation of the participants display, you will need to implement the `makeVideoParticipantsView` in the `ViewFactory`: - -```swift -public func makeVideoParticipantsView( - viewModel: CallViewModel, - availableFrame: CGRect, - onChangeTrackVisibility: @escaping @MainActor(CallParticipant, Bool) -> Void -) -> some View { - VideoParticipantsView( - viewFactory: self, - viewModel: viewModel, - availableFrame: availableFrame, - onChangeTrackVisibility: onChangeTrackVisibility - ) -} -``` - -In the method, the following parameters are provided: - -- `viewModel` - the viewModel that manages the call. -- `availableFrame` - the available frame for the participants view. -- `onChangeTrackVisibility` - callback when the track changes its visibility. - -#### Video Participant View - -If you want to customize one particular participant view, you can change it via the method `makeVideoParticipantView`: - -```swift -func makeVideoParticipantView( - participant: CallParticipant, - id: String, - availableFrame: CGRect, - contentMode: UIView.ContentMode, - customData: [String: RawJSON], - call: Call? -) -> some View { - VideoCallParticipantView( - participant: participant, - id: id, - availableFrame: availableFrame, - contentMode: contentMode, - customData: customData, - call: call - ) -} -``` - -Additionally, you can change the modifier applied to the view, by implementing the `makeVideoCallParticipantModifier`: - -```swift -public func makeVideoCallParticipantModifier( - participant: CallParticipant, - call: Call?, - availableFrame: CGRect, - ratio: CGFloat, - showAllInfo: Bool -) -> some ViewModifier { - VideoCallParticipantModifier( - participant: participant, - call: call, - availableFrame: availableFrame, - ratio: ratio, - showAllInfo: showAllInfo - ) -} -``` - -#### Top View - -This is the view presented in the top area of the call view. 
By default, it displays a back button (to go in minimized mode) and a button that shows the list of participants. You can swap this view with your own implementation, by implementing the `makeCallTopView` in the `ViewFactory`: - -```swift -public func makeCallTopView(viewModel: CallViewModel) -> some View { - CallTopView(viewModel: viewModel) -} -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/04-ui-components/05-uikit-customizations.mdx b/docusaurus/docs/iOS/04-ui-components/05-uikit-customizations.mdx deleted file mode 100644 index 26a4ace0f..000000000 --- a/docusaurus/docs/iOS/04-ui-components/05-uikit-customizations.mdx +++ /dev/null @@ -1,278 +0,0 @@ ---- -title: UIKit Customizations ---- - -In order to enable a smoother video integration in your UIKit projects, we provide UIKit wrappers over our SwiftUI components. In the following section, we will see how we can customize them. - -### View Factory Customizations - -As described in the [customizing views](../customizing-views) section, we allow swapping of the default UI components with your custom ones. To achieve this, you will need to create your own implementation of the `ViewFactory` protocol. - -In our UIKit components, we expose a `CallViewController` that can be easily used in UIKit based projects. The `CallViewController` uses the default `ViewFactory` implementation from the SwiftUI SDK. However, you can easily inject your own implementation, by subclassing the `CallViewController` and providing your own implementation of the `setupVideoView` method. - -For example, let's extend the video call controls with a chat icon. 
In order to do this, you will need to implement the `makeCallControlsView` method in the `ViewFactory`: - -```swift -class VideoWithChatViewFactory: ViewFactory { - - static let shared = VideoWithChatViewFactory() - - private init() {} - - func makeCallControlsView(viewModel: CallViewModel) -> some View { - ChatCallControls(viewModel: viewModel) - } - -} -``` - -At the end of this guide, there's a possible implementation of `ChatCallControls`, that you can customize as you see fit. - -Next, we need to inject our custom implementation into the StreamVideo UIKit components. In order to do this, we need to create a subclass of the `CallViewController`. - -```swift -class CallChatViewController: CallViewController { - - override func setupVideoView() { - let videoView = makeVideoView(with: VideoWithChatViewFactory.shared) - view.embed(videoView) - } - -} -``` - -Now, you can use the `CallChatViewController` in your app. There are several options how you can add the view controller in your app's view hierarchy. - -One option is to use the standard navigation patterns, such as pushing or presenting the view controller over your app's views. You can do that, if you don't need the minimized call option. However, if you want to allow users to use your app while still being in call, we recommend to add the `CallViewController` (or its subclasses) as a subview. - -Here's one example implementation that adds the view in the application window (this is needed in case you want to also navigate throughout your app while in a call): - -```swift -@MainActor -class CallViewHelper { - - static let shared = CallViewHelper() - - private var callView: UIView? 
- - private init() {} - - func add(callView: UIView) { - guard self.callView == nil else { return } - guard let window = UIApplication.shared.windows.first else { - return - } - callView.isOpaque = false - callView.backgroundColor = UIColor.clear - self.callView = callView - window.addSubview(callView) - } - - func removeCallView() { - callView?.removeFromSuperview() - callView = nil - } -} -``` - -Finally, in your app, you can add the `CallViewController` with the following code: - -```swift -@objc private func didTapStartButton() { - let next = CallChatViewController.makeCallChatController(with: self.callViewModel) - next.startCall(callType: "default", callId: text, members: selectedParticipants) - CallViewHelper.shared.add(callView: next.view) -} -``` - -You can also listen to call events, and show/hide the calling view depending on the state: - -```swift -private func listenToIncomingCalls() { - callViewModel.$callingState.sink { [weak self] newState in - guard let self = self else { return } - if case .incoming(_) = newState, self == self.navigationController?.topViewController { - let next = CallChatViewController.makeCallChatController(with: self.callViewModel) - CallViewHelper.shared.add(callView: next.view) - } else if newState == .idle { - CallViewHelper.shared.removeCallView() - } - } - .store(in: &cancellables) -} -``` - -You can find fully working sample apps with our UIKit components in our sample apps [repository](https://github.com/GetStream/stream-video-ios-examples). - -### ChatCallControls Implementation - -For reference, here's the `ChatCallControls` mentioned above. 
- -```swift -import SwiftUI -import struct StreamChatSwiftUI.ChatChannelView -import struct StreamChatSwiftUI.UnreadIndicatorView -import StreamVideo -import StreamVideoSwiftUI - -struct ChatCallControls: View { - - @Injected(\.streamVideo) var streamVideo - - private let size: CGFloat = 50 - - @ObservedObject var viewModel: CallViewModel - - @StateObject private var chatHelper = ChatHelper() - - @Injected(\.images) var images - @Injected(\.colors) var colors - - public init(viewModel: CallViewModel) { - self.viewModel = viewModel - } - - public var body: some View { - VStack { - HStack { - Button( - action: { - withAnimation { - chatHelper.chatShown.toggle() - } - }, - label: { - CallIconView( - icon: Image(systemName: "message"), - size: size, - iconStyle: chatHelper.chatShown ? .primary : .transparent - ) - .overlay( - chatHelper.unreadCount > 0 ? - TopRightView(content: { - UnreadIndicatorView(unreadCount: chatHelper.unreadCount) - }) - : nil - ) - }) - .frame(maxWidth: .infinity) - - Button( - action: { - viewModel.toggleCameraEnabled() - }, - label: { - CallIconView( - icon: (viewModel.callSettings.videoOn ? images.videoTurnOn : images.videoTurnOff), - size: size, - iconStyle: (viewModel.callSettings.videoOn ? .primary : .transparent) - ) - } - ) - .frame(maxWidth: .infinity) - - Button( - action: { - viewModel.toggleMicrophoneEnabled() - }, - label: { - CallIconView( - icon: (viewModel.callSettings.audioOn ? images.micTurnOn : images.micTurnOff), - size: size, - iconStyle: (viewModel.callSettings.audioOn ? 
.primary : .transparent) - ) - } - ) - .frame(maxWidth: .infinity) - - Button( - action: { - viewModel.toggleCameraPosition() - }, - label: { - CallIconView( - icon: images.toggleCamera, - size: size, - iconStyle: .primary - ) - } - ) - .frame(maxWidth: .infinity) - - Button { - viewModel.hangUp() - } label: { - images.hangup - .applyCallButtonStyle( - color: colors.hangUpIconColor, - size: size - ) - } - .frame(maxWidth: .infinity) - } - - if chatHelper.chatShown { - if let channelController = chatHelper.channelController { - ChatChannelView( - viewFactory: ChatViewFactory.shared, - channelController: channelController - ) - .frame(height: chatHeight) - .preferredColorScheme(.dark) - .onAppear { - chatHelper.markAsRead() - } - } else { - Spacer() - Text("Chat not available") - Spacer() - } - } - } - .frame(maxWidth: .infinity) - .frame(height: chatHelper.chatShown ? chatHeight + 100 : 100) - .background( - colors.callControlsBackground - .cornerRadius(16) - .edgesIgnoringSafeArea(.all) - ) - .onReceive(viewModel.$callParticipants, perform: { output in - if viewModel.callParticipants.count > 1 { - chatHelper.update(memberIds: Set(viewModel.callParticipants.map(\.key))) - } - }) - } - - private var chatHeight: CGFloat { - (UIScreen.main.bounds.height / 3 + 50) - } - -} - -struct EqualSpacingHStack: View { - - var views: [AnyView] - - var body: some View { - HStack(alignment: .top) { - ForEach(0..) { /* Your implementation here */ } -} -``` diff --git a/docusaurus/docs/iOS/04-ui-components/06-view-model.mdx b/docusaurus/docs/iOS/04-ui-components/06-view-model.mdx deleted file mode 100644 index 0aa2e8771..000000000 --- a/docusaurus/docs/iOS/04-ui-components/06-view-model.mdx +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: ViewModel -description: How to use the ViewModel ---- - -## Introduction - -The `CallViewModel` is a stateful component, that can be used as a presentation layer in your custom views that present video. 
It can live throughout the whole app lifecycle (if you want to react to incoming call events), or it can be created per call. - -The `CallViewModel` is an observable object, and it can be used with both SwiftUI and UIKit views. It provides information about the call state, events, participants, as well as different actions that you can perform while on a call (muting/unmuting yourself, changing camera source, etc). - -## Available functionalities - -### Calling State - -The `CallViewModel` exposes a `@Published` `callingState` variable, that can be used to track the current state of a call. It should be used to show different UI in your views. You can find more information about the `callingState` [here](../../guides/call-state). - -### Starting a call - -You can start a call using the method `startCall(callId: String, type: String, members: [Member], ring: Bool)`, where the parameters are: - -- `callId` - the id of the call. If you use the ringing functionality, this should be always a unique value. -- `type` - the call type. -- `members` - the list of members in the call. -- `ring` - whether the call should ring. - -Here's an example usage: - -```swift -Button { - viewModel.startCall(callType: "default", callId: callId, members: members, ring: false) -} label: { - Text("Start a call") -} -``` - -After you call this method (or the other ones below), the `callingState` will change accordingly. - -### Joining a call - -You can join an existing call using the method `joinCall(callId: String, type: String)`, where the parameters are: - -- `callId` - the id of the call. If you use the ringing functionality, this should be always a unique value. -- `type` - the type of the call. 
- -Here's an example usage: - -```swift -Button { - viewModel.joinCall(callType: "default", callId: callId) -} label: { - Text("Join a call") -} -``` - -### Entering the lobby - -If you want to display a lobby screen before the user joins the call, you should use the `enterLobby(callId: String, type: String, members: [Member])` method. This will change the calling state to `.lobby`. When that happens, you can either display your custom implementation of a lobby view, or use the one from the SDK. - -When the user decides to join the call, you should call the `joinCall(callId: String, type: String)` method. - -### Accepting a call - -When you are receiving an incoming call, you can either accept it or reject it. If you don't perform any of these actions after a configurable timeout, the call is canceled. - -In order to accept a call, you need to use the method `acceptCall(callId: String, type: String)`, where the parameters are: - -- `callId` - the id of the call. -- `type` - the type of the call. - -### Rejecting a call - -In order to reject a call, you need to use the method `rejectCall(callId: String, type: String)`, where the parameters are: - -- `callId` - the id of the call. -- `type` - the type of the call. - -### Hanging up - -If you want to hangup a call in progress, you should call the `hangUp()` method. This will notify other users that you've left the call. - -### Participants - -The call participants are available as a `@Published` property called `callParticipants`. You can use this to present your custom UI that displays their video feeds. - -### Call Settings - -The `CallViewModel` provides information about the current call settings, such as the camera position and whether there's an audio and video turned on. This is available as a `@Published` property called `callSettings`. - -If you are building a custom UI, you should use the values from this struct to show the corresponding call controls and camera (front or back). 
- -The `callSettings` are updated by performing the following actions from the view model. - -#### toggleCameraPosition - -You can toggle the camera position by calling the method `toggleCameraPosition`. The method takes into consideration the current camera state (front or back), and it updates it to the new one. - -The video view will automatically update itself and send the new feed to the backend. - -#### toggleCameraEnabled - -You can also show/hide the camera during a call. This is done by calling the method `toggleCameraEnabled`. If you're not using our default view components, you will need to handle this state change from the `callSettings` and show a fallback view when the camera is turned off. - -#### toggleMicrophoneEnabled - -You can mute/unmute the audio during a call, using the `toggleMicrophoneEnabled` method. The change will be published to the views via the `callSettings`. - -### Screen Sharing Session - -When there is a screen sharing session in progress, it can be accessed via the `screensharingSession` published property in the view model. - -### Other properties - -Here are some other useful properties from the view model that you can use to build custom calling experiences: - -- `error` - optional, has a value if there was an error. You can use it to display more detailed error messages to users. -- `errorAlertShown` - if the error has a value, it's true. You can use it to control the visibility of an alert presented to the user. -- `participantsShown` - whether the list of participants is shown during the call. -- `outgoingCallMembers` - list of the outgoing call members. -- `participantEvent` - published variable that contains info about a participant event. It's reset to nil after 2 seconds. -- `isMinimized` - whether the call is in minimized mode. -- `localVideoPrimary` - `false` by default. It becomes `true` when the current user's local video is shown as a primary view. 
-- `hideUIElements` - whether the UI elements, such as the call controls should be hidden (for example while screen sharing). -- `blockedUsers` - a list of the blocked users in the call. -- `recordingState` - the current recording state of the call. -- `participantsLayout` - The participants layout. -- `pinnedParticipant` - The pinned participant (if any). -- `localParticipant` - returns the local participant of the call. diff --git a/docusaurus/docs/iOS/04-ui-components/07-video-renderer.mdx b/docusaurus/docs/iOS/04-ui-components/07-video-renderer.mdx deleted file mode 100644 index 5a2dd0071..000000000 --- a/docusaurus/docs/iOS/04-ui-components/07-video-renderer.mdx +++ /dev/null @@ -1,179 +0,0 @@ -# VideoRenderer - -Each participant in the video call has their own video view (card), that you can either customize or completely swap it with your custom implementation. - -If you are not using our SwiftUI SDK and the `ViewFactory` for customizations, you can still re-use our low-level components to build your own video views from scratch. - -The important part here is to use the `track` from the `CallParticipant` class, for each of the participants. In order to save resources (both bandwidth and memory), you should hide the track when the user is not visible on the screen, and show it again when they become visible. This is already implemented in our SDK view layouts. - -### RTCMTLVideoView - -If you want to use WebRTC's view for displaying tracks, then you should use the `RTCMTLVideoView` view directly. In our SDK, we provide a subclass of this view, called `VideoRenderer`, which also provides access to the track. - -### VideoRendererView - -If you are using SwiftUI, you can use our `UIViewRepresentable` called `VideoRendererView`, since it simplifies the SwiftUI integration. 
- -For example, here's how to use this view: - -```swift -VideoRendererView( - id: id, - size: availableSize, - contentMode: contentMode -) { view in - view.handleViewRendering(for: participant) { size, participant in - // handle track size update - } -} -``` - -The `handleViewRendering` method is an extension method from the `VideoRenderer`, that adds the track to the view (if needed), and reports any track size changes to the caller: - -```swift -extension VideoRenderer { - - func handleViewRendering( - for participant: CallParticipant, - onTrackSizeUpdate: @escaping (CGSize, CallParticipant) -> () - ) { - if let track = participant.track { - log.debug("adding track to a view \(self)") - self.add(track: track) - DispatchQueue.main.asyncAfter(deadline: .now() + 0.01) { - let prev = participant.trackSize - let scale = UIScreen.main.scale - let newSize = CGSize( - width: self.bounds.size.width * scale, - height: self.bounds.size.height * scale - ) - if prev != newSize { - onTrackSizeUpdate(newSize, participant) - } - } - } - } -} -``` - -### Additional participant info - -Apart from the video track, we show additional information in the video view, such as the name, network quality, audio / video state etc. - -If you are using our SwiftUI SDK, this is controlled by the `VideoCallParticipantModifier`, which can be customized by implementing the `makeVideoCallParticipantModifier`. - -For reference, here's the default `VideoCallParticipantModifier` implementation, that you can use for inspiration while implementing your own modifiers: - -```swift -public struct VideoCallParticipantModifier: ViewModifier { - - var participant: CallParticipant - var call: Call? 
- var availableFrame: CGRect - var ratio: CGFloat - var showAllInfo: Bool - var decorations: Set - - public init( - participant: CallParticipant, - call: Call?, - availableFrame: CGRect, - ratio: CGFloat, - showAllInfo: Bool, - decorations: [VideoCallParticipantDecoration] = VideoCallParticipantDecoration.allCases - ) { - self.participant = participant - self.call = call - self.availableFrame = availableFrame - self.ratio = ratio - self.showAllInfo = showAllInfo - self.decorations = .init(decorations) - } - - public func body(content: Content) -> some View { - content - .adjustVideoFrame(to: availableFrame.size.width, ratio: ratio) - .overlay( - ZStack { - BottomView(content: { - HStack { - ParticipantInfoView( - participant: participant, - isPinned: participant.isPinned - ) - - Spacer() - - if showAllInfo { - ConnectionQualityIndicator( - connectionQuality: participant.connectionQuality - ) - } - } - }) - } - ) - .applyDecorationModifierIfRequired( - VideoCallParticipantOptionsModifier(participant: participant, call: call), - decoration: .options, - availableDecorations: decorations - ) - .applyDecorationModifierIfRequired( - VideoCallParticipantSpeakingModifier(participant: participant, participantCount: participantCount), - decoration: .speaking, - availableDecorations: decorations - ) - .clipShape(RoundedRectangle(cornerRadius: 16)) - .clipped() - } - - @MainActor - private var participantCount: Int { - call?.state.participants.count ?? 
0 - } -} -``` - -By default, this modifier is applied to the video call participant view: - -```swift -ForEach(participants) { participant in - viewFactory.makeVideoParticipantView( - participant: participant, - id: participant.id, - availableFrame: availableFrame, - contentMode: .scaleAspectFill, - customData: [:], - call: call - ) - .modifier( - viewFactory.makeVideoCallParticipantModifier( - participant: participant, - call: call, - availableFrame: availableFrame, - ratio: ratio, - showAllInfo: true - ) - ) -} -``` - -Note that the container above is ommited, since you can use a `LazyVGrid`, `LazyVStack`, `LazyHStack` or other container component, based on your UI requirements. - -### Mirroring the VideoRendererView - -In most video calling apps, the video feed of the current user is mirrored. If you use the higher level view `VideoCallParticipantView`, the flipping is automatically handled, depending on whether the user is the current one. If you use the `VideoRendererView` directly, you would need to apply the flipping by yourself. - -To mirror the view, you can use the following SwiftUI modifier: - -```swift -.rotation3DEffect(.degrees(180), axis: (x: 0, y: 1, z: 0)) -``` - -You can check whether the video renderer view is for the current user with the following code: - -```swift -if participant.id == streamVideo.state.activeCall?.state.localParticipant?.id { - // apply the modifier -} -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/04-ui-components/08-call/01-call-container.mdx b/docusaurus/docs/iOS/04-ui-components/08-call/01-call-container.mdx deleted file mode 100644 index 51a7b5853..000000000 --- a/docusaurus/docs/iOS/04-ui-components/08-call/01-call-container.mdx +++ /dev/null @@ -1,64 +0,0 @@ -# CallContainer - -The easiest way to setup a screen that shows incoming, outgoing and active call screens which contain the current participants video feeds and the call controls is to use the `CallContainer`. 
- -`CallContainer` sets up the following functionality by connecting multiple components: - -* [**`OutgoingCall`**](../outgoing-call): When the user is calling other people. Shows other participants avatars and controls for switching audio/video and canceling the call. -* [**`IncomingCall`**](../incoming-call): When the user is being called by another person. Shows the incoming call screen. -* [**`ActiveCall`**](../active-call): When the user is in an active call. - -In this section we will cover this higher level component which enables you to quickly implement a Video Call app. - -## Usage - -`CallContainer` is a container for the different types of calling screens. It is a bound component that automatically handles Incoming/Outgoing and Active call components and states. All you have to do is pass it a `ViewFactory` and the `CallViewModel`: - -```swift - -@StateObject var viewModel = CallViewModel() - -public var body: some View { - ZStack { - YourRootView() - CallContainer(viewFactory: CustomViewFactory(), viewModel: viewModel) - } -} -``` - -The `ViewFactory` parameter is used for customizing the different view slots in the call container. You can find more information about the available slots [here](../../../guides/view-slots). - -In these slots, you can both replace the UI components and customize the behaviour of the buttons and actions displayed in the screens. - -If you want to use our default UI Components and behaviour, you can just pass our default view factory in the creation of the `CallContainer`: - -```swift -CallContainer(viewFactory: DefaultViewFactory.shared, viewModel: viewModel) -``` - -The `viewModel` parameter is the `CallViewModel` object that is responsible for managing the state of the calls. We recommend creating it as a `StateObject` in the view that will use the `CallContainer`. - -## CallModifier - -Another way of adding video call support to your views is attaching the `CallModifier` to them. 
With the call modifier, you get the call capabilities to any view, while the logic with the `CallContainer` is encapsulated in the modifier, making the video call support a one-line code. - -Here's an example usage: - -```swift - -@StateObject var viewModel = CallViewModel() - -var body: some View { - YourRootView() - .modifier(CallModifier(viewModel: viewModel)) -} -``` - -Optionally, you can also pass a `ViewFactory` to the `CallModifier`, if you need to customize the default UI components: - -```swift -var body: some View { - YourRootView() - .modifier(CallModifier(viewFactory: CustomViewFactory(), viewModel: viewModel)) -} -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/04-ui-components/08-call/02-outgoing-call.mdx b/docusaurus/docs/iOS/04-ui-components/08-call/02-outgoing-call.mdx deleted file mode 100644 index 80676e41d..000000000 --- a/docusaurus/docs/iOS/04-ui-components/08-call/02-outgoing-call.mdx +++ /dev/null @@ -1,55 +0,0 @@ -# OutgoingCallView - -The `OutgoingCallView` lets you easily build UI when you're calling or ringing other people in an app. It's used to show more information about the participants you're calling, as well as give you the option to cancel the call before anyone accepts. - -## Usage - -In order to create the `OutgoingCallView`, you need to instantiate it with the `outgoingCallMembers`, `callTopView` and the `callControls`. - -```swift -public var body: some View { - OutgoingCallView( - outgoingCallMembers: outgoingCallMembers, - callTopView: callTopView, - callControls: callControls - ) -} -``` - -The `callTopView` and `callControls` parameter is a SwiftUI view that is showing the actions you can do during a call. - -If you are using our `CallContainer` to add calling support to your views, this view is automatically shown when the `callingState` in the `CallViewModel` is `.outgoing`. 
- -If you want to customize (or completely replace) the `OutgoingCallView`, you should use the `ViewFactory` method `makeOutgoingCallView`: - -```swift -public func makeOutgoingCallView(viewModel: CallViewModel) -> some View { - CustomOutgoingCallView(viewModel: viewModel) -} -``` - -## Sounds - -By default, the outgoing call view plays ringing sound when the ringing is in progress. If you want to change the sounds, you should provide your own instance of the `Sounds` class in the `Appearance` object, while replacing the `outgoingCallSound` with your own sound file. - -```swift -let sounds = Sounds() -sounds.outgoingCallSound = "your_sounds.m4a" -let appearance = Appearance(sounds: sounds) -streamVideoUI = StreamVideoUI(streamVideo: streamVideo, appearance: appearance) -``` - -## Localization and icons - -You can change the texts and the icons in the `OutgoingCallView`. For more details about changing the texts, please check the localization [guide](../../../advanced/text-localization). - -In order to change the icons, you need to create your own version of the `Images` class and change the icons you want to customize. - -For example, if we want to change the `hangup` icon, we can do the following: - -```swift -let images = Images() -images.hangup = Image("custom_hangup_icon") -let appearance = Appearance(images: images) -streamVideoUI = StreamVideoUI(streamVideo: streamVideo, appearance: appearance) -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/04-ui-components/08-call/03-incoming-call.mdx b/docusaurus/docs/iOS/04-ui-components/08-call/03-incoming-call.mdx deleted file mode 100644 index 62d429fb2..000000000 --- a/docusaurus/docs/iOS/04-ui-components/08-call/03-incoming-call.mdx +++ /dev/null @@ -1,56 +0,0 @@ -# IncomingCallView - -The `IncomingCallView` lets you easily build UI when you're being called or ringed by other people in an app. 
It's used to show more information about the participants and the call itself, as well as give you the option to reject or accept the call. - -## Usage - -In order to create the `IncomingCallView`, you need to instantiate it with the following code: - -```swift -public var body: some View { - IncomingCallView( - callInfo: callInfo, - onCallAccepted: { _ in - // handle call accepted - }, onCallRejected: { _ in - // handle call rejected - } - ) -} -``` - -If you are using our `CallContainer` to add calling support to your views, this view is automatically shown when the `callingState` in the `CallViewModel` is `.incoming`. - -If you want to customize (or completely replace) the `IncomingCallView`, you should use the `ViewFactory` method `makeIncomingCallView`: - -```swift -public func makeIncomingCallView(viewModel: CallViewModel, callInfo: IncomingCall) -> some View { - CustomIncomingCallView(viewModel: viewModel, callInfo: callInfo) -} -``` - -## Sounds - -By default, the outgoing call view plays ringing sound when the ringing is in progress. If you want to change the sounds, you should provide your own instance of the `Sounds` class in the `Appearance` object, while replacing the `incomingCallSound` with your own sound file. - -```swift -let sounds = Sounds() -sounds.incomingCallSound = "your_sounds.m4a" -let appearance = Appearance(sounds: sounds) -streamVideoUI = StreamVideoUI(streamVideo: streamVideo, appearance: appearance) -``` - -## Localization and icons - -You can change the texts and the icons in the `IncomingCallView`. For more details about changing the texts, please check the localization [guide](../../../advanced/text-localization). - -In order to change the icons, you need to create your own version of the `Images` class and change the icons you want to customize. 
- -For example, if we want to change the `hangup` icon, we can do the following: - -```swift -let images = Images() -images.hangup = Image("custom_hangup_icon") -let appearance = Appearance(images: images) -streamVideoUI = StreamVideoUI(streamVideo: streamVideo, appearance: appearance) -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/04-ui-components/08-call/04-active-call.mdx b/docusaurus/docs/iOS/04-ui-components/08-call/04-active-call.mdx deleted file mode 100644 index 45832cadb..000000000 --- a/docusaurus/docs/iOS/04-ui-components/08-call/04-active-call.mdx +++ /dev/null @@ -1,116 +0,0 @@ -# CallView - -The `CallView` lets you easily build UI when inside a call. It handles the state and actions of enabling and disabling audio, front/back camera or video, as well as information about the call participtants. - -## Usage - -In order to create a `CallView`, you should provide a `ViewFactory` and a `CallViewModel`: - -```swift -public var body: some View { - CallView(viewFactory: DefaultViewFactory.shared, viewModel: viewModel) -} -``` - -If you are using our `CallView` to add calling support to your views, this view is automatically shown when the `callingState` in the `CallViewModel` is `.inCall`. - -If you want to customize (or completely replace) the `CallView`, you should use the `ViewFactory` method `makeCallView`: - -```swift -public func makeCallView(viewModel: CallViewModel) -> some View { - CustomCallView(viewFactory: self, viewModel: viewModel) -} -``` - -### CallView components - -By using your own implementation of the `ViewFactory`, you can swap parts of the `CallView` with your own implementation. - -#### Call Controls View - -The call controls view by default displays controls for hiding/showing the camera, muting/unmuting the microphone, changing the camera source (front/back) and hanging up. 
If you want to change these controls, you will need to implement the `makeCallControlsView(viewModel: CallViewModel) -> some View` method: - -```swift -func makeCallControlsView(viewModel: CallViewModel) -> some View { - CustomCallControlsView(viewModel: viewModel) -} -``` - -#### Video Participants View - -The video participants view slot presents the grid of users that are in the call. If you want to provide a different variation of the participants display, you will need to implement the `makeVideoParticipantsView` in the `ViewFactory`: - -```swift -public func makeVideoParticipantsView( - viewModel: CallViewModel, - availableFrame: CGRect, - onChangeTrackVisibility: @escaping @MainActor(CallParticipant, Bool) -> Void -) -> some View { - VideoParticipantsView( - viewFactory: self, - viewModel: viewModel, - availableFrame: availableFrame, - onChangeTrackVisibility: onChangeTrackVisibility - ) -} -``` - -In the method, the following parameters are provided: - -- `participants` - the list of participants. -- `availableFrame` - the available frame for the participants view. -- `onChangeTrackVisibility` - callback when the track changes its visibility. - -#### Video Participant View - -If you want to customize one particular participant view, you can change it via the method `makeVideoParticipantView`: - -```swift -public func makeVideoParticipantView( - participant: CallParticipant, - id: String, - availableFrame: CGRect, - contentMode: UIView.ContentMode, - customData: [String: RawJSON], - call: Call? 
-) -> some View { - VideoCallParticipantView( - participant: participant, - id: id, - availableFrame: availableFrame, - contentMode: contentMode, - customData: customData, - call: call - ) -} -``` - -Additionally, you can change the modifier applied to the view, by implementing the `makeVideoCallParticipantModifier`: - -```swift -public func makeVideoCallParticipantModifier( - participant: CallParticipant, - call: Call?, - availableFrame: CGRect, - ratio: CGFloat, - showAllInfo: Bool -) -> some ViewModifier { - VideoCallParticipantModifier( - participant: participant, - call: call, - availableFrame: availableFrame, - ratio: ratio, - showAllInfo: showAllInfo - ) -} -``` - -#### Top View - -This is the view presented in the top area of the call view. By default, it displays a back button (to go in minimized mode) and a button that shows the list of participants. You can swap this view with your own implementation, by implementing the `makeCallTopView` in the `ViewFactory`: - -```swift -public func makeCallTopView(viewModel: CallViewModel) -> some View { - CallTopView(viewModel: viewModel) -} -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/04-ui-components/08-call/05-call-controls.mdx b/docusaurus/docs/iOS/04-ui-components/08-call/05-call-controls.mdx deleted file mode 100644 index 7989ac933..000000000 --- a/docusaurus/docs/iOS/04-ui-components/08-call/05-call-controls.mdx +++ /dev/null @@ -1,47 +0,0 @@ -# CallControls - -The `CallControls` component lets you display any number of controls on the UI, that trigger different actions within a call. We provide default actions, such as changing the audio and video mute state or turning on the speakerphone and leaving the call. - -On top of these actions, you can provide a custom set of actions through the API. - -Let's see how to use it. 
- -## Usage - -The default `CallControlsView` is created by passing the `CallViewModel`: - -```swift -public var body: some View { - CallControlsView(viewModel: viewModel) -} -``` - -If you want to customize (or completely replace) the `CallControlsView`, you should use the `ViewFactory` method `makeCallControlsView`: - -```swift -public func makeCallControlsView(viewModel: CallViewModel) -> some View { - CustomCallControlsView(viewModel: viewModel) -} -``` - -Next, let's see the `CustomCallControlsView`: - -```swift -struct CustomCallControlsView: View { - - @ObservedObject var viewModel: CallViewModel - - var body: some View { - HStack(spacing: 32) { - VideoIconView(viewModel: viewModel) - MicrophoneIconView(viewModel: viewModel) - ToggleCameraIconView(viewModel: viewModel) - HangUpIconView(viewModel: viewModel) - } - .frame(maxWidth: .infinity) - .frame(height: 85) - } -} -``` - -In this example, we are building a custom call controls view, using buttons from the SDK, for muting video/audio, toggling camera and hanging up. You can easily add your own UI elements in the `HStack` above. You can use the `CallViewModel` for the standard call-related actions, or use the `Call` object directly for custom events and reactions (as described [here](../../../guides/reactions-and-custom-events), and for permissions related actions (as described [here](../../../guides/permissions-and-moderation)). \ No newline at end of file diff --git a/docusaurus/docs/iOS/04-ui-components/08-call/06-call-app-bar.mdx b/docusaurus/docs/iOS/04-ui-components/08-call/06-call-app-bar.mdx deleted file mode 100644 index 11e7fbd90..000000000 --- a/docusaurus/docs/iOS/04-ui-components/08-call/06-call-app-bar.mdx +++ /dev/null @@ -1,30 +0,0 @@ -# CallTopView - -The `CallTopView` represents the component displayed at the top of the `CallView`. 
By default, it contains a button to minimize the call (on the leading side), a layout menu (for changing different call participant layouts) and a button that can show the call participants list (on the trailing side). - -Let's see how to use the component. - -## Usage - -To instantiate the `CallTopView`, you just need to provide the `CallViewModel`: - -```swift -var body: some View { - CallTopView(viewModel: viewModel) -} -``` - -If you want to customize (or completely replace) the `CallTopView`, you should use the `ViewFactory` method `makeCallTopView`: - -```swift -public func makeCallTopView(viewModel: CallViewModel) -> some View { - CustomCallTopView(viewModel: viewModel) -} -``` - -### Changing layouts - -The default `CallTopView` contains a layout menu selection view, which can change the `participantsLayout` in the `CallViewModel`. The `participantsLayout` is an enum, with the following cases: -- `grid` - the participants are shown in a grid -- `spotlight` - one participants is shown in a spotlight, while the others are in a horizontal list below -- `fullScreen` - only one participant is shown \ No newline at end of file diff --git a/docusaurus/docs/iOS/04-ui-components/08-call/07-screen-share-content.mdx b/docusaurus/docs/iOS/04-ui-components/08-call/07-screen-share-content.mdx deleted file mode 100644 index ee2ad7d79..000000000 --- a/docusaurus/docs/iOS/04-ui-components/08-call/07-screen-share-content.mdx +++ /dev/null @@ -1,51 +0,0 @@ -# ScreenSharingView - -The `ScreenSharingView` is a component that allows you to display a video of a screen sharing session. This component is designed for use in video conferencing and other similar applications where users are able to share their entire screens or specific windows. - -The screensharing behaviour is enabled by default in the SDK, along with a standard UI that contains the screen sharing track and a list of participants. - -The `CallParticipant` model contains the screensharing info. 
It has a property called `isScreensharing`, to indicate whether a participant is sharing their screen. Additionally, you can access the `screenshareTrack` (if available) from this model. - -Only users with the `screenshare` capability can start screensharing sessions. At the moment, this is only possible from the React SDK. Users can request permission to screenshare, by calling the `Call`'s `request(permissions: [Permission])` method and passing the `screenshare` option. - -## Usage - -Here's an example how to create a `ScreenSharingView`: - -```swift -ScreenSharingView( - viewModel: viewModel, - screenSharing: screensharingSession, - availableFrame: availableFrame -) -``` - -In this method, the following parameters are provided: - -- `viewModel` - the `CallViewModel` used in the call. -- `screensharingSession` - The current screen sharing session, that contains information about the track, as well as the participant that is sharing. -- `availableFrame` - the available frame to layout the rendering view. - -## Customization - -If you want to implement your own UI when there's screen sharing in progress, you need to implement the `makeScreenSharingView` method in the `ViewFactory`. Here's an example implementation: - -```swift -public func makeScreenSharingView( - viewModel: CallViewModel, - screensharingSession: ScreenSharingSession, - availableFrame: CGRect -) -> some View { - CustomScreenSharingView( - viewModel: viewModel, - screenSharing: screensharingSession, - availableFrame: availableFrame - ) -} -``` - -Similarly to above, the following parameters are provided: - -- `viewModel` - the `CallViewModel` used in the call. -- `screensharingSession` - The current screen sharing session, that contains information about the track, as well as the participant that is sharing. -- `availableFrame` - the available frame to layout the rendering view. 
\ No newline at end of file diff --git a/docusaurus/docs/iOS/04-ui-components/08-call/_category_.json b/docusaurus/docs/iOS/04-ui-components/08-call/_category_.json deleted file mode 100644 index 9e2a13ca4..000000000 --- a/docusaurus/docs/iOS/04-ui-components/08-call/_category_.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "label": "Call" -} diff --git a/docusaurus/docs/iOS/04-ui-components/09-participants/01-call-participant.mdx b/docusaurus/docs/iOS/04-ui-components/09-participants/01-call-participant.mdx deleted file mode 100644 index 01d284e0a..000000000 --- a/docusaurus/docs/iOS/04-ui-components/09-participants/01-call-participant.mdx +++ /dev/null @@ -1,94 +0,0 @@ -# VideoCallParticipantView - -The `VideoCallParticipantView` component is used to render a participant in a call. It renders the participant video if their track is not `null` and is correctly published, or a user avatar if there is no video to be shown. - -The component has a `ViewModifier` called `VideoCallParticipantModifier`, that renders the user label, which includes the user's name and call status, such as mute state and a connection indicator. Additionally, if the user is focused, the component renders a border to indicate that the participant is the primary speaker. - -Let's see how to use it. 
- -## Usage - -To use the `VideoCallParticipantView` component, embed it anywhere in your custom UI and pass in the necessary parameters: - -```swift -VideoCallParticipantView( - participant: participant, - id: id, - availableFrame: availableFrame, - contentMode: contentMode, - customData: customData, - call: call -) -.modifier( - VideoCallParticipantModifier( - participant: participant, - call: call, - availableFrame: availableFrame, - ratio: ratio, - showAllInfo: true - ) -) -``` - -If you are using our `ViewFactory`, these parameters are provided in the factory method for the call participant view its view modifier in the following methods: - -```swift -public func makeVideoParticipantView( - participant: CallParticipant, - id: String, - availableFrame: CGRect, - contentMode: UIView.ContentMode, - customData: [String: RawJSON], - call: Call? -) -> some View { - VideoCallParticipantView( - participant: participant, - id: id, - availableFrame: availableFrame, - contentMode: contentMode, - customData: customData, - call: call - ) -} - -public func makeVideoCallParticipantModifier( - participant: CallParticipant, - call: Call?, - availableFrame: CGRect, - ratio: CGFloat, - showAllInfo: Bool -) -> some ViewModifier { - VideoCallParticipantModifier( - participant: participant, - call: call, - availableFrame: availableFrame, - ratio: ratio, - showAllInfo: showAllInfo - ) -} -``` - -For the `VideoCallParticipantView`, the required parameters are: -- `participant` - the `CallParticipant` object representing a user in a call -- `id` - the SwiftUI id for the view (you can pass the id of the `participant` here) -- `availableFrame` - the available frame for the view -- `contentMode` - the content mode of the view -- `customData` - any custom data that you can pass to the view -- `call` - the current call - -For the `VideoCallParticipantModifier` you should provide the following parameters: -- `participant` - the `CallParticipant` object representing a user in a call -- 
`participantCount` - the number of participants in the call -- `pinnedParticipant` - optional binding of the pinned participant (if any) -- `availableFrame` - the available frame≈ for the view -- `ratio` - the ratio for the view slot -- `showAllInfo` - if all information should be shown in the card - -If you are using your custom UI without our `ViewFactory`, you can fetch the information needed in these components via the `Call` object and its `participants` property. For example, you can iterate through the participants with `ForEach` and show a grid or any other UI container representation, based on your app's requirements. - -Each of the `VideoCallParticipantView` items with its modifier applied will look something like this: - -![CallParticipants Grid](https://user-images.githubusercontent.com/17215808/223418304-d21a2018-f0d2-4d37-afd0-87ef071e49b6.png) - -The users will have their video visible, or an avatar if there are no tracks available. On top of that, there is a label that has the name or ID displayed, as well as the current mute or speaking state, with the connection quality being on the side. - diff --git a/docusaurus/docs/iOS/04-ui-components/09-participants/02-call-participants.mdx b/docusaurus/docs/iOS/04-ui-components/09-participants/02-call-participants.mdx deleted file mode 100644 index 05b997d86..000000000 --- a/docusaurus/docs/iOS/04-ui-components/09-participants/02-call-participants.mdx +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: VideoParticipantsView ---- - -The `VideoParticipantsView` is a container view that displays the participants in a call in several different layouts. - -The following layouts are supported at the moment: -- `grid` - the users are displayed in a scrollable grid. -- `spotlight` - the dominant speaker takes a large section of the view, while the other participants are displayed in a scrollable horizontal list below. -- `fullScreen` - only the dominant speaker is presented. 
- -The layout for the participants is determined by the `participantsLayout` property in the `CallViewModel`. - -Here's how the default grid UI looks like: - -![CallParticipants Grid](https://user-images.githubusercontent.com/17215808/223418304-d21a2018-f0d2-4d37-afd0-87ef071e49b6.png) - -### Usage - -The `VideoParticipantsView` is a stateful component that requires a `CallViewModel`. Here's an example of how to use it as a standalone component: - -```swift -VideoParticipantsView( - viewFactory: DefaultViewFactory.shared, - viewModel: viewModel, - availableFrame: availableFrame, - onChangeTrackVisibility: onChangeTrackVisibility -) -``` - -The parameters needed for this component are as follows: -- `viewFactory` - the view factory used for creation of the views -- `viewModel` - the `CallViewModel` -- `availableFrame` - the available frame for the view -- `onChangeTrackVisibility` - called when the track changes its visibility - -If you are using our `ViewFactory`, you can swap this component with your implementation by implementing the following method: - -```swift -public func makeVideoParticipantsView( - viewModel: CallViewModel, - availableFrame: CGRect, - onChangeTrackVisibility: @escaping @MainActor(CallParticipant, Bool) -> Void -) -> some View { - CustomVideoParticipantsView( - viewFactory: self, - viewModel: viewModel, - availableFrame: availableFrame, - onChangeTrackVisibility: onChangeTrackVisibility - ) -} -``` - -The different layouts for this component are also provided as standalone components that you can use to build your own variations of the UI. 
- -#### Grid - -Here's an example how to use the grid layout in your views: - -```swift -ParticipantsGridLayout( - viewFactory: viewFactory, - call: viewModel.call, - participants: viewModel.participants, - availableFrame: availableFrame - onChangeTrackVisibility: onChangeTrackVisibility -) -``` - -#### Spotlight - -This example shows the usage of the spotlight layout: - -```swift -ParticipantsSpotlightLayout( - viewFactory: viewFactory, - participant: first, - call: viewModel.call, - participants: Array(viewModel.participants.dropFirst()), - frame: availableFrame, - onChangeTrackVisibility: onChangeTrackVisibility -) -``` - -#### Full Screen - -You can use the full screen layout with the code below: - -```swift -ParticipantsFullScreenLayout( - viewFactory: viewFactory, - participant: first, - call: viewModel.call, - frame: availableFrame, - onChangeTrackVisibility: onChangeTrackVisibility -) -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/04-ui-components/09-participants/03-call-participants-info-menu.mdx b/docusaurus/docs/iOS/04-ui-components/09-participants/03-call-participants-info-menu.mdx deleted file mode 100644 index 2ddc23723..000000000 --- a/docusaurus/docs/iOS/04-ui-components/09-participants/03-call-participants-info-menu.mdx +++ /dev/null @@ -1,51 +0,0 @@ -# CallParticipantsInfoView - -The `CallParticipantsInfoView` component represents a popup that lets the user see more information about a particular call and its participants. It allows you to see which participants are in the call, what their mute state is and attempt to trigger different actions based on your own capabilities. - -On top of that, the menu allows you to invite other people to the call. - -Let's see how to use the component. 
- -## Usage - -If you want to use the componant as a standalone, you can create it like this: - -```swift -let view = CallParticipantsInfoView(callViewModel: viewModel) -``` - -The required parameters for this method are: -- `callViewModel` - the call view model used for the call. - -Additionally, if you are using our `ViewFactory` and the default view, you can customize or swap this view with your own implementation. In order to do that, you should implement the method `makeParticipantsListView`: - -```swift -func makeParticipantsListView( - viewModel: CallViewModel -) -> some View { - CustomCallParticipantsInfoView( - callViewModel: viewModel, - ) -} -``` - -The component also provides a view that you can use to invite members to the call. You can provide your own list of users that should be browsable in the view. In order to do that, you should implement the `UserListProvider` protocol. - -The protocol has one method loadNextUsers(pagination: Pagination), that returns a list of users: - -```swift -func loadNextUsers(pagination: Pagination) async throws -> [User] { - // load the users, based on the pagination parameter provided -} -``` - -The `Pagination` model consists of the following properties: -- `pageSize` - the size of the page -- `offset` - the current pagination offset - -In order to inject your custom implementation, you need to provide it in the creation of the `Utils` class: - -```swift -let utils = Utils(userListProvider: MockUserListProvider()) -let streamVideoUI = StreamVideoUI(streamVideo: streamVideo, utils: utils) -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/04-ui-components/09-participants/04-local-video.mdx b/docusaurus/docs/iOS/04-ui-components/09-participants/04-local-video.mdx deleted file mode 100644 index a18d22edb..000000000 --- a/docusaurus/docs/iOS/04-ui-components/09-participants/04-local-video.mdx +++ /dev/null @@ -1,52 +0,0 @@ -# LocalVideoView - -The `LocalVideoView` displays the video content of the local 
participant in a video call, both in a floating way or while the user is waiting for others to join. When it's inside a `CornerDragableView`, the component can be moved around within its parent component bounds. The component can be used with any participant, but in our default components, it handles only the local participant. - -Let's see how to use the component. - -## Usage - -In order to create the `LocalVideoView` as a standalone component, you should use the following code: - -```swift -LocalVideoView( - viewFactory: viewFactory, - participant: localParticipant, - callSettings: viewModel.callSettings, - call: viewModel.call, - availableFrame: availableFrame -) -``` - -The parameters for this view are as follows: -- `viewFactory` - the view factory used for creating the views -- `participant` - the local participant. Could be accessed from the `CallViewModel` -- `callSettings` - the local participant's call settings -- `call` - the current call -- `availableFrame` - the available frame for the view. - - -If you want to use the floating version of the component, you need to create a `CornerDraggableView` and provide the `LocalVideoView` as a `content`: - -```swift -CornerDraggableView( - content: { availableFrame in - LocalVideoView( - viewFactory: viewFactory, - participant: localParticipant, - callSettings: viewModel.callSettings, - call: viewModel.call, - availableFrame: availableFrame - ) - }, - proxy: reader -) { - withAnimation { - if participants.count == 1 { - viewModel.localVideoPrimary.toggle() - } - } -} -``` - -The `LocalVideoView` is rotated by 180 degrees on the y-axis, since it feels more natural for the local user to see themselves as mirrored. 
\ No newline at end of file diff --git a/docusaurus/docs/iOS/04-ui-components/09-participants/_category_.json b/docusaurus/docs/iOS/04-ui-components/09-participants/_category_.json deleted file mode 100644 index 1f760e4ed..000000000 --- a/docusaurus/docs/iOS/04-ui-components/09-participants/_category_.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "label": "Participants" -} diff --git a/docusaurus/docs/iOS/04-ui-components/10-utility/02-sound-indicator.mdx b/docusaurus/docs/iOS/04-ui-components/10-utility/02-sound-indicator.mdx deleted file mode 100644 index 54f1d0aeb..000000000 --- a/docusaurus/docs/iOS/04-ui-components/10-utility/02-sound-indicator.mdx +++ /dev/null @@ -1,30 +0,0 @@ -# SoundIndicator - -If you're looking to build a way for users to know if a person is muted or unmuted, you can use our `SoundIndicator` component. - -It's very simple and represents a few states: - -* **Muted**: shows a muted microphone icon to indicate that the user has turned off their audio. -* **Unmuted**: shows a regular microphone icon to indicate that the user has turned their microphone on. - -## Usage - -To add the SoundIndicator component to your layout, you can use the `SoundIndicator` view like this: - -```swift -SoundIndicator(participant: participant) -``` - -The `participant` is of type `CallParticipant` and it represents one participant in the call. - -The view is very simple and it allows only changing its microphone icons. 
- -To do that, you need to provide your own values in the `Images` class and inject it in the `Appearance` object: - -```swift -let images = Images() -images.micTurnOn = Image("custom_mic_turn_on_icon") -images.micTurnOff = Image("custom_mic_turn_off_icon") -let appearance = Appearance(images: images) -streamVideoUI = StreamVideoUI(streamVideo: streamVideo, appearance: appearance) -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/04-ui-components/10-utility/03-avatars.mdx b/docusaurus/docs/iOS/04-ui-components/10-utility/03-avatars.mdx deleted file mode 100644 index 9dc788a4a..000000000 --- a/docusaurus/docs/iOS/04-ui-components/10-utility/03-avatars.mdx +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: User Avatar ---- - -The user avatar is available as a standalone component, that you can use in your custom UI. The SwiftUI view is called `UserAvatar` and it's created with the image URL and the size. - -Here's an example usage: - -```swift -var body: some View { - VStack { - UserAvatar(imageURL: participant.profileImageURL, size: 40) - SomeOtherView() - } -} -``` - -The view has a circled shape. If that does not fit your UI requirements, you can easily build your own view, by using the native `StreamLazyImage`. diff --git a/docusaurus/docs/iOS/04-ui-components/10-utility/04-connection-quality-indicator.mdx b/docusaurus/docs/iOS/04-ui-components/10-utility/04-connection-quality-indicator.mdx deleted file mode 100644 index c4f08cdf1..000000000 --- a/docusaurus/docs/iOS/04-ui-components/10-utility/04-connection-quality-indicator.mdx +++ /dev/null @@ -1,42 +0,0 @@ -# ConnectionQualityIndicator - -The `ConnectionQualityIndicator` allows you to display the connection quality of a call for a certain participant. It's fairly simple and takes in a `ConnectionQuality` value to define how good the connection is. This value depends on the data the user sends and receives from the call server. 
- -It's helpful for users to know if their connection quality is poor, because you can show custom UI and messages to the user in case they're not aware. - -Let's see how to use the component. - -## Usage - -### Before joining a call - -Before the user joins the call, the network quality is determined on the client side, from the SDK, by sending latency checks to the SFU that will host the call for the user. - -This is already handled in our `LobbyView`. However, if you want to create your own version of a lobby view, you can use our `LobbyViewModel`, for getting information about the current user's network quality. - -This information is available via the `connectionQuality` property in the `LobbyViewModel`. The connection quality property can have the following values: - -```swift -public enum ConnectionQuality: Sendable { - case unknown - case poor - case good - case excellent -} -``` - -You can build your own UI to display this value, or use our default `ConnectionQualityIndicator` view. The `ConnectionQualityIndicator` view expects the `connectionQuality` parameter, and changes its UI depending on the value. Here's an example usage: - -```swift -ConnectionQualityIndicator(connectionQuality: participant.connectionQuality) -``` - -Additionally, you can change the size and the width of the connection quality ticks, by passing the `size` and `width` parameters to the `ConnectionQualityIndicator` view. - -### Network quality in a call - -When you are in a call, the network quality for each participant is delivered from the server. It's available via the `connectionQuality` property for each `CallParticipant`. - -By default, the SwiftUI SDK displays this information in the `VideoCallParticipantModifier`, via the same `ConnectionQualityIndicator` as above. - -If you wish to change this behaviour, you should implement the `makeVideoCallParticipantModifier` method and provide your own implementation that can modify or hide this view. 
\ No newline at end of file diff --git a/docusaurus/docs/iOS/04-ui-components/10-utility/05-call-background.mdx b/docusaurus/docs/iOS/04-ui-components/10-utility/05-call-background.mdx deleted file mode 100644 index 68d99739a..000000000 --- a/docusaurus/docs/iOS/04-ui-components/10-utility/05-call-background.mdx +++ /dev/null @@ -1,20 +0,0 @@ -# CallBackground - -The `CallBackground` component is used as a background for the incoming and outgoing call screens. It can present both the participant that's being called (or calling you), or a fallback background if this information is not available. - -Let's see how to use the component. - -## Usage - -To use the `CallBackground`, you can simply embed it somewhere in your custom UI, like this: - -```swift -var body: some View { - YourView() - .background(CallBackground(imageURL: imageURL)) -} -``` - -If there is one participant in the call, the person who's calling you, or the person you're calling, their respective profile image would be rendered, provided the user has an image url. - -If the optional `imageURL` is nil, the default fallback background would be displayed, which is a dark `LinearGradient`. 
\ No newline at end of file diff --git a/docusaurus/docs/iOS/04-ui-components/10-utility/_category_.json b/docusaurus/docs/iOS/04-ui-components/10-utility/_category_.json deleted file mode 100644 index 0efe7537d..000000000 --- a/docusaurus/docs/iOS/04-ui-components/10-utility/_category_.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "label": "Utility" -} diff --git a/docusaurus/docs/iOS/04-ui-components/_category_.json b/docusaurus/docs/iOS/04-ui-components/_category_.json deleted file mode 100644 index 7daf546d2..000000000 --- a/docusaurus/docs/iOS/04-ui-components/_category_.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "label": "Video UI Components" -} diff --git a/docusaurus/docs/iOS/05-ui-cookbook/01-overview.mdx b/docusaurus/docs/iOS/05-ui-cookbook/01-overview.mdx deleted file mode 100644 index 1c8b486ab..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/01-overview.mdx +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: Overview -description: Overview of the UI cookbook ---- - -## UI Cookbook - -export const CookbookCard = ({ title, link, img }) => ( -
-

{title}

- -

- -

-
-
-); - -import CallControls from '../assets/replacing-call-controls.png'; -import Label from '../assets/removing-label-and-indicators.png'; -import VideoLayout from '../assets/custom-video-layout.png'; -import IncomingCall from '../assets/incoming-call.png'; -import LobbyPreview from '../assets/lobby-preview.png'; -import VideoFallback from '../assets/no-video-fallback-avatar.png'; - -import PermissionRequests from '../assets/permission-requests.png'; -import VolumeIndicator from '../assets/audio-volume-indicator.png'; -import Reactions from '../assets/reactions.png'; -import WatchingLivestream from '../assets/livestream-live-label.png'; - -import ConnectionQuality from '../assets/network-quality.png'; -import SpeakingWhileMuted from '../assets/speaking-while-muted.png'; -import ConnectionWarning from '../assets/connection-unstable.png'; - -This cookbook aims to show you how to build your own UI elements for video calling. - -### Video Calls & Ringing - -
- - - - - - -
- -### Audio rooms & Livestreams - -
- - - -
- -### Small Components - -
- - - -
\ No newline at end of file diff --git a/docusaurus/docs/iOS/05-ui-cookbook/02-replacing-call-controls.mdx b/docusaurus/docs/iOS/05-ui-cookbook/02-replacing-call-controls.mdx deleted file mode 100644 index aa06040d1..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/02-replacing-call-controls.mdx +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: Call Controls -description: A guide on how to add/remove or replace call controls ---- - -The `CallControls` component lets you display any number of controls on the UI, that trigger different actions within a call. We provide default actions, such as changing the audio and video mute state or turning on the speakerphone and leaving the call. - -On top of these actions, you can provide a custom set of actions through the API. - -## Adding / removing a button - -If you want to customize (or completely replace) the default `CallControlsView`, you should use the `ViewFactory` method `makeCallControlsView`: - -```swift -public func makeCallControlsView(viewModel: CallViewModel) -> some View { - CustomCallControlsView(viewModel: viewModel) -} -``` - -Next, let's see the `CustomCallControlsView`: - -```swift -struct CustomCallControlsView: View { - - @ObservedObject var viewModel: CallViewModel - - var body: some View { - HStack(spacing: 32) { - VideoIconView(viewModel: viewModel) - MicrophoneIconView(viewModel: viewModel) - ToggleCameraIconView(viewModel: viewModel) - HangUpIconView(viewModel: viewModel) - } - .frame(maxWidth: .infinity) - .frame(height: 85) - } -} -``` - -The `CustomCallControlsView` is a simple `HStack`, and you can easily add or remove buttons to it, depending on your app's requirements. - -### Replacing the call controls - -Let's see how we can build our own call controls in the style of Facebook Messenger. 
- -![Screenshot shows the custom call controls](../assets/cookbook-call-controls.jpg) - -We need to create a different version of the views in the `HStack`, while we still reuse the functionalities of the `CallViewModel` for the actions. - -```swift -struct FBCallControlsView: View { - - @ObservedObject var viewModel: CallViewModel - - var body: some View { - HStack(spacing: 24) { - Button { - viewModel.toggleCameraEnabled() - } label: { - Image(systemName: "video.fill") - } - - Spacer() - - Button { - viewModel.toggleMicrophoneEnabled() - } label: { - Image(systemName: "mic.fill") - } - - Spacer() - - Button { - viewModel.toggleCameraPosition() - } label: { - Image(systemName: "arrow.triangle.2.circlepath.camera.fill") - } - - Spacer() - - HangUpIconView(viewModel: viewModel) - } - .foregroundColor(.white) - .padding(.vertical, 8) - .padding(.horizontal) - .modifier(BackgroundModifier()) - .padding(.horizontal, 32) - } - -} -``` - -Additionally, we need to update the styling, where we use a blurred background with a bigger corner radius. - -```swift -struct BackgroundModifier: ViewModifier { - - func body(content: Content) -> some View { - if #available(iOS 15, *) { - content - .background( - .ultraThinMaterial, - in: RoundedRectangle(cornerRadius: 24) - ) - } else { - content - .background(Color.black.opacity(0.8)) - .cornerRadius(24) - } - } - -} -``` - -Finally, we need to update our `ViewFactory` to use the newly created controls: - -```swift -func makeCallControlsView(viewModel: CallViewModel) -> some View { - FBCallControlsView(viewModel: viewModel) -} -``` - -That's everything that's needed for having a Facebook Messenger style of call controls. This example showed how easy it is to build your own call controls UI, while using `StreamVideo`'s building blocks. 
diff --git a/docusaurus/docs/iOS/05-ui-cookbook/03-custom-label.mdx b/docusaurus/docs/iOS/05-ui-cookbook/03-custom-label.mdx deleted file mode 100644 index adeb4d8bb..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/03-custom-label.mdx +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: Custom Label -description: How to make your own custom label ---- - -Showing participant info is an important part of the calling experience, and can have different design variations. By default, the SDK shows the name of the participant with white color, in a black container with opacity. Additionally, it shows the connection info, as well as whether the user is pinned. - -You can change this default UI by implementing the `makeVideoCallParticipantModifier` in your custom `ViewFactory`. For example, let's simplify this view and just display a bold white name of the participant. - -![Screenshot shows the custom video label](../assets/cookbook_label.png) - -```swift -func makeVideoCallParticipantModifier( - participant: CallParticipant, - call: Call?, - availableFrame: CGRect, - ratio: CGFloat, - showAllInfo: Bool -) -> some ViewModifier { - CustomParticipantModifier( - participant: participant, - call: call, - availableFrame: availableFrame, - ratio: ratio, - showAllInfo: showAllInfo - ) -} -``` - -The implementation of the `CustomParticipantModifier` looks like this: - -```swift -struct CustomParticipantModifier: ViewModifier { - - var participant: CallParticipant - var call: Call? 
- var availableFrame: CGRect - var ratio: CGFloat - var showAllInfo: Bool - - public init( - participant: CallParticipant, - call: Call?, - availableFrame: CGRect, - ratio: CGFloat, - showAllInfo: Bool - ) { - self.participant = participant - self.call = call - self.availableFrame = availableFrame - self.ratio = ratio - self.showAllInfo = showAllInfo - } - - public func body(content: Content) -> some View { - content - .adjustVideoFrame(to: availableFrame.size.width, ratio: ratio) - .overlay( - ZStack { - VStack { - Spacer() - HStack { - Text(participant.name) - .foregroundColor(.white) - .bold() - Spacer() - ConnectionQualityIndicator( - connectionQuality: participant.connectionQuality - ) - } - .padding(.bottom, 2) - } - .padding() - } - .modifier(VideoCallParticipantSpeakingModifier(participant: participant, participantCount: 1)) - ) - } -} -``` - -The important part here is the overlay applied to the view. Here, we are using a `ZStack` as a container, while putting the name and the connection quality indicator at the bottom of a `VStack`. \ No newline at end of file diff --git a/docusaurus/docs/iOS/05-ui-cookbook/04-video-layout.mdx b/docusaurus/docs/iOS/05-ui-cookbook/04-video-layout.mdx deleted file mode 100644 index 6752bfbb4..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/04-video-layout.mdx +++ /dev/null @@ -1,245 +0,0 @@ ---- -title: Video Layout -description: Video Layout ---- - -### Introduction - -There are many different variations for building the user interface of a video calling app. The `StreamVideo` iOS SDK provides many different ways and flexibility to build your custom UI. Depending on your use-case, you can either reuse our lower-level UI components, or build completely custom ones, while making use of our video client and state handling. - -### The sample app - -In this tutorial, we will build a video calling app without using our UI SDKs. 
The goal would be to show you how to use our `StreamVideo` client and the `CallViewModel`, as well as how to build your custom UI components. - -Here's a screenshot of what we are going to build: - -![Screenshot shows the resulting UI](../assets/cookbook_01.png) - -### Prerequisites - -This tutorial focuses solely on building a custom UI without going through the setup of our `StreamVideo` client, or authenticating users. Make sure to check our [sample project](https://github.com/GetStream/stream-video-ios-examples/tree/main/UICookbook) and perform the required setup. - -### Starting a call - -In order to start a call, you will need to know the call's id. Let's add a simple UI that will allow the users to enter the call id: - -```swift -struct JoinCallView: View { - - @State var callId = "" - @ObservedObject var viewModel: CallViewModel - - var body: some View { - VStack { - TextField("Insert call id", text: $callId) - Button { - resignFirstResponder() - viewModel.startCall(callType: .default, callId: callId, members: []) - } label: { - Text("Join call") - } - Spacer() - } - .padding() - } - -} -``` - -Note the `viewModel.startCall` method, which is called on a tap of the "Join call" button. This starts a call with the provided call id. Adding participants is optional - if the call type is `default`, anyone can join the call. - -### Listening to the calling state - -Next, let's use this view in a container view, called `HomeView`, that will also present our calling screen. 
- -```swift -struct HomeView: View { - - @ObservedObject var appState: AppState - - var viewFactory: Factory - @StateObject var viewModel = CallViewModel() - - var body: some View { - ZStack { - JoinCallView(viewModel: viewModel) - - if viewModel.callingState == .joining { - ProgressView() - } else if viewModel.callingState == .inCall { - CallView(viewFactory: viewFactory, viewModel: viewModel) - } - } - } -} -``` - -In this view, we are creating the `CallViewModel`, that allows us to start a call, but also listen to the `callingState`. We can use this `@Published` variable to update our UI accordingly. - -When the call is in the `.joining` state, we can show a `ProgressView`. Whenever it changes to the `.inCall` state (which means the user has joined the call), we can show our custom `CallView`. - -### Building a custom CallView - -Next, let's build our custom `CallView`, which would have a different UI than the default UI SDK implementation. Our goal here would be to show a large video of the current speaker, while the other users' avatars are presented at the bottom, in a horizontally scrollable list. - -First, let's see how we can access the participants. - -```swift -var participants: [CallParticipant] { - viewModel.callParticipants - .map(\.value) - .sorted(by: defaultComparators) -} -``` - -The call participants are exposed via the `CallViewModel`'s `callParticipants` dictionary. You can sort them or group them based on their different properties, such as whether they are speaking, they have audio / video or any other different criteria. The `callParticipants` dictionary is a `@Published` variable, and it will trigger updates in your views, whenever its state changes. - -There are default sort comparators, that you can use to sort the participants. 
The default comparators prioritize the pinned user, then the dominant speaker etc: - -```swift -public let defaultComparators: [StreamSortComparator] = [ - pinned, - screensharing, - dominantSpeaker, - ifInvisible(isSpeaking), - ifInvisible(publishingVideo), - ifInvisible(publishingAudio), - ifInvisible(userId) -] -``` - -You can provide your own ordering by calling the `sorted(using: comparators)` method on the `CallParticipants`. - -Additionally, you can access the same properties for the local user, via the `CallViewModel`'s `localParticipant` variable. - -Next, let's see the implementation of the `CallView`'s `body`: - -```swift -var body: some View { - VStack { - ZStack { - GeometryReader { reader in - if let dominantSpeaker = participants.first { - VideoCallParticipantView( - participant: dominantSpeaker, - availableFrame: reader.frame(in: .global), - contentMode: .scaleAspectFit, - customData: customData, - call: call - ) - } - - VStack { - Spacer() - CustomCallControlsView(viewModel: viewModel) - } - } - } - .frame(maxWidth: .infinity, maxHeight: .infinity) - .cornerRadius(32) - .padding(.bottom) - .padding(.horizontal) - - ScrollView(.horizontal) { - HStack { - ForEach(participants.dropFirst()) { participant in - BottomParticipantView(participant: participant) - } - } - } - .padding(.all, 32) - .frame(height: 100) - .frame(maxWidth: .infinity) - } - .background(Color.black) -} -``` - -Our main container would be a `VStack`, that consists of two parts - the dominant speaker view and the scrollable participant list. -Additionally, the dominant speaker view also shows the call controls for the current user, which means we can use a `ZStack`. - -#### Dominant speaker view - -The dominant speaker is presented using the SDKs `VideoCallParticipantView`, which handles showing both the video feed of the user, or their profile image if the video is disabled. 
If you want a different behaviour or UI here, you can also use the lower-level component `VideoRendererView`, that shows the video feed of the participant, and add your additional custom UI elements. - -#### Custom call controls view - -Next, let's see the `CustomCallControlsView`: - -```swift -struct CustomCallControlsView: View { - - @ObservedObject var viewModel: CallViewModel - - var body: some View { - HStack(spacing: 32) { - VideoIconView(viewModel: viewModel) - MicrophoneIconView(viewModel: viewModel) - ToggleCameraIconView(viewModel: viewModel) - HangUpIconView(viewModel: viewModel) - } - .frame(maxWidth: .infinity) - .frame(height: 85) - } -} -``` - -In this example, we are building a custom call controls view, using buttons from the SDK, for muting video/audio, toggling camera and hanging up. You can easily add your own UI elements in the `HStack` above. You can use the `CallViewModel` for the standard call-related actions, or use the `Call` object directly for custom events and reactions (as described [here](../../advanced/events)), and for permissions related actions (as described [here](../../guides/permissions-and-moderation)). - -#### Horizontally scrollable list - -Finally, let's see the horizontally scrollable list at the bottom again: - -```swift -ScrollView(.horizontal) { - HStack { - ForEach(participants.dropFirst()) { participant in - BottomParticipantView(participant: participant) - } - } -} -``` - -Here, we drop the first element (that's displayed in the dominant speaker view) from the participants array. This components displays a custom view of type `BottomParticipantView`: - -```swift -struct BottomParticipantView: View { - - var participant: CallParticipant - - var body: some View { - UserAvatar(imageURL: participant.profileImageURL, size: 80) - .overlay( - !participant.hasAudio ? 
- BottomRightView { - MuteIndicatorView() - } - : nil - ) - } - -} -``` - -This is a simple view that makes use of our `UserAvatar` view for displaying the user's profile image. It also shows an overlay for the mute indicator, which in this case is a custom UI element: - -```swift -struct MuteIndicatorView: View { - - var body: some View { - Image(systemName: "mic.slash.fill") - .resizable() - .aspectRatio(contentMode: .fit) - .frame(width: 14) - .padding(.all, 12) - .foregroundColor(.gray) - .background(Color.black) - .clipShape(Circle()) - .offset(x: 4, y: 8) - } -} -``` - -### Conclusion - -That's everything that needs to be done to have the UI shown in the screenshot above. Since everything we build was with custom components, you can further change and re-arrange it to fit your use-case. diff --git a/docusaurus/docs/iOS/05-ui-cookbook/05-incoming-call.mdx b/docusaurus/docs/iOS/05-ui-cookbook/05-incoming-call.mdx deleted file mode 100644 index e12d73d98..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/05-incoming-call.mdx +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: Incoming Call -description: Incoming Call ---- - -The `IncomingCallView` lets you easily build UI when you're being called or ringed by other people in an app. It's used to show more information about the participants and the call itself, as well as give you the option to reject or accept the call. - -## Custom Incoming Call View - -If you want to customize (or completely replace) the `IncomingCallView`, you should use the `ViewFactory` method `makeIncomingCallView`: - -```swift -func makeIncomingCallView(viewModel: CallViewModel, callInfo: IncomingCall) -> some View { - CustomIncomingCallView(callInfo: callInfo, callViewModel: viewModel) -} -``` - -For example, let's build an incoming call view that can be used in a healthcare app. 
- -![Screenshot shows the custom incoming call view](../assets/cookbook_incoming.png) - -Next, let's see how we can build this view: - -```swift -struct CustomIncomingCallView: View { - - @Injected(\.colors) var colors - - @ObservedObject var callViewModel: CallViewModel - @StateObject var viewModel: IncomingViewModel - - init( - callInfo: IncomingCall, - callViewModel: CallViewModel - ) { - self.callViewModel = callViewModel - _viewModel = StateObject( - wrappedValue: IncomingViewModel(callInfo: callInfo) - ) - } - - var body: some View { - VStack { - Spacer() - Text("Incoming call") - .foregroundColor(Color(colors.textLowEmphasis)) - .padding() - - LazyImage(url: callInfo.caller.imageURL) - .frame(width: 80, height: 80) - .clipShape(RoundedRectangle(cornerRadius: 8)) - .padding() - - Text(callInfo.caller.name) - .font(.title) - .foregroundColor(Color(colors.textLowEmphasis)) - .padding() - - Spacer() - - HStack(spacing: 16) { - Spacer() - - Button { - callViewModel.rejectCall(callType: callInfo.type, callId: callInfo.id) - } label: { - Image(systemName: "phone.down.fill") - .foregroundColor(.white) - .padding() - .background( - RoundedRectangle(cornerRadius: 8) - .fill(Color.red) - .frame(width: 60, height: 60) - ) - } - .padding(.all, 8) - - Button { - callViewModel.acceptCall(callType: callInfo.type, callId: callInfo.id) - } label: { - Image(systemName: "phone.fill") - .foregroundColor(.white) - .padding() - .background( - RoundedRectangle(cornerRadius: 8) - .fill(Color.green) - .frame(width: 60, height: 60) - ) - } - .padding(.all, 8) - - Spacer() - } - .padding() - - } - .background(Color.white.edgesIgnoringSafeArea(.all)) - } - - var callInfo: IncomingCall { - viewModel.callInfo - } - -} -``` - -The incoming call view is built from standard SwiftUI components, as well as `StreamLazyImage`, for displaying the caller's avatar. In the view, we just display the data of the `IncomingCall` and we provide buttons to accept or reject the call. 
\ No newline at end of file diff --git a/docusaurus/docs/iOS/05-ui-cookbook/06-lobby-preview.mdx b/docusaurus/docs/iOS/05-ui-cookbook/06-lobby-preview.mdx deleted file mode 100644 index ea2a03326..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/06-lobby-preview.mdx +++ /dev/null @@ -1,368 +0,0 @@ ---- -title: Lobby Preview -description: Lobby Preview ---- - -The lobby view shows a preview of the call, and it lets users configure their audio/video before joining a call. Our SwiftUI SDK already provides a `LobbyView` that you can directly use in your apps. - -In this cookbook, we will see how to implement this by yourself, while relying on some lower-level components from the StreamVideo SDK. - -### Custom LobbyView - -First, let's define the `CustomLobbyView`: - -```swift -public struct CustomLobbyView: View { - - @StateObject var viewModel: LobbyViewModel - @StateObject var microphoneChecker = MicrophoneChecker() - - var callId: String - var callType: String - @Binding var callSettings: CallSettings - var onJoinCallTap: () -> () - var onCloseLobby: () -> () - - public init( - callId: String, - callType: String, - callSettings: Binding, - onJoinCallTap: @escaping () -> (), - onCloseLobby: @escaping () -> () - ) { - self.callId = callId - self.callType = callType - self.onJoinCallTap = onJoinCallTap - self.onCloseLobby = onCloseLobby - _callSettings = callSettings - _viewModel = StateObject( - wrappedValue: LobbyViewModel( - callType: callType, - callId: callId - ) - ) - } - - public var body: some View { - CustomLobbyContentView( - viewModel: viewModel, - microphoneChecker: microphoneChecker, - callId: callId, - callType: callType, - callSettings: $callSettings, - onJoinCallTap: onJoinCallTap, - onCloseLobby: onCloseLobby - ) - } -} -``` - -Next, let's define the `CustomLobbyContentView`: - -```swift -struct CustomLobbyContentView: View { - - @Injected(\.images) var images - @Injected(\.colors) var colors - @Injected(\.streamVideo) var streamVideo - - 
@ObservedObject var viewModel: LobbyViewModel - @ObservedObject var microphoneChecker: MicrophoneChecker - - var callId: String - var callType: String - @Binding var callSettings: CallSettings - var onJoinCallTap: () -> () - var onCloseLobby: () -> () - - var body: some View { - GeometryReader { reader in - ZStack { - VStack { - Spacer() - Text("Before Joining") - .font(.title) - .foregroundColor(colors.text) - .bold() - - Text("Setup your audio and video") - .font(.body) - .foregroundColor(Color(colors.textLowEmphasis)) - - CameraCheckView( - viewModel: viewModel, - microphoneChecker: microphoneChecker, - callSettings: callSettings, - availableSize: reader.size - ) - - if microphoneChecker.isSilent { - Text("Your microphone doesn't seem to be working. Make sure you have all permissions accepted.") - .font(.caption) - .foregroundColor(colors.text) - } - - CallSettingsView(callSettings: $callSettings) - - JoinCallView( - callId: callId, - callType: callType, - callParticipants: viewModel.participants, - onJoinCallTap: onJoinCallTap - ) - } - .padding() - - TopRightView { - Button { - onCloseLobby() - } label: { - Image(systemName: "xmark") - .foregroundColor(colors.text) - } - .padding() - } - } - .frame(maxWidth: .infinity, maxHeight: .infinity) - .background(colors.lobbyBackground.edgesIgnoringSafeArea(.all)) - } - .onAppear { - viewModel.startCamera(front: true) - } - .onDisappear { - viewModel.stopCamera() - } - } -} -``` - -Next, let's explore the `CameraCheckView`, which checks the video/audio capabilities of the current user: - -```swift -struct CameraCheckView: View { - - @Injected(\.images) var images - @Injected(\.colors) var colors - @Injected(\.streamVideo) var streamVideo - - @ObservedObject var viewModel: LobbyViewModel - @ObservedObject var microphoneChecker: MicrophoneChecker - var callSettings: CallSettings - var availableSize: CGSize - - var body: some View { - Group { - if let image = viewModel.viewfinderImage, callSettings.videoOn { - image - 
.resizable() - .aspectRatio(contentMode: .fill) - .frame(width: availableSize.width - 32, height: cameraSize) - .cornerRadius(16) - .accessibility(identifier: "cameraCheckView") - .streamAccessibility(value: "1") - } else { - ZStack { - Rectangle() - .fill(colors.lobbySecondaryBackground) - .frame(width: availableSize.width - 32, height: cameraSize) - .cornerRadius(16) - - if #available(iOS 14.0, *) { - UserAvatar(imageURL: streamVideo.user.imageURL, size: 80) - .accessibility(identifier: "cameraCheckView") - .streamAccessibility(value: "0") - } - } - .opacity(callSettings.videoOn ? 0 : 1) - .frame(width: availableSize.width - 32, height: cameraSize) - } - } - .overlay( - VStack { - Spacer() - HStack { - MicrophoneCheckView( - audioLevels: microphoneChecker.audioLevels, - microphoneOn: callSettings.audioOn, - isSilent: microphoneChecker.isSilent, - isPinned: false - ) - .accessibility(identifier: "microphoneCheckView") - Spacer() - } - .padding() - } - ) - } - - private var cameraSize: CGFloat { - if viewModel.participants.count > 0 { - return availableSize.height / 2 - 64 - } else { - return availableSize.height / 2 - } - } -} -``` - -Here, we are using the `MicrophoneCheckView` and the `ConnectionQualityIndicator` from the SwiftUI SDK. They display the microphone state and the network quality of the current user. You can implement your own versions of these views, in case you want a different UI. - -Next, we have the `CallSettingsView`, which shows the controls for changing the audio and video state of the user in the call: - -```swift -struct CallSettingsView: View { - - @Injected(\.images) var images - - @Binding var callSettings: CallSettings - - private let iconSize: CGFloat = 50 - - var body: some View { - HStack(spacing: 32) { - Button { - callSettings = CallSettings( - audioOn: !callSettings.audioOn, - videoOn: callSettings.videoOn, - speakerOn: callSettings.speakerOn - ) - } label: { - CallIconView( - icon: (callSettings.audioOn ? 
images.micTurnOn : images.micTurnOff), - size: iconSize, - iconStyle: (callSettings.audioOn ? .primary : .transparent) - ) - .accessibility(identifier: "microphoneToggle") - .streamAccessibility(value: callSettings.audioOn ? "1" : "0") - } - - Button { - callSettings = CallSettings( - audioOn: callSettings.audioOn, - videoOn: !callSettings.videoOn, - speakerOn: callSettings.speakerOn - ) - } label: { - CallIconView( - icon: (callSettings.videoOn ? images.videoTurnOn : images.videoTurnOff), - size: iconSize, - iconStyle: (callSettings.videoOn ? .primary : .transparent) - ) - .accessibility(identifier: "cameraToggle") - .streamAccessibility(value: callSettings.videoOn ? "1" : "0") - } - } - .padding() - } -} -``` - -In this view, we are using the `CallIconView` component from the SwiftUI SDK, for displaying the mic and camera icons. This view updates the `CallSettings` provided as a `@Binding`, based on the user's selections. - -Next, we need the `JoinCallView`, which displays the button that allows users to join the call: - -```swift -struct JoinCallView: View { - - @Injected(\.colors) var colors - - var callId: String - var callType: String - var callParticipants: [User] - var onJoinCallTap: () -> () - - var body: some View { - VStack(spacing: 16) { - Text("You are about to join a call.") - .font(.headline) - .accessibility(identifier: "otherParticipantsCount") - .streamAccessibility(value: "\(otherParticipantsCount)") - - if #available(iOS 14, *) { - if callParticipants.count > 0 { - ParticipantsInCallView( - callParticipants: callParticipants - ) - } - } - - Button { - onJoinCallTap() - } label: { - Text("Join Call") - .bold() - .frame(maxWidth: .infinity) - .accessibility(identifier: "joinCall") - } - .frame(height: 50) - .background(colors.primaryButtonBackground) - .cornerRadius(16) - .foregroundColor(.white) - } - .padding() - .background(colors.lobbySecondaryBackground) - .cornerRadius(16) - } - - private var otherParticipantsCount: Int { - let count = 
callParticipants.count - 1 - if count > 0 { - return count - } else { - return 0 - } - } -} -``` - -Finally, let's see the view that displays the users that are already in the call: - -```swift -@available(iOS 14.0, *) -struct ParticipantsInCallView: View { - - struct ParticipantInCall: Identifiable { - let id: String - let user: User - } - - var callParticipants: [User] - - var participantsInCall: [ParticipantInCall] { - var result = [ParticipantInCall]() - for (index, participant) in callParticipants.enumerated() { - let id = "\(index)-\(participant.id)" - let participant = ParticipantInCall(id: id, user: participant) - result.append(participant) - } - return result - } - - private let viewSize: CGFloat = 64 - - var body: some View { - VStack(spacing: 4) { - Text("There are \(callParticipants.count) more people in the call.") - .font(.headline) - - ScrollView(.horizontal) { - LazyHStack { - ForEach(participantsInCall) { participant in - VStack { - UserAvatar( - imageURL: participant.user.imageURL, - size: 40 - ) - Text(participant.user.name) - .font(.caption) - } - .frame(width: viewSize, height: viewSize) - } - } - } - } - } -} -``` - -With that, we have a similar implementation to our default `LobbyView`, while reusing most of our low-level components and capabilities. Since this would be a custom implementation in your own app, you can easily modify it to suit your needs. diff --git a/docusaurus/docs/iOS/05-ui-cookbook/07-video-fallback.mdx b/docusaurus/docs/iOS/05-ui-cookbook/07-video-fallback.mdx deleted file mode 100644 index b2d59d1c1..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/07-video-fallback.mdx +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: Video Fallback -description: Video Fallback ---- - -When the video is disabled, there are many different ways to show a placeholder view. By default, the SDK shows a circled image of the user's avatar (if available), and a blurred background of the same image. 
- -You can change this view with any SwiftUI view that fits your app's look and feel. - -For example, let's change the video fallback to be a gradient, with a microphone icon in the middle (as an indicator whether the user is speaking). - -![Screenshot shows the custom video fallback](../assets/cookbook_gradient.png) - -In order to do this, we need to implement the `makeVideoParticipantView` in our custom `ViewFactory`: - -```swift -func makeVideoParticipantView( - participant: CallParticipant, - id: String, - availableFrame: CGRect, - contentMode: UIView.ContentMode, - customData: [String: RawJSON], - call: Call? -) -> some View { - CustomVideoCallParticipantView( - participant: participant, - id: id, - availableFrame: availableFrame, - contentMode: contentMode, - call: call - ) -} -``` - -The implementation of the `CustomVideoCallParticipantView` looks like this: - -```swift -struct CustomVideoCallParticipantView: View { - - @Injected(\.images) var images - @Injected(\.streamVideo) var streamVideo - - let participant: CallParticipant - var id: String - var availableFrame: CGRect - var contentMode: UIView.ContentMode - var edgesIgnoringSafeArea: Edge.Set - var call: Call? - - public init( - participant: CallParticipant, - id: String? = nil, - availableFrame: CGRect, - contentMode: UIView.ContentMode, - edgesIgnoringSafeArea: Edge.Set = .all, - call: Call? - ) { - self.participant = participant - self.id = id ?? participant.id - self.availableFrame = availableFrame - self.contentMode = contentMode - self.edgesIgnoringSafeArea = edgesIgnoringSafeArea - self.call = call - } - - public var body: some View { - VideoRendererView( - id: id, - size: availableFrame.size, - contentMode: contentMode, - handleRendering: { view in - view.handleViewRendering(for: participant) { size, participant in - Task { - await call?.updateTrackSize(size, for: participant) - } - } - } - ) - .opacity(showVideo ? 
1 : 0) - .edgesIgnoringSafeArea(edgesIgnoringSafeArea) - .accessibility(identifier: "callParticipantView") - .streamAccessibility(value: showVideo ? "1" : "0") - .overlay( - CallParticipantImageView( - id: participant.id, - name: participant.name, - imageURL: participant.profileImageURL - ) - .frame(width: availableFrame.size.width) - .opacity(showVideo ? 0 : 1) - ) - } - - private var showVideo: Bool { - participant.shouldDisplayTrack || customData["videoOn"]?.boolValue == true - } -} -``` - -The important part here is the `overlay` modifier, which is shown when the video is not displayed for the user. - -Instead of the standard image background, we are using a `ZStack` here, where first view is the `LinearGradient` (using green and black colors), followed by a circle with a microphone icon. - -The circle has on overlay as well, which is a green border shown only when the current participant is speaking. With that, there's a visual feedback when the participant is speaking. \ No newline at end of file diff --git a/docusaurus/docs/iOS/05-ui-cookbook/08-permission-requests.mdx b/docusaurus/docs/iOS/05-ui-cookbook/08-permission-requests.mdx deleted file mode 100644 index 5c5121ffb..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/08-permission-requests.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Permission Requests -description: Permission Requests ---- - -Different call participants can have different capabilities and permissions. Depending on your app's requirements, some users might be able to request additional permissions during a call. More details about permissions [here](../../guides/permissions-and-moderation). - -Consider the following custom implementation of CallViewModel that manages permision requests: -```swift -final class CustomCallViewModel: CallViewModel { - var permissionRequest: PermissionRequestEvent? 
- @Published var permissionPopupShown = false - - func subscribeForPermissionsRequests() { - Task { - for await request in call!.subscribe(for: PermissionRequestEvent.self) { - self.permissionRequest = request - } - } - } - - func grantUserPermissions() async throws { - guard let request = permissionRequest else { return } - let permissionRequests = request.permissions.map { PermissionRequest(permission: $0, user: request.user.toUser, requestedAt: request.createdAt) } - for permissionRequest in permissionRequests { - try await call?.grant(request: permissionRequest) - } - } -} -``` - -When a user asks for additional capabilities, the hosts of the call receive an event that they can react to (approve or deny the request). You can listen to these events by subscribing to the `subscribe(for:)` async stream: - -```swift -func subscribeForPermissionsRequests() { - Task { - for await request in call!.subscribe(for: PermissionRequestEvent.self) { - self.permissionRequest = request - } - } -} -``` - -The simplest way to present these is via alerts in SwiftUI, with two buttons for approving or denying the permission request. - -```swift -struct CustomView: View { - @ObservedObject var viewModel: CustomCallViewModel - - var body: some View { - YourHostView() - .alert(isPresented: $viewModel.permissionPopupShown) { - Alert( - title: Text("Permission request"), - message: Text("\(viewModel.permissionRequest?.user.name ?? "Someone") raised their hand to speak."), - primaryButton: .default(Text("Allow")) { - Task { - try await viewModel.grantUserPermissions() - } - }, - secondaryButton: .cancel() - ) - } - } -} -``` - -Depending on your app's UI, you can also easily build a more customized version of this view. 
\ No newline at end of file diff --git a/docusaurus/docs/iOS/05-ui-cookbook/09-audio-volume-indicator.mdx b/docusaurus/docs/iOS/05-ui-cookbook/09-audio-volume-indicator.mdx deleted file mode 100644 index 98297893d..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/09-audio-volume-indicator.mdx +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Audio Volume Indicator -description: Audio Volume Indicator ---- - -The audio indicator gives you a visual feedback when a user is speaking. To understand who's speaking we provide `call.state.dominantSpeaker` and `call.state.activeSpeakers`. On the participant you can observe `participant.isSpeaking`, `participant.isDominantSpeaker`, `participant.audioLevel` and `participant.audioLevels`. - -### Audio Volume Indicator - -You can use the `AudioVolumeIndicator` component to display the audio levels of the participant. For example, you can store the last few `audioLevel` value of the participant and provide them to the component for visualization. - -![Screenshot shows the audio volume indicator](../assets/audio-volume-indicator.png) - -Here's an example how to do that: - -```swift -AudioVolumeIndicator( - audioLevels: [0.8, 0.9, 0.7], - maxHeight: 14, - minValue: 0, - maxValue: 1 -) -``` - -The component expects the following parameters: -- `audioLevels` - array of `Float` values that are going to be displayed. -- `maxHeight` - the max height of one tick of the indicator. -- `minValue` - the min value that can be displayed. -- `maxValue` - the max value that can be dispalyed. - -### Showing Audio Levels in Lobby - -If you want to build a custom pre-joining (lobby) view, that's displayed to users before they join the call, you might want to show an indicator of the audio levels of the current user. - -In order to do this, you can use the `MicrophoneChecker` and the `MicrophoneCheckView` from the `StreamVideo` SwiftUI SDK. 
The `MicrophoneChecker` is an observable class that provides updates for the last audioLevels of the current user. The `MicrophoneCheckView` presents them in a reusable view component. - -Both components are used in our `LobbyView`, that you can also directly use in your apps. - -Here's an example usage. First, you instantiate the `MicrophoneChecker` class, e.g. as a `@StateObject`, if it's used directly in your SwiftUI views: - -```swift -@StateObject var microphoneChecker = MicrophoneChecker() -``` - -Optionally, you can provide a `valueLimit` in the initializer of the `MicrophoneChecker`. By default, this value is 3, which means it returns the last three audioLevels. You can pass a bigger number if you want to show more values to the user. - -Then, in the `MicrophoneCheckView`, you pass the audioLevels array, as well as the `microphoneChecker.isSilent` value. If this value is `true`, the UI shows a warning to the user that they might have an issue with their microphone. - -```swift -MicrophoneCheckView( - audioLevels: microphoneChecker.audioLevels, - microphoneOn: callViewModel.callSettings.audioOn, - isSilent: microphoneChecker.isSilent, - isPinned: false -) -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/05-ui-cookbook/10-network-quality-indicator.mdx b/docusaurus/docs/iOS/05-ui-cookbook/10-network-quality-indicator.mdx deleted file mode 100644 index aad0144ef..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/10-network-quality-indicator.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: Network Quality Indicator -description: Network Quality Indicator ---- - -Network quality can impact the video experience a lot. Therefore, it's always a good idea to display the network quality of the participants in the call. - -### Network quality in a call - -When you are in a call, the network quality for each participant is delivered from the server. It's available via the `connectionQuality` property for each `CallParticipant`.
The connection quality property can have the following values: - -```swift -public enum ConnectionQuality: Sendable { - case unknown - case poor - case good - case excellent -} -``` - -By default, the SwiftUI SDK displays this information in the `VideoCallParticipantModifier`, via the `ConnectionQualityIndicator` view. - -```swift -ConnectionQualityIndicator(connectionQuality: participant.connectionQuality) -``` - -Additionally, you can change the size and the width of the connection quality ticks, by passing the `size` and `width` parameters to the `ConnectionQualityIndicator` view. - -If you wish to change this behaviour, you should implement the `makeVideoCallParticipantModifier` method and provide your own implementation that can modify or hide this view. diff --git a/docusaurus/docs/iOS/05-ui-cookbook/11-speaking-while-muted.mdx b/docusaurus/docs/iOS/05-ui-cookbook/11-speaking-while-muted.mdx deleted file mode 100644 index 63fea7620..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/11-speaking-while-muted.mdx +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: Speaking While Muted -description: Speaking While Muted ---- - -Some calling apps show an indicator when you are trying to speak while you are muted. This is helpful for users that forgot to turn on the microphone when they try to speak. - -You can easily add such functionality with the `StreamVideo` iOS SDK. One approach to achieve this is to add an overlay to the `CallView` with the indicator. The logic for displaying the popup can be determined by listening to sounds even when the microphone is turned off. When a sound goes over a certain threshold, we will display the indicator. 
- -Here's an example implementation for this approach, using the `MicrophoneChecker` from the SDK: - -```swift -struct CustomCallView: View { - - @Injected(\.colors) var colors - - var viewFactory: Factory - @ObservedObject var viewModel: CallViewModel - - @StateObject var microphoneChecker = MicrophoneChecker() - @State var mutedIndicatorShown = false - - var body: some View { - CallView(viewFactory: viewFactory, viewModel: viewModel) - .onReceive(viewModel.$callSettings) { callSettings in - Task { await updateMicrophoneChecker() } - } - .onReceive(microphoneChecker.$audioLevels, perform: { values in - guard !viewModel.callSettings.audioOn else { return } - for value in values { - if (value > -50 && value < 0) && !mutedIndicatorShown { - mutedIndicatorShown = true - DispatchQueue.main.asyncAfter(deadline: .now() + 2.0, execute: { - mutedIndicatorShown = false - }) - return - } - } - }) - .overlay( - mutedIndicatorShown ? - VStack { - Spacer() - Text("You are muted.") - .padding(8) - .background(Color(UIColor.systemBackground)) - .foregroundColor(colors.text) - .cornerRadius(16) - .padding() - } - : nil - ) - } - - private func updateMicrophoneChecker() async { - if !viewModel.callSettings.audioOn { - await microphoneChecker.startListening() - } else { - await microphoneChecker.stopListening() - } - } -} -``` - -In the implementation, we are listening to the `callSettings` changes from the view model. Based on that, we decide whether we should listen for sounds from the microphone checker. - -Additionally, we are listening to the `@Published` property called `audioLevels` from the `MicrophoneChecker`, which returns an array of the last 3 decibel values. If a value passes our threshold, then we set the `mutedIndicatorShown` to true, which displays a simple text popup. We reset the value to false after 2 seconds, to hide the popup.
- -Finally, we need to use the custom call view in the custom `ViewFactory`: - -```swift -func makeCallView(viewModel: CallViewModel) -> some View { - CustomCallView(viewFactory: self, viewModel: viewModel) -} -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/05-ui-cookbook/12-connection-unstable.mdx b/docusaurus/docs/iOS/05-ui-cookbook/12-connection-unstable.mdx deleted file mode 100644 index f409ec082..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/12-connection-unstable.mdx +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Connection Unstable -description: Connection Unstable ---- - -As described [here](../network-quality-indicator), you can listen to the participants' connection quality and show a network quality indicator. Additionally, you can use this information to notify the current user (in case their connection is bad). Depending on where you want to place the warning, you can choose to customize one of the [view slots available](../../guides/view-slots), and add the indicator as an overlay: - -```swift -YourView() - .overlay( - participant.connectionQuality == .poor ? Text("Your network connection is bad.") : nil - ) -``` - -When the connection is unstable and it drops, we try to reconnect to the call. In that case, the `callingState` from the `CallViewModel` changes to `reconnecting` and we present a view that reflects this state.
- -If you want to customize the default reconnection view, you should implement the method `makeReconnectionView` in the `ViewFactory`: - -```swift -func makeReconnectionView(viewModel: CallViewModel) -> some View { - ReconnectionView(viewModel: viewModel, viewFactory: self) -} -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/05-ui-cookbook/13-pinning-users.mdx b/docusaurus/docs/iOS/05-ui-cookbook/13-pinning-users.mdx deleted file mode 100644 index 85e0b6de6..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/13-pinning-users.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: Pinning Users -description: Documentation on how to pin and unpin users ---- - -The StreamVideo SDK has support for pinning users, both locally for the current user, and remotely for everyone in the call. - -Every user can pin as many participants on the call as they want, and those users will be shown first in the list of participants for that user. Pinning someone for everyone in the call requires the `pinForEveryone` capability. - -By default, the pinned users appear first on the participants array. The ones which are pinned remotely appear before the local ones. If there are several remotely pinned users, they are sorted by the pinning date (the most recent pins appear first). You can change this sorting behaviour with your own implementation. 
- -#### Local pins - -In order to pin a user locally, you should call the following method on the `Call` object: - -```swift -try await call.pin(sessionId: "pinned_user_session_id") -``` - -To unpin the user, you should use the `unpin` method: - -```swift -try await call.unpin(sessionId: "pinned_user_session_id") -``` - -#### Remote pins - -If you want to pin a user for everyone in the call, you should use the following method: - -```swift -let response = try await call.pinForEveryone(userId: "pinned_user_id", sessionId: "pinned_user_session_id") -``` - -Similarly, to unpin a user, you should call the following method: - -```swift -let response = try await call.unpinForEveryone(userId: "pinned_user_id", sessionId: "pinned_user_session_id") -``` - -### UI SDK support - -By default, the UI components show a popup with these options (based on their permissions), on a double tap of the video feed of a particular user. This is implemented in the `VideoCallParticipantModifier` modifier that is attached on the video view. 
- -If you want to change this behaviour, you should implement your own version of it, by using the `ViewFactory` method `makeVideoCallParticipantModifier`: - -```swift -func makeVideoCallParticipantModifier( - participant: CallParticipant, - call: Call?, - availableFrame: CGRect, - ratio: CGFloat, - showAllInfo: Bool -) -> some ViewModifier { - CustomVideoCallParticipantModifier( - participant: participant, - call: call, - availableFrame: availableFrame, - ratio: ratio, - showAllInfo: showAllInfo - ) -} -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/05-ui-cookbook/14-livestream-player.mdx b/docusaurus/docs/iOS/05-ui-cookbook/14-livestream-player.mdx deleted file mode 100644 index d66d0403b..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/14-livestream-player.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Watching a livestream -description: Documentation on how to play WebRTC livestream ---- - -As described in our livestream [tutorial](https://getstream.io/video/sdk/ios/tutorial/livestreaming/), there are two ways of watching a livestream with StreamVideo's SDK: HLS and WebRTC. - -Watching an HLS livestream can be done using Apple's native [AVPlayer](https://developer.apple.com/documentation/avfoundation/avplayer/). - -If you want to watch a WebRTC livestream, then you can either use our `LivestreamPlayer`, or build your own component. - -Our `LivestreamPlayer` provides a standard livestreaming experience: - -- shows a live indicator -- shows the duration of the livestream -- shows the number of participants -- possibility to enter/leave full screen -- possibility to pause/resume the livestream - -![Livestream Player](../assets/livestream-player.png) - -### Usage - -The `LivestreamPlayer` is a SwiftUI view that can be created with the livestream ID and the call type: - -```swift -LivestreamPlayer(type: "livestream", id: "some_id") -``` - -You can show it, for example, via a `NavigationLink`, or as part of your own custom views. 
- -```swift -NavigationLink { - LivestreamPlayer(type: "livestream", id: "vQyteZAnDYYk") -} label: { - Text("Join stream") -} -``` - -Make sure that the livestream id exists, and the call is not in backstage mode. Otherwise, the player will show a livestream not started error. - -### Customization options - -Apart from the required parameters, you can also specify some optional ones in the `LivestreamPlayer`'s init method: - -- `muted`: `Bool` - whether the livestream audio should be on when joining the stream (default is `false`). -- `showParticipantCount`: `Bool` - whether the participant count should be shown (default is `true`). -- `onFullScreenStateChange`: `((Bool) -> ())?` - closure that is invoked when the full screen state changes. Useful if you use the livestream component as part of your custom views, since this is the chance to update the visibility of your custom UI elements. - -## Accessing the livestream track - -You can also build your own version of a livestream player, depending on your requirements. In those cases, you need to have access to the livestream track (or tracks). - -If there is only one video track (you only have one person livestreaming), you can get it with the following code: - -```swift -let livestream = call.state.participants.first(where: { $0.track != nil }) -``` - -If you have multiple hosts that are livestreaming, and you want to show them all, you can fetch the hosts by role: - -```swift -var hosts: [CallParticipant] { - call.state.participants.filter { $0.roles.contains("host") } -} -``` - -Then, you can access the video track they are streaming, with the `track` property. 
\ No newline at end of file diff --git a/docusaurus/docs/iOS/05-ui-cookbook/15-long-press-to-focus.mdx b/docusaurus/docs/iOS/05-ui-cookbook/15-long-press-to-focus.mdx deleted file mode 100644 index 3d9c7c1d9..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/15-long-press-to-focus.mdx +++ /dev/null @@ -1,135 +0,0 @@ ---- -title: Long Press to Focus -description: Documentation on implementing a long press gesture to focus on a particular user in a StreamVideo call. ---- - -# Long Press to Focus - -The StreamVideo SDK allows for interactive and intuitive user engagement during calls. Implementing a long press gesture to focus your local participant's video feed can significantly enhance the user experience. - -## Overview - -- **Focus Implementation**: Focus on the specified point on your video feed. -- **Gesture Recognition**: Detect a long press or a tap on a participant's video feed. -- **User Experience**: Intuitive interaction for a more immersive call experience. - -## Implementing Long Press to Focus - -### Focusing on a point in the Video Feed - -In order to focus the camera at the desired point, we need to forward the request to the WebRTC videoCapturer. Before we do that we need to create a call and then join it: -```swift -// Create the call with the callType and id -let call = streamVideo.call(callType: "default", callId: "123") - -// Create the call on server side -let creationResult = try await call.create() - -// Join the call -let joinResult = try await call.join() -``` - -Once we are in the call, we can leverage the StreamVideo SDK to focus on a specific point on our **local video stream**: - -```swift -// Retrieve the desired focus point(e.g using a tap or longPress gesture) -let focusPoint: CGPoint = CGPoint(x: 50, y: 50) - -// and pass it to our call -try call.focus(at: focusPoint) -``` - -:::note -It's worth mentioning here that: -1. The focus on a point depends on the device's capabilities as the camera needs to support it. -2. 
We can only set the focus point for our local video stream and not for any of the other participants. -::: - -### Detecting the Long Press Gesture - -You can find an implementation that we are using in our Demo app to focus on long press. To achieve that we are using the following ViewModifier: -```swift -struct LongPressToFocusViewModifier: ViewModifier { - - var availableFrame: CGRect - - var handler: (CGPoint) -> Void - - func body(content: Content) -> some View { - content - .gesture( - LongPressGesture(minimumDuration: 0.5) - .sequenced(before: DragGesture(minimumDistance: 0, coordinateSpace: .local)) - .onEnded { value in - switch value { - case .second(true, let drag): - if let location = drag?.location { - handler(convertToPointOfInterest(location)) - } - default: - break - } - } - ) - } - - func convertToPointOfInterest(_ point: CGPoint) -> CGPoint { - CGPoint( - x: point.y / availableFrame.height, - y: 1.0 - point.x / availableFrame.width - ) - } -} -``` - -We can then define a View extension to allow us easily use the ViewModifier: -```swift -extension View { - @ViewBuilder - func longPressToFocus( - availableFrame: CGRect, - handler: @escaping (CGPoint) -> Void - ) -> some View { - modifier( - LongPressToFocusViewModifier( - availableFrame: availableFrame, - handler: handler - ) - ) - } -} -``` - -### Modifying the UI SDK - -In order to use our ViewModifier and we can leverage the ViewFactory that the SDK ships with. By subclassing it we can override the method that provides the `VideoCallParticipantModifier` like below: - -```swift -func makeVideoParticipantView( - participant: CallParticipant, - id: String, - availableFrame: CGRect, - contentMode: UIView.ContentMode, - customData: [String : RawJSON], - call: Call? 
-) -> some View { - DefaultViewFactory.shared.makeVideoParticipantView( - participant: participant, - id: id, - availableFrame: availableFrame, - contentMode: contentMode, - customData: customData, - call: call - ) - .longPressToFocus(availableFrame: availableFrame) { point in - Task { - guard call?.state.sessionId == participant.sessionId else { return } // We are using this to only allow long pressing on our local video feed - try await call?.focus(at: point) - } - } -} -``` - -## Conclusion - -Implementing a long press to focus feature enhances user interaction, allowing participants to easily highlight and engage in a StreamVideo call. With customization options available developers have the flexibility to create a tailored and intuitive user experience. \ No newline at end of file diff --git a/docusaurus/docs/iOS/05-ui-cookbook/16-snapshot.mdx b/docusaurus/docs/iOS/05-ui-cookbook/16-snapshot.mdx deleted file mode 100644 index 496c0856d..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/16-snapshot.mdx +++ /dev/null @@ -1,373 +0,0 @@ ---- -title: Capturing Snapshots -description: Documentation on implementing capturing snapshots in a StreamVideo call. ---- - -# Capturing Snapshots - -Capturing a snapshot of a call it's a very common usecase for video-call products. Luckily StreamVideo provides you with the means to implement it easily in yours. Below you will find the few simple steps required: - -## Overview - -- **Capturing snapshots during call**: Capture snapshots from any CallView. -- **Capturing local participant photo**: Capture photos from the local participant's camera. - -## Capturing snapshots during call - -### The snapshot ViewModifier - -The `StreamVideoSwiftUI` SDK ships with a ViewModifier that can should be attached on the container of the view hierarchy that we would like to capture in our snapshots. 
You can simply attach the modifier as in the example below: - -```swift -YourHostView() - .snapshot( - trigger: snapshotTrigger, - snapshotHandler: { snapshot in - // Further processing ... - } - ) -``` - -The snapshot ViewModifier requires few parameters: -- `trigger`: This is the object controls when the capture should occur. Usually the button that triggers the snapshot and view that will be captured are in different viewHierarchies, which makes passing bindings from a rootView to another, difficult. For this reason the viewModifier expects a trigger of type `SnapshotTriggering` in order to bridge this communication. Below we will see a simple implementation for the trigger. -- `snapshotHandler`: The closure that will be called once the snapshot's capturing completes. The SDK will pass the snapshot in the handler for further processing. - -### The snapshot trigger - -As we discussed below, in a common scenario that button that triggers the snapshot capturing and the view that we need to be snapshotted, will be in different ViewHierarchies. The trigger exists to bridge the communication between those and make capturing a snapshot easy and performant. Here is a simple implementation for the button that triggers the capture: - -```swift -final class StreamSnapshotTrigger: SnapshotTriggering { - lazy var binding: Binding = Binding( - get: { [weak self] in - self?.currentValueSubject.value ?? false - }, - set: { [weak self] in - self?.currentValueSubject.send($0) - } - ) - - var publisher: AnyPublisher { currentValueSubject.eraseToAnyPublisher() } - - private let currentValueSubject = CurrentValueSubject(false) - - init() {} - - func capture() { - binding.wrappedValue = true - } -} -``` - -In order to use it in our Views easily we are going to insert it in the SDK's DependencyInjection system for easy fetcing: - -```swift -/// Provides the default value of the `StreamSnapshotTrigger` class. 
-struct StreamSnapshotTriggerKey: InjectionKey { - @MainActor - static var currentValue: StreamSnapshotTrigger = .init() -} - -extension InjectedValues { - /// Provides access to the `StreamSnapshotTrigger` class to the views and view models. - var snapshotTrigger: StreamSnapshotTrigger { - get { - Self[StreamSnapshotTriggerKey.self] - } - set { - Self[StreamSnapshotTriggerKey.self] = newValue - } - } -} -``` - -With the trigger in the DI system we can easily create a `SnapshotButtonView` that we can add in our controls (or anywhere in our app) that will trigger a snapshot: - -```swift -struct SnapshotButtonView: View { - @Injected(\.snapshotTrigger) var snapshotTrigger - - var body: some View { - Button { - snapshotTrigger.capture() - } label: { - Label { - Text("Capture snapshot") - } icon: { - Image(systemName: "circle.inset.filled") - } - - } - } -} -``` - -### The snapshotHandler - -Once the snapshot is ready, the StreamVideo SDK will call the `snapshotHandler` we passed to the `snapshot` ViewModifier. At this point we have control over the snapshot and what we want to do with it. - -In the example below, we want to send the snapshot the CallPartipants View and using the WebRTC's event channel, send it to all participants in the call. When a new snapshot is being received then each participant will display a simple `ToastView` with the new snapshot. - -Firstly we need to attach the `snapshot` ViewModifier to the desired View. 
The `ViewFactory` comes in handy as we can update it as following: - -```swift -class CustomViewFactory: ViewFactory { - - func makeVideoParticipantsView( - viewModel: CallViewModel, - availableFrame: CGRect, - onChangeTrackVisibility: @escaping @MainActor(CallParticipant, Bool) -> Void - ) -> some View { - DefaultViewFactory.shared.makeVideoParticipantsView( - viewModel: viewModel, - availableFrame: availableFrame, - onChangeTrackVisibility: onChangeTrackVisibility - ) - .snapshot(trigger: snapshotTrigger) { [weak viewModel, weak self] in - guard - let resizedImage = self?.resize(image: $0, to: CGSize(width: 30, height: 30)), - let snapshotData = resizedImage.jpegData(compressionQuality: 0.8) - else { return } - Task { - do { - try await viewModel?.call?.sendCustomEvent([ - "snapshot": .string(snapshotData.base64EncodedString()) - ]) - log.debug("Snapshot was sent successfully ✅") - } catch { - log.error("Snapshot failed to send with error: \(error)") - } - } - } - } - - private func resize( - image: UIImage, - to targetSize: CGSize - ) -> UIImage? 
{ - guard - image.size.width > targetSize.width || image.size.height > targetSize.height - else { - return image - } - - let widthRatio = targetSize.width / image.size.width - let heightRatio = targetSize.height / image.size.height - - // Determine the scale factor that preserves aspect ratio - let scaleFactor = min(widthRatio, heightRatio) - - let scaledWidth = image.size.width * scaleFactor - let scaledHeight = image.size.height * scaleFactor - let targetRect = CGRect( - x: ( - targetSize.width - scaledWidth - ) / 2, - y: (targetSize.height - scaledHeight) / 2, - width: scaledWidth, - height: scaledHeight - ) - - // Create a new image context - UIGraphicsBeginImageContextWithOptions(targetSize, false, 0) - image.draw(in: targetRect) - - let newImage = UIGraphicsGetImageFromCurrentImageContext() - UIGraphicsEndImageContext() - - return newImage - } -} -``` - -:::warning -As we want to send the snapshot using WebRTC's internal event channel, we are affected by some limitations. The size of each event cannot surpass the 100KB limit. For this reason, we are reducing the snapshot's size & quality before sending it. -::: - -In order then to present the snapshot for every participant, we need to subscribe on CallEvents of type `CustomVideoEvent` and then extract the snapshot before passing it to the UI for presentation. For simplicity, we are going to encapsulate all this logic around a `DemoSnapshotViewModel` object like below: - -```swift -@MainActor -final class DemoSnapshotViewModel: ObservableObject { - - private let viewModel: CallViewModel - private var snapshotEventsTask: Task? - - @Published var toast: Toast? 
- - init(_ viewModel: CallViewModel) { - self.viewModel = viewModel - subscribeForSnapshotEvents() - } - - private func subscribeForSnapshotEvents() { - guard let call = viewModel.call else { - snapshotEventsTask?.cancel() - snapshotEventsTask = nil - return - } - - snapshotEventsTask = Task { - for await event in call.subscribe(for: CustomVideoEvent.self) { - guard - let imageBase64Data = event.custom["snapshot"]?.stringValue, - let imageData = Data(base64Encoded: imageBase64Data), - let image = UIImage(data: imageData) - else { - return - } - - toast = .init( - style: .custom( - baseStyle: .success, - icon: AnyView( - Image(uiImage: image) - .resizable() - .frame(maxWidth: 30, maxHeight: 30) - .aspectRatio(contentMode: .fit) - .clipShape(Circle()) - ) - ), - message: "Snapshot captured!" - ) - } - } - } -} -``` - -Finally, we can use the snapshotViewModel in CallView (or any other View) in order to present a toast when a new snapshot arrives, as you can see below: - -```swift -YourRootView() - .toastView(toast: $snapshotViewModel.toast) -``` - -## Capturing local participant photo - -In order to capture photos of the local participant, we can leverage the `AVCaptureSession`. The `Call` object allows us to attach `AVCapturePhotoOutput` & `AVCaptureVideoDataOutput` on the active `AVCaptureSession` and `AVCaptureDevice`. 
- -```swift -let photoOutput: AVCapturePhotoOutput = .init() -let videoOutput: AVCaptureVideoDataOutput = .init() - -do { - guard call?.cId != oldValue?.cId else { return } - do { - if #available(iOS 16.0, *) { - try await call?.addVideoOutput(videoOutput) - /// Following Apple guidelines for videoOutputs from here: - /// https://developer.apple.com/library/archive/technotes/tn2445/_index.html - videoOutput.alwaysDiscardsLateVideoFrames = true - } else { - try await call?.addCapturePhotoOutput(photoOutput) - } - } catch { - log.error("Failed to setup for localParticipant snapshot", error: error) - } -} catch { - log.error("Failed to setup for localParticipant snapshot", error: error) -} -``` - -:::note -A maximum of one output of each type may be added. For applications linked on or after iOS 16.0, this restriction no longer applies to AVCaptureVideoDataOutputs. When adding more than one AVCaptureVideoDataOutput, AVCaptureSession.hardwareCost must be taken into account. Given that WebRTC adds a videoOutput for frame processing, we cannot accept videoOutputs on versions prior to iOS 16.0. -::: - -To capture a photo we can choose one of the following ways, depending on which output we would like to use: - -### Capture photo using AVCapturePhotoOutput - -```swift -func capturePhoto() { - guard !photoOutput.connections.isEmpty else { return } - photoOutput.capturePhoto(with: .init(), delegate: self) -} - -// MARK: - AVCapturePhotoCaptureDelegate - -func photoOutput( - _ output: AVCapturePhotoOutput, - didFinishProcessingPhoto photo: AVCapturePhoto, - error: Error? 
-) { - if let error { - log.error("Failed to capture photo.", error: error) - } else { - if let data = photo.fileDataRepresentation() { - Task { await sendImageData(data) } - } - } -} -``` - -### Capture photo using AVCaptureVideoDataOutput - -```swift -func captureVideoFrame() { - guard !videoOutput.connections.isEmpty else { return } - Task { await state.setIsCapturingVideoFrame(true) } -} - -// MARK: - AVCaptureVideoDataOutputSampleBufferDelegate - -func captureOutput( - _ output: AVCaptureOutput, - didOutput sampleBuffer: CMSampleBuffer, - from connection: AVCaptureConnection -) { - Task { - guard await state.isCapturingVideoFrame else { return } - - if let imageBuffer = sampleBuffer.imageBuffer { - let ciImage = CIImage(cvPixelBuffer: imageBuffer) - if let data = UIImage(ciImage: ciImage).jpegData(compressionQuality: 1) { - await sendImageData(data) - } - } - - await state.setIsCapturingVideoFrame(false) - } -} -``` - -We are using a `State` actor to control the photo's triggering. - -```swift -actor State { - private(set) var isCapturingVideoFrame = false - - func setIsCapturingVideoFrame(_ value: Bool) { - isCapturingVideoFrame = value - } -} -``` - -Both examples are using the same method to send the Image data: - -```swift -func sendImageData(_ data: Data) async { - defer { videoOutput.setSampleBufferDelegate(nil, queue: nil) } - guard - let snapshot = UIImage(data: data), - let resizedImage = resize(image: snapshot, to: .init(width: 30, height: 30)), - let snapshotData = resizedImage.jpegData(compressionQuality: 0.8) - else { - return - } - - do { - try await call?.sendCustomEvent([ - "snapshot": .string(snapshotData.base64EncodedString()) - ]) - } catch { - log.error("Failed to send image.", error: error) - } -} -``` - -We are removing the videoOutput's delegate so we stop receiving frames and avoid video delays due to unnecessary processing. 
- -## Conclusion - -By using the snapshot `ViewModifier`, the `trigger` and passing your logic inside the `snapshotHandler`, you get control over when and what will happen during snapshot capturing. \ No newline at end of file diff --git a/docusaurus/docs/iOS/05-ui-cookbook/17-camera-zoom.mdx b/docusaurus/docs/iOS/05-ui-cookbook/17-camera-zoom.mdx deleted file mode 100644 index 8a5d2802f..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/17-camera-zoom.mdx +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Camera zoom -description: Documentation on implementing cammera zooming. ---- - -# Camera zoom - -Zooming the local participant's camera is a feature that users will look into when using your app. Fortunately the StreamVideo SDK makes that easy for you by taking care of all interactions with the `AVCaptureSession` and the `AVCaptureDevice`. - -Zoom factors are limited with the following: `1 < zoomFactor < activeCaptureDevice.activeFormat.videoMaxZoomFactor`. - -In order to zoom you can simply call the method on the `Call` instance by providing the desired zoomFactor: - -```swift -try await call.zoom(by: 1.5) -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/05-ui-cookbook/18-call-quality-rating.mdx b/docusaurus/docs/iOS/05-ui-cookbook/18-call-quality-rating.mdx deleted file mode 100644 index 7670722a3..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/18-call-quality-rating.mdx +++ /dev/null @@ -1,218 +0,0 @@ ---- -title: Call Quality Rating -description: Documentation on implementing feedback collection when a call ends. ---- - -# Call Quality Rating - -## Introduction -​ -In this guide, we are going to show how one can build a call quality rating form on top of our Swift Video SDK. It is a good practice to ask your end users about their overall experience after the end of the call or, while being in a call. 
- -Here is a preview of the component we are going to build: -![alt text](../assets/call-quality-rating.png) - -## Submit Feedback API -​ -Our Swift Video SDK provides an API for collecting this feedback which later can be seen in the call stats section of our dashboard. - -```swift -try await call.collectUserFeedback( - rating: rating, // a rating grade from 1 - 5, - reason: "it worked great!", // the main feedback - custom: [ - // ... any extra properties that you wish to collect - "callWasAwesome": .bool(true) - ] -) -``` - -## Implementation - -We are going to present a simple view that will be presented as a modal to the user and ask for feedback. - -The feedback view we are going to show will look like the one on the image above and is represented by this code: - -```swift -struct DemoFeedbackView: View { - - @Environment(\.openURL) private var openURL - @Injected(\.appearance) private var appearance - - @State private var email: String = "" - @State private var comment: String = "" - @State private var rating: Int = 1 - @State private var isSubmitting = false - - private var call: Call - private var dismiss: () -> Void - private var isSubmitEnabled: Bool { !email.isEmpty && !isSubmitting } - - init(_ call: Call, dismiss: @escaping () -> Void) { - self.call = call - self.dismiss = dismiss - } - - var body: some View { - ScrollView { - VStack(spacing: 32) { - Image(.feedbackLogo) - - VStack(spacing: 8) { - Text("How is your call going?") - .font(appearance.fonts.headline) - .foregroundColor(appearance.colors.text) - .lineLimit(1) - - Text("All feedback is celebrated!") - .font(appearance.fonts.subheadline) - .foregroundColor(.init(appearance.colors.textLowEmphasis)) - .lineLimit(2) - } - .frame(maxWidth: .infinity, alignment: .center) - .multilineTextAlignment(.center) - - VStack(spacing: 27) { - VStack(spacing: 16) { - TextField( - "Email Address *", - text: $email - ) - .textFieldStyle(DemoTextfieldStyle()) - - DemoTextEditor(text: $comment, placeholder: 
"Message") - } - - HStack { - Text("Rate Quality") - .font(appearance.fonts.body) - .foregroundColor(.init(appearance.colors.textLowEmphasis)) - .frame(maxWidth: .infinity, alignment: .leading) - - DemoStarRatingView(rating: $rating) - } - } - - HStack { - Button { - resignFirstResponder() - openURL(.init(string: "https://getstream.io/video/#contact")!) - } label: { - Text("Contact Us") - } - .frame(maxWidth: .infinity) - .foregroundColor(appearance.colors.text) - .padding(.vertical, 4) - .clipShape(Capsule()) - .overlay(Capsule().stroke(Color(appearance.colors.textLowEmphasis), lineWidth: 1)) - - Button { - resignFirstResponder() - isSubmitting = true - Task { - do { - try await call.collectUserFeedback( - rating: rating, - reason: """ - \(email) - \(comment) - """ - ) - Task { @MainActor in - dismiss() - } - isSubmitting = false - } catch { - log.error(error) - dismiss() - isSubmitting = false - } - } - } label: { - if isSubmitting { - ProgressView() - } else { - Text("Submit") - } - } - .frame(maxWidth: .infinity) - .foregroundColor(appearance.colors.text) - .padding(.vertical, 4) - .background(isSubmitEnabled ? appearance.colors.accentBlue : appearance.colors.lightGray) - .disabled(!isSubmitEnabled) - .clipShape(Capsule()) - } - - Spacer() - } - .padding(.horizontal) - } - } -} -``` - -The View uses a simple star rating View implementation: - -```swift -struct DemoStarRatingView: View { - var rating: Binding - - private var range: ClosedRange - - init( - rating: Binding, - minRating: Int = 1, - maxRating: Int = 5 - ) { - self.rating = rating - range = minRating...maxRating - } - - var body: some View { - HStack { - ForEach(range, id: \.self) { index in - Image(systemName: index <= rating.wrappedValue ? 
"star.fill" : "star") - .resizable() - .frame(width: 30, height: 30) - .foregroundColor(.yellow) - .onTapGesture { - rating.wrappedValue = index - } - } - } - } -} -``` - -With the FeedbackView declared, the next step is to find a way to inject the View in the call's lifecycle in order to be presented to the user at the right time. To simplify this step, the Swift Video SDK provides the `onCallEnded` ViewModifier . The modifier accepts two closures with inputs an optional `Call` object while only the second one also receives a dismiss closure. - -The first closure can be used to provide additional logic when calculating the decision to present or not the modal. The second closure is a `ViewBuilder` that will be called to provide the modal's content. - -The example below, presents the feedback modal **only** to the call's creator. - -```swift -struct CallContainer: View { - @Injected(\.streamVideo) var streamVideo - - var body: some View { - YourRootView() - .modifier(CallModifier(viewModel: viewModel)) - .onCallEnded(presentationValidator: { call in call?.state.createdBy?.id == streamVideo.user.id }) { call, dismiss in - if let call { - DemoFeedbackView(call, dismiss: dismiss) - } - } - } -} -``` - -The ViewModifier observes the Call's lifecycle and looks for the following triggering criteria: -- Once the active call has ended -- If the max number of joined participants, during call's duration, grew to more than one -- It will evaluate the `presentationValidator` - -Then the modifier will trigger the provided closure and will expect a view that will presented inside the modal. - -:::note -The ViewModifier will provide you with a dismiss closure that you can use in your UI to dismiss the modal. 
-::: diff --git a/docusaurus/docs/iOS/05-ui-cookbook/19-transcriptions.mdx b/docusaurus/docs/iOS/05-ui-cookbook/19-transcriptions.mdx deleted file mode 100644 index 17be376f5..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/19-transcriptions.mdx +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: Transcriptions -description: Documentation on implementing transcriptions. ---- - -# Transcriptions - -Enabling your application to provide a transcript for a call can be very useful for you users. We understand though, that this can be a difficult feature to implement/support. - -This is why, the StreamVideo SDK comes with out of the box Transcription support that you can easily manage. - -The fo `Call` object provides 2 levels of control. The first one is in the `Call.state.settings.transcription` where you can find settings related to transcription, as they have been configured from the dashboard. The `mode` property defines the feature's availability with : -- `available`: the feature is available for your call and can be enabled. -- `disabled`: the feature is not available for your call. In this case, it's a good idea to "hide" any UI element you have related to transcription. -- `autoOn`: the feature is available and it will be enabled automatically, once the user is connected on the call. - -The second level of control is the `Call.state.transcribing` which allows you to check if the transcription is enabled at any given time. - -With that in mind, we can build a simple UI element that will allow the user to toggle on/off the Transcription feature. The element will also take care of showing/hiding depending on the feature's availability. 
- -```swift -struct TranscriptionButtonView: View { - - @ObservedObject var viewModel: CallViewModel - @State private var isTranscriptionAvailable = false - @State private var isTranscribing = false - - init(viewModel: CallViewModel) { - self.viewModel = viewModel - if let mode = viewModel.call?.state.settings?.transcription.mode { - self.isTranscriptionAvailable = mode != .disabled - } else { - self.isTranscriptionAvailable = false - } - self.isTranscribing = viewModel.call?.state.transcribing == true - } - - var body: some View { - if let call = viewModel.call { - Group { - if isTranscriptionAvailable { - Button { - Task { - do { - if isTranscribing { - try await call.stopTranscription() - } else { - try await call.startTranscription() - } - } catch { - log.error(error) - } - } - } label: { - Label { - Text(isTranscribing ? "Disable Transcription" : "Transcription") - } icon: { - Image( - systemName: isTranscribing - ? "captions.bubble.fill" - : "captions.bubble" - ) - } - } - .onReceive(call.state.$transcribing) { isTranscribing = $0 } - } - } - .onReceive(call.state.$settings) { - guard let mode = $0?.transcription.mode else { - isTranscriptionAvailable = false - return - } - isTranscriptionAvailable = mode != .disabled - } - } - } -} -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/05-ui-cookbook/20-noise-cancellation.mdx b/docusaurus/docs/iOS/05-ui-cookbook/20-noise-cancellation.mdx deleted file mode 100644 index 062bb6fa4..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/20-noise-cancellation.mdx +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Noise Cancellation UI -description: Documentation on toggling noise cancellation. ---- - -# Noise Cancellation - -Enabling your application to provide noise cancellation for the audio in the call can be very useful for your users. We understand though, that this can be a difficult feature to implement/support. - -This is why, the StreamVideo SDK comes with support for AudioFilters. 
In addition we offer an easy to integrate package that provides a ready to use `NoiseCancellation` `AudioFilter` using [krisp.ai](https://krisp.ai) technology. You can read more about integrating the `StreamVideoNoiseCancellation` SDK [here](../../guides/noise-cancellation). - -With that in mind, we can build a simple UI element that will allow the user to toggle on/off the Noise Cancellation feature. The element will also take care of showing/hiding depending on the feature's availability. - -```swift -struct NoiseCancellationButtonView: View { - - @Injected(\.streamVideo) var streamVideo - - @ObservedObject var viewModel: CallViewModel - @State var isNoiseCancellationAvailable = false - @State var isActive: Bool = false - - init(viewModel: CallViewModel) { - self.viewModel = viewModel - if let mode = viewModel.call?.state.settings?.audio.noiseCancellation?.mode { - self.isNoiseCancellationAvailable = mode != .disabled - } else { - self.isNoiseCancellationAvailable = false - } - self.isActive = streamVideo.videoConfig.noiseCancellationFilter?.id == streamVideo.videoConfig.audioProcessingModule.activeAudioFilterId - } - - var body: some View { - if - let call = viewModel.call, // Ensure we have an active call. - let noiseCancellationAudioFilter = streamVideo.videoConfig.noiseCancellationFilter, // Ensure that we have noiseCancellation audioFilter to activate. - streamVideo.isHardwareAccelerationAvailable // Ensure that the device supports Apple's neuralEngine. - { - Group { - if isNoiseCancellationAvailable { - Button { - if isActive { - call.setAudioFilter(nil) - isActive = false - } else { - call.setAudioFilter(noiseCancellationAudioFilter) - isActive = true - } - } label: { - Label { - Text(isActive ? "Disable Noise Cancellation" : "Noise Cancellation") - } icon: { - Image( - systemName: isActive - ? 
"waveform.path.ecg" - : "waveform.path" - ) - } - } - } - } - .onReceive(call.state.$settings.map(\.?.audio.noiseCancellation)) { - if let mode = $0?.mode { - isNoiseCancellationAvailable = mode != .disabled - } else { - isNoiseCancellationAvailable = false - } - } - } - } -} -``` diff --git a/docusaurus/docs/iOS/05-ui-cookbook/21-session-timers.mdx b/docusaurus/docs/iOS/05-ui-cookbook/21-session-timers.mdx deleted file mode 100644 index b82897862..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/21-session-timers.mdx +++ /dev/null @@ -1,285 +0,0 @@ ---- -title: Session Timers -description: Documentation on using session timers. ---- - -# Session Timers - -In some cases, you want to be able to limit the duration of a call. StreamVideo supports this use-case by allowing you to specify the duration of a call on creation. Additionally, it provides you a way to extend the duration during the call, if needed. - -## Low-level client capabilities - -First, let's see how we can create a call that has limited duration. - -```swift -let call = streamVideo.call(callType: "default", callId: "callId") -try await call.create(members: [], maxDuration: 300) -``` - -This code will create a call, which will have a duration of 300 seconds (5 minutes), as soon as the session is started (a participant joined the call). - -You can check the start date of a call with the following code: - -```swift -let startedAt = call.state.session?.startedAt -``` - -When the `maxDuration` of a call is specified, the call session also provides the `timerEndsAt` value, which provides the date when the call will end. When a call is ended, all the participants are removed from the call. - -```swift -let timerEndsAt = call.state.session?.timerEndsAt -``` - -## Extending the call duration - -You can also extend the duration of a call, both before or during the call. To do that, you should use the `call.update` method: - -```swift -let newDuration = (call.state.settings?.limits.maxDurationSeconds ?? 
0) + Int(extendDuration) -try await call.update(settingsOverride: .init(limits: .init(maxDurationSeconds: newDuration))) -``` - -When the call duration is extended, the `timerEndsAt` will be updated to reflect that change. - -# Example implementation - -Let's see how we can put these methods together in a sample session timer implementation. - -In this cookbook, we will show a popup that will notify the user that a call will end soon. It will also allow the creator of the call to extend its duration. - -Prerequisite for following along is a working StreamVideo integration and the ability to establish calls. To help with that, check our [tutorials](https://getstream.io/video/sdk/ios/#tutorials) and getting started [docs](https://getstream.io/video/docs/ios/basics/quickstart/). - -## Session Timer example - -Let's create a new Swift file, and call it `SessionTimer`. We will put the following contents in it: - -```swift -@MainActor class SessionTimer: ObservableObject { - - @Published var showTimerAlert: Bool = false { - didSet { - if showTimerAlert, let timerEndsAt { - sessionEndCountdown?.invalidate() - secondsUntilEnd = timerEndsAt.timeIntervalSinceNow - sessionEndCountdown = Timer.scheduledTimer( - withTimeInterval: 1.0, - repeats: true, - block: { [weak self] _ in - guard let self else { return } - Task { @MainActor in - if self.secondsUntilEnd <= 0 { - self.sessionEndCountdown?.invalidate() - self.sessionEndCountdown = nil - self.secondsUntilEnd = 0 - self.showTimerAlert = false - return - } - self.secondsUntilEnd -= 1 - } - } - ) - } else if !showTimerAlert { - sessionEndCountdown?.invalidate() - secondsUntilEnd = 0 - } - } - } - - @Published var secondsUntilEnd: TimeInterval = 0 - - private var call: Call? - private var cancellables = Set() - private var timerEndsAt: Date? { - didSet { - setupTimerIfNeeded() - } - } - - private var timer: Timer? - private var sessionEndCountdown: Timer? 
- - private let alertInterval: TimeInterval - - private var extendDuration: TimeInterval - - private let changeMaxDurationPermission = Permission( - rawValue: OwnCapability.changeMaxDuration.rawValue - ) - - let extensionTime: TimeInterval - - var showExtendCallDurationButton: Bool { - call?.state.ownCapabilities.contains(.changeMaxDuration) == true - } - - @MainActor init( - call: Call?, - alertInterval: TimeInterval, - extendDuration: TimeInterval = 120 - ) { - self.call = call - self.alertInterval = alertInterval - self.extendDuration = extendDuration - extensionTime = extendDuration - timerEndsAt = call?.state.session?.timerEndsAt - setupTimerIfNeeded() - subscribeForSessionUpdates() - } - - func extendCallDuration() { - guard let call else { return } - Task { - do { - let newDuration = (call.state.settings?.limits.maxDurationSeconds ?? 0) + Int(extendDuration) - extendDuration += extendDuration - log.debug("Extending call duration to \(newDuration) seconds") - try await call.update(settingsOverride: .init(limits: .init(maxDurationSeconds: newDuration))) - showTimerAlert = false - } catch { - log.error("Error extending call duration \(error.localizedDescription)") - } - } - } - - // MARK: - private - - private func subscribeForSessionUpdates() { - call?.state.$session.sink { [weak self] response in - guard let self else { return } - if response?.timerEndsAt != self.timerEndsAt { - self.timerEndsAt = response?.timerEndsAt - } - } - .store(in: &cancellables) - } - - private func setupTimerIfNeeded() { - timer?.invalidate() - timer = nil - showTimerAlert = false - if let timerEndsAt { - let alertDate = timerEndsAt.addingTimeInterval(-alertInterval) - let timerInterval = alertDate.timeIntervalSinceNow - if timerInterval < 0 { - showTimerAlert = true - return - } - log.debug("Starting a timer in \(timerInterval) seconds") - timer = Timer.scheduledTimer( - withTimeInterval: timerInterval, - repeats: false, - block: { [weak self] _ in - guard let self else { return } 
- log.debug("Showing timer alert") - Task { @MainActor in - self.showTimerAlert = true - } - } - ) - } - } - - deinit { - timer?.invalidate() - sessionEndCountdown?.invalidate() - } -} -``` - -The session timer will be used in our SwiftUI view shown during a call. It will provide information when the session timer popup should be shown, as well as a countdown timer. - -The `showTimerAlert` published variable will be set to true whenever the popup should be shown. This value is set whenever the `timerEndsAt` variable is updated. We listen to this value in the `subscribeForSessionUpdates` method above. - -When the popup is shown, we start a timer called `sessionEndCountdown`, which will count down the seconds until the call is ended. We will use this value in the UI layer to inform the user. - -We also created a method called `extendCallDuration`, which will allows us the extend the duration of the call, which would be an option provided to the user. - -Next, let's declare this timer in our view shown during a duration of the call: - -```swift -@StateObject var sessionTimer: SessionTimer - -init( - // other params ommited - call: Call? -) { - _sessionTimer = .init(wrappedValue: .init(call: call, alertInterval: 60)) -} -``` - -Our `SessionTimer` is created with a `Call` object, and an `alertInterval`. The alert interval in seconds tells us when the popup for session end should appear. The 60 seconds value means that the popup will be shown a minute before the session expires. Depending on your app's use-case, you can set a bigger value. - -We can set the popup view as an overlay to your existing call view: - -```swift -YourExistingCallView() - .overlay( - sessionTimer.showTimerAlert ? 
DemoSessionTimerView(sessionTimer: sessionTimer) : nil - ) -``` - -## View implementation - -Next, let's see a sample implementation of the `DemoSessionTimerView`: - -```swift -struct DemoSessionTimerView: View { - - @Injected(\.colors) var colors - @Injected(\.fonts) var fonts - public var formatter: DateComponentsFormatter = { - let formatter = DateComponentsFormatter() - formatter.unitsStyle = .positional - formatter.allowedUnits = [.hour, .minute, .second] - formatter.zeroFormattingBehavior = .pad - return formatter - }() - - @ObservedObject var sessionTimer: SessionTimer - - var body: some View { - VStack { - HStack { - if let duration = formatter.string(from: sessionTimer.secondsUntilEnd) { - Text("Call will end in \(duration)") - .font(fonts.body.monospacedDigit()) - .minimumScaleFactor(0.2) - .lineLimit(1) - } else { - Text("Call will end soon") - } - Divider() - if sessionTimer.showExtendCallDurationButton { - Button(action: { - sessionTimer.extendCallDuration() - }, label: { - Text("Extend for \(Int(sessionTimer.extensionTime / 60)) min") - .bold() - }) - } - } - .foregroundColor(Color(colors.callDurationColor)) - .padding(.horizontal) - .padding(.vertical, 4) - .background(Color(colors.participantBackground)) - .clipShape(Capsule()) - .frame(height: 60) - .padding(.top, 80) - - Spacer() - } - } -} -``` - -In the implementation, we are formatting the `secondsUntilEnd` value from the `sessionTimer`, in order to inform the user about the session end. - -Additionally, we expose a button for extending the call duration. The visibility of this button is controlled by the `showExtendCallDurationButton` from the `sessionTimer`, which checks if the user has the `changeMaxDuration` capability: - -```swift -var showExtendCallDurationButton: Bool { - call?.state.ownCapabilities.contains(.changeMaxDuration) == true -} -``` - -With that, you can have a working implementation of a session timer. 
\ No newline at end of file diff --git a/docusaurus/docs/iOS/05-ui-cookbook/22-manual-video-quality-selection.mdx b/docusaurus/docs/iOS/05-ui-cookbook/22-manual-video-quality-selection.mdx deleted file mode 100644 index 0688fc936..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/22-manual-video-quality-selection.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Manual Video Quality Selection -description: Documentation on manually setting the video quality settings. ---- - -# Manual Video Quality Selection -By default, our SDK chooses the incoming video quality that best matches the size of a video element for a given participant. It makes less sense to waste bandwidth receiving Full HD video when it's going to be displayed in a 320 by 240 pixel rectangle. - -However, it's still possible to override this behavior and manually request higher resolution video for better quality, or lower resolution to save bandwidth. It's also possible to disable incoming video altogether for an audio-only experience. - -:::note -Actual incoming video quality depends on a number of factors, such as the quality of the source video, and network conditions. Manual video quality selection allows you to specify your preference, while the actual resolution is automatically selected from the available resolutions to match that preference as closely as possible. -::: - -In this article we'll build a UI control for manual video quality selection. - -## Getting and Setting Incoming Video Settings -​ -To get the current incoming video quality settings, we will use the setIncomingVideoQualitySettings method on `Call` to set it and the `Call.state.incomingVideoQualitySettings` to access it. The value is of type `incomingVideoQualitySettings` which is an enum with the following case: -### `.none` -As the name suggests, it means that we have no preference on the incoming video quality settings. In that way we allow the SDK to take control and decide the best possible settings. 
- -### `.manual(group: Group, targetSize: CGSize)` -Allows manual control over the enabled video streams and based on the group applies the provided `targetSize` for the video, if required. - -### `.disabled(group: Group)` -Disables video streams for the specified group of session IDs. - -:::note -The `Group` type is another enum that is defined with the following cases: -#### `Group.all` -This group evaluates as true for every given participant. - -#### `Group.custom(sessionIds: Set)` -While this group evalues as true only for the participants whose sessionIds are contained from the provided set. -::: - -With that in mind we can do the following: - -```swift -// Set all incoming videos to a low resolution -await call.setIncomingVideoQualitySettings( - .manual( - group: .all, - targetSize: CGSize(width: 640, height: 480) - ) -) - -/// Set incoming videos of specific users to a low resolution -// let otherParticipant: CallParticipant = ... -await call.setIncomingVideoQualitySettings( - .manual( - group: .custom(sessionIds: [otherParticipant.sessionId]), - targetSize: CGSize(width: 640, height: 480) - ) -) - -// Disable incoming video for all participants -await call.setIncomingVideoQualitySettings(.disabled(group: .all)) - -// Disable incoming video for one specific participant -// let otherParticipant: CallParticipant = ... -await call.setIncomingVideoQualitySettings(.disabled(group: .custom(sessionIds: [otherParticipant.sessionId]))) -``` - -:::note -Even though `IncomingVideoQualitySettings` provides a lot of flexibility, in this cookbook we assume that the preferences apply to all call participants. -::: - -## Building Incoming Video Quality Selector -Now we're ready to build a UI control to display and change the incoming video quality. 
- -```swift -import Foundation -import StreamVideo -import StreamVideoSwiftUI -import SwiftUI - -struct IncomingVideoQualitySelector: View { - - enum ManualQuality: Hashable { - case auto - case fourK - case fullHD - case HD - case SD - case dataSaver - case disabled - - var policy: IncomingVideoQualitySettings { - switch self { - case .auto: - return .none - case .fourK: - return .manual(group: .all, targetSize: .init(width: 3840, height: 2160)) - case .fullHD: - return .manual(group: .all, targetSize: .init(width: 1920, height: 1080)) - case .HD: - return .manual(group: .all, targetSize: .init(width: 1280, height: 720)) - case .SD: - return .manual(group: .all, targetSize: .init(width: 640, height: 480)) - case .dataSaver: - return .manual(group: .all, targetSize: .init(width: 256, height: 144)) - case .disabled: - return .disabled(group: .all) - } - } - } - - @State private var isActive: Bool = false - - var call: Call? - - init(call: Call?) { - self.call = call - } - - var body: some View { - Menu { - buttonView(for: .auto) - buttonView(for: .fourK) - buttonView(for: .fullHD) - buttonView(for: .HD) - buttonView(for: .SD) - buttonView(for: .dataSaver) - buttonView(for: .disabled) - } label: { - Button { - isActive.toggle() - } label: { - Label { - Text("Manual quality") - } icon: { - Image(systemName: "square.resize") - } - } - } - } - - @MainActor - @ViewBuilder - private func buttonView( - for manualQuality: ManualQuality - ) -> some View { - let title = { - switch manualQuality { - case .auto: - return "Auto quality" - case .fourK: - return "4K 2160p" - case .fullHD: - return "Full HD 1080p" - case .HD: - return "HD 720p" - case .SD: - return "SD 480p" - case .dataSaver: - return "Data saver 144p" - case .disabled: - return "Disable video" - } - }() - Button { - execute(manualQuality) - } label: { - Label { - Text(title) - } icon: { - if manualQuality.policy == call?.state.incomingVideoQualitySettings { - Image(systemName: "checkmark") - } - } - } - } - - 
private func execute( - _ manualQuality: ManualQuality - ) { - Task { @MainActor in - await call?.setIncomingVideoQualitySettings(manualQuality.policy) - } - } -} -``` - -![Screenshot shows the newly created Manual Quality Selector component.](../assets/manual-video-quality-preview.png) \ No newline at end of file diff --git a/docusaurus/docs/iOS/05-ui-cookbook/23-network-disruption b/docusaurus/docs/iOS/05-ui-cookbook/23-network-disruption deleted file mode 100644 index c0a5b57d7..000000000 --- a/docusaurus/docs/iOS/05-ui-cookbook/23-network-disruption +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: Managing network disruptions during a call ---- - -## Summary - -This tutorial guides you through using the `setDisconnectionTimeout` method within the **Call** object to manage user disconnections due to network issues. By setting a timeout, users are given a grace period to reconnect before they are removed from the call, ensuring that temporary network disruptions don’t immediately end their participation. - -## Overview - -The `setDisconnectionTimeout` method allows you to specify how long a user can remain disconnected before being removed from the call. This is particularly useful when users experience brief network interruptions but can reconnect quickly. By setting a timeout, you ensure that users are only dropped if their disconnection persists beyond the specified duration. - -:::note -By default the `disconnectionTimeout` is set to `0`, allowing the user either to remain _in_ the call until their connection restores or select to hang up. -::: - -## Setting Up the Disconnection Timeout - -Once the call has been created, you can set a disconnection timeout that defines how long a user can stay disconnected before being dropped. 
Here’s how to do it: - -```swift -let call = streamVideo.call(callType: "default", callId: callId) - -// Set the disconnection timeout to 60 seconds -call.setDisconnectionTimeout(60) -``` - -## Inform the user after a disconnection occurs - -With that set, we want to make sure to inform the user once they get disconnected due to a network disruption. To do that, we are going to extend the FeedbackView we created [here](./18-call-quality-rating.mdx). Specifically we are going to update the `DemoFeedbackView` like below: -```swift -struct DemoFeedbackView: View { - - @Environment(\.openURL) private var openURL - @Injected(\.appearance) private var appearance - - @State private var email: String = "" - @State private var comment: String = "" - @State private var rating: Int = 5 - @State private var isSubmitting = false - @State private var toast: Toast? - - private weak var call: Call? - private var dismiss: () -> Void - private var isSubmitEnabled: Bool { !email.isEmpty && !isSubmitting } - - init(_ call: Call, dismiss: @escaping () -> Void) { - self.call = call - self.dismiss = dismiss - } - - var body: some View { - ScrollView { - VStack(spacing: 32) { - Image("feedbackLogo") - - VStack(spacing: 8) { - Text("How is your call going?") - .font(appearance.fonts.headline) - .foregroundColor(appearance.colors.text) - .lineLimit(1) - - Text("All feedback is celebrated!") - .font(appearance.fonts.subheadline) - .foregroundColor(.init(appearance.colors.textLowEmphasis)) - .lineLimit(2) - } - .frame(maxWidth: .infinity, alignment: .center) - .multilineTextAlignment(.center) - - VStack(spacing: 27) { - VStack(spacing: 16) { - TextField( - "Email Address *", - text: $email - ) - .textFieldStyle(DemoTextfieldStyle()) - - DemoTextEditor(text: $comment, placeholder: "Message") - } - - HStack { - Text("Rate Quality") - .font(appearance.fonts.body) - .foregroundColor(.init(appearance.colors.textLowEmphasis)) - .frame(maxWidth: .infinity, alignment: .leading) - - 
DemoStarRatingView(rating: $rating) - } - } - - HStack { - Button { - resignFirstResponder() - openURL(.init(string: "https://getstream.io/video/#contact")!) - } label: { - Text("Contact Us") - } - .frame(maxWidth: .infinity) - .foregroundColor(appearance.colors.text) - .padding(.vertical, 4) - .clipShape(Capsule()) - .overlay(Capsule().stroke(Color(appearance.colors.textLowEmphasis), lineWidth: 1)) - - Button { - resignFirstResponder() - isSubmitting = true - Task { - do { - try await call?.collectUserFeedback( - rating: rating, - reason: """ - \(email) - \(comment) - """ - ) - Task { @MainActor in - dismiss() - } - isSubmitting = false - } catch { - log.error(error) - dismiss() - isSubmitting = false - } - } - } label: { - if isSubmitting { - ProgressView() - } else { - Text("Submit") - } - } - .frame(maxWidth: .infinity) - .foregroundColor(appearance.colors.text) - .padding(.vertical, 4) - .background(isSubmitEnabled ? appearance.colors.accentBlue : appearance.colors.lightGray) - .disabled(!isSubmitEnabled) - .clipShape(Capsule()) - } - - Spacer() - } - .padding(.horizontal) - } - .toastView(toast: $toast) - .onAppear { checkIfDisconnectionErrorIsAvailable() } - } - - // MARK: - Private helpers - - @MainActor - func checkIfDisconnectionErrorIsAvailable() { - if call?.state.disconnectionError is ClientError.NetworkNotAvailable { - toast = .init( - style: .error, - message: "Your call was ended because it seems your internet connection is down." - ) - } - } -} -``` - -The parts that we changed here are: -> @State private var toast: Toast? -We now defined a state property for the Toast that is going to be presented to the user. - -> `.toastView(toast: $toast)` -We attach the `toastView` ViewModifier on our view (similar to how we are doing with `alert`). - -> `.onAppear { checkIfDisconnectionErrorIsAvailable() }` -On appear we are checking if there is an error of type `NetworkNotAvailable` and if there, we setup a toast to be presented. 
- -> `checkIfDisconnectionErrorIsAvailable()` -We define a method that will do the error checking for us. - -## Conclusion - -By configuring the `setDisconnectionTimeout` and handling disconnection errors using the `disconnectionError` property, you can provide a more seamless experience for users, allowing them a grace period to reconnect during temporary network issues. Additionally, by integrating user feedback mechanisms, you can give users clear notifications when they have been disconnected due to network problems, helping them understand the issue and take appropriate action. This approach enhances both the reliability of your video calls and user satisfaction, even in less-than-ideal network conditions. \ No newline at end of file diff --git a/docusaurus/docs/iOS/06-advanced/00-ringing.mdx b/docusaurus/docs/iOS/06-advanced/00-ringing.mdx deleted file mode 100644 index e1d477707..000000000 --- a/docusaurus/docs/iOS/06-advanced/00-ringing.mdx +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: Ringing -description: How to ring the call and notify all members ---- - -The `Call` object provides several options to ring and notify users about a call. - -### Ringing - -If a call was not created before, you should first create it and pass the ring value to `true`. For this, you can use the `create` method from the `Call` object. - -```swift -let call = streamVideo.call(callType: "default", callId: callId) -let callResponse = try await call.create(members: members, ring: true) -``` - -When `ring` is true, a VoIP notification will be sent to the members, provided you have the required setup for CallKit and PushKit. If you don't have a VoIP push setup, a regular push notification will be sent to the members. For more details around push notifications, please check [this page](../push-notifications). - -If `ring` is false, no push notification will be sent. - -If a call was created before, the method will just get it and send the notifications. 
If you are sure that a call exists, you can use the `get` method instead: - -```swift -let call = streamVideo.call(callType: "default", callId: callId) -let callResponse = try await call.get(ring: true) -``` - -Additionally, you can use the `ring` method which is just a shortcut for the method above: - -```swift -let call = streamVideo.call(callType: "default", callId: callId) -let callResponse = try await call.ring() -``` - -### Notifying users - -In some cases, you just want to notify users that you joined a call, instead of ringing. To do this, you should use the `notify` option: - -```swift -let call = streamVideo.call(callType: "default", callId: callId) -let callResponse = try await call.create(members: members, notify: true) -``` - -When `notify` is true, a regular push notification will be sent to all the members. This can be useful for livestreams apps or huddles. - -Similarly to ringing, you can use the `get` method if you are sure that the call exists: - -```swift -let call = streamVideo.call(callType: "default", callId: callId) -let callResponse = try await call.get(notify: true) -``` - -You can also use a shortcut of this method, with the `notify` method: - -```swift -let call = streamVideo.call(callType: "default", callId: callId) -let callResponse = try await call.notify() -``` - -### AutoLeave Policy - -There may be scenarios where once a call is concluded there is no point for a user to remain in the call if the other participants have already left. In cases like that you may find youself in a position where you need to last participant to automatically leave the call. 
Luckily, the StreamVideoSwiftUI SDK makes that very easy by allowing you to set the `ParticipantAutoLeavePolicy`, like below: - -```swift -let callViewModel = CallViewModel() -callViewModel.participantAutoLeavePolicy = LastParticipantAutoLeavePolicy() -``` - -By doing that, we are instructing the `CallViewModel` to observe the participants during the ringing flow call (incoming or outgoing). Once a user remain the last one, the callViewModel will automatically trigger the flow to leave the call. - -:::note -The `participantAutoLeavePolicy` is set to `DefaultParticipantAutoLeavePolicy` which is a no-operation policy, meaning that if the user remain in the call alone, no automatic action will be performed. -::: \ No newline at end of file diff --git a/docusaurus/docs/iOS/06-advanced/01-deeplinking.mdx b/docusaurus/docs/iOS/06-advanced/01-deeplinking.mdx deleted file mode 100644 index c45769d33..000000000 --- a/docusaurus/docs/iOS/06-advanced/01-deeplinking.mdx +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: Deeplinking -description: How to start a call from a deep link. ---- - -### Introduction - -It's common in mobile calling apps that a call can be started from a link, that should start the app and dial in immediately. This can be accomplished by [universal links on iOS](https://developer.apple.com/ios/universal-links/). The setup needs to be done in your hosting app, while the StreamVideo SDK allows for a call to be invoked from a universal link. - -### App ID setup - -In order to support universal links, you need to have paid Apple developer account. On the Apple developer website, you will need to add the "associated domains" for your app id. - -Next, you need to enable the "Associated Domains" capability for your app in Xcode, and specify an app link, in the format `applinks:{host}`. - -Next, you need to upload `apple-app-site-association` file, either to the root directory of your website, or in the `.wellKnown` directory. 
The AASA (short for apple-app-site-association) is a JSON file that lives on your website and associates your domain with your native app. - -In its simplest form, your AASA file should have the following format: - -```swift -{ - "applinks": { - "apps": [], - "details": [{ - "appID": "{your_team_id}.{your_bundle_id}", - "paths": [ - "*" - ] - }] - } -} -``` - -You can also specify exact paths if you want to have a stricter control over which ones can invoke your app. You can also specify several apps on the same domain. - -Before proceeding, please make sure that the uploaded file is a valid one, and it's deployed at the right place. For this, you can use Apple's [validation tool](https://search.developer.apple.com/appsearch-validation-tool). - -### iOS Code - -The minimal data the link that should invoke the call should contain are the call id and type. In your iOS app, you should parse the parameters of your link, and use those to start a call from our `CallViewModel`. Additionally, you should already have a user logged in, since if you're app was not running, you will need to setup the `StreamVideo` client with a user first. - -One example of how your URL should look like could be this: `https://{host}/{path}/{some_call_id}?type={some_call_type}`. - -If you are using SwiftUI, the easiest way to handle these links is to attach the `onOpenURL` modifier: - -```swift -YourView() - .onOpenURL { url in - handleDeepLink(from: url) - } - -private func handleDeepLink(from url: URL) { - if appState.userState == .notLoggedIn { - return - } - let callId = url.lastPathComponent - let queryParams = url.queryParameters - let callType = queryParams["type"] ?? "default" - appState.deeplinkInfo = DeeplinkInfo(callId: callId, callType: callType) -} -``` - -In this example, we are keeping the app state in a simple struct, that contains information about a `@Published` `deeplinkInfo`. The `DeeplinkInfo` type contains information about the `callId` and the `callType`. 
- -Next, your SwiftUI view that hosts the calling functionality, can react to changes of the `deeplinkInfo` and join a call accordingly. - -```swift -ViewThatHostsCall() - .onReceive(appState.$deeplinkInfo, perform: { deeplinkInfo in - if deeplinkInfo != .empty { - callViewModel.joinCall(callType: deeplinkInfo.callType, callId: deeplinkInfo.callId) - appState.deeplinkInfo = .empty - } - }) -``` - -If you are using `UIKit`, you will need to implement a similar logic in `application:continueUserActivity:restorationHandler`, in your `AppDelegate`. \ No newline at end of file diff --git a/docusaurus/docs/iOS/06-advanced/02-push-notifications.mdx b/docusaurus/docs/iOS/06-advanced/02-push-notifications.mdx deleted file mode 100644 index 2fe9fa4a9..000000000 --- a/docusaurus/docs/iOS/06-advanced/02-push-notifications.mdx +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: Push Notifications -description: Push Notifications setup ---- - -The `StreamVideo` SDK supports two types of push notifications: regular and VoIP notifications. You can use one of them, or both, depending on your use-case. - -Push notifications are sent in the following scenarios: -- you create a call with the `ring` value set to true. In this case, a VoIP notification that shows a ringing screen is sent. -- you create a call with the `notify` value set to true. In this case, a regular push notification is sent. -- you haven't answered a call. In this case, a missed call notification is sent (regular push notification). - -### StreamVideo setup - -The push notification config is provided optionally, when the SDK is initalized. By default, the config uses `apn` as a push provider, for both VoIP and regular push. The push provider name for regular push is "apn", while for VoIP, the name is "voip". - -You should have these providers configured on your dashboard. If you don't have them, an error would be thrown when trying to add a device. - -You can also change the names or push providers. 
One restriction is that for VoIP notifications, you have to use `apn` (Firebase doesn't have such support). - -Here's an example how to create your own `PushNotificationsConfig`: - -```swift -let notificationsConfig = PushNotificationsConfig( - pushProviderInfo: PushProviderInfo(name: "apn", pushProvider: .apn), - voipPushProviderInfo: PushProviderInfo(name: "voip", pushProvider: .apn) -) -``` - -When you initialize the SDK, you should provide the notifications config as a parameter: - -```swift -let streamVideo = StreamVideo( - apiKey: apiKey, - user: user, - token: token, - videoConfig: VideoConfig(), - pushNotificationsConfig: notificationsConfig, - tokenProvider: tokenProvider -) -``` - -### Push Notifications - -Push notifications can be sent for many other different events, such as participants joining, streaming started etc. The setup for push notifications is the standard one - by providing a certificate or APNs key. - -#### Dashboard Configuration -In order to configure Push Notifications, you need to visit your app's dashboard. From there, select the `Push Notifications` menu option as you can see in the image below: - -![Screenshot shows the creation of a VoIP certificate](../assets/push-notifications-dashboard-menu.png) - -From there you can create a new configuration by clicking the `New Configuration` button. After selecting the Push Notification Provider you want, you will be asked to provide the following information: - -| Field Name | Usage description | -|---|---| -| `Name` | Used to select this configuration in SDK or API calls. | -| `Description` | Allows you to set a description on the configuration to help identify its usage in your app's context. | -| `Bundle/Topic ID` | Your app's bundle id to which the notification will be pushed to. | -| `TeamID` | The Team ID is generated by Apple for your developer account. Can be found in the top right of your Apple developer account. 
| -| `KeyID` | This is the unique identifier for the p8 authentication key. You can find your Key ID in the keys section of your [Apple developer](https://developer.apple.com/account/) account. | -| `.p8 Token or .p12 Certificate` | The token or certificate that will be used to send the push notification. | - -![Screenshot shows the creation of a VoIP certificate](../assets/developer-console-teamid-keyid-location.png) - -For our example, we are using `apn` as name and we are filling the remaining information as you can see in the image below: - -![Screenshot shows the creation of a VoIP certificate](../assets/regular-push-configuration-example.png) - -#### App Configuration - -Here's an example setup in the `AppDelegate`: - -```swift -class AppDelegate: NSObject, UIApplicationDelegate, UNUserNotificationCenterDelegate { - - func application( - _ application: UIApplication, - didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]? = nil - ) -> Bool { - UNUserNotificationCenter.current().delegate = self - setupRemoteNotifications() - return true - } - - func application( - _ application: UIApplication, - didRegisterForRemoteNotificationsWithDeviceToken deviceToken: Data - ) { - let deviceToken = deviceToken.map { String(format: "%02x", $0) }.joined() - AppState.shared.pushToken = deviceToken - } - - func userNotificationCenter( - _ center: UNUserNotificationCenter, - didReceive response: UNNotificationResponse, - withCompletionHandler completionHandler: @escaping () -> Void - ) { - log.debug("push notification received \(response.notification.request.content)") - } - - func setupRemoteNotifications() { - UNUserNotificationCenter - .current() - .requestAuthorization(options: [.alert, .sound, .badge]) { granted, _ in - if granted { - DispatchQueue.main.async { - UIApplication.shared.registerForRemoteNotifications() - } - } - } - } -} -``` - -In the code above, we are first asking for push notification permission, and if granted, we are 
registering for push notifications. - -Whenever we receive a push notification token, we are storing it in our `AppState` (in the demo app). When the `StreamVideo` client is initalized, we are registering the device with our backend, using the `setDevice` method: - -```swift -private func setPushToken() { - if let pushToken, let streamVideo { - Task { - try await streamVideo.setDevice(id: pushToken) - } - } -} -``` - -Additionally, consider storing the push notifications tokens locally. You would need to remove them from the Stream backend if the user decides to logout from your app. - -With that, the push notifications setup is done and you should be able to receive notifications, if you have proper setup in your Apple developer account, as well as our dashboard. - -If you want to customize the content of the push notification, consider implementing a [Notification Service Extension](https://developer.apple.com/documentation/usernotifications/modifying_content_in_newly_delivered_notifications). - -#### VoIP Notifications - -The VoIP notifications are sent only when a new call is created and the `ring` flag is set to true. You can find more details about the setup in the [CallKit integration guide](./03-callkit-integration.mdx). - -When a VoIP notification is sent, a regular push notification is not delivered. VoIP notifications don't work on the iOS simulator, you would need to use a real device. - -In order to register a device for VoIP push notifications, you should use the following method: - -```swift -try await streamVideo.setVoipDevice(id: voipPushToken) -``` - -### Removing devices - -If you store the device tokens locally, you can remove them at a later stage. For example, the user can explicitly ask for this from your app UI, or you can do this for the user when they decide to logout from your app. 
- -In order to delete a device, you should use the following method from the `StreamVideo` SDK: - -```swift -try await streamVideo.deleteDevice(id: savedToken) -``` - -### Listing devices - -If you want to list the devices that are registered to the current user, you should use the `listDevices` method from the `StreamVideo` object: - -```swift -let devices = try await streamVideo.listDevices() -``` - -This method returns an array of the `Device` type, that contains information about the device: - -```swift -/** Date/time of creation */ -public var createdAt: Date -/** Whether device is disabled or not */ -public var disabled: Bool? -/** Reason explaining why device had been disabled */ -public var disabledReason: String? -public var id: String -public var pushProvider: String -public var pushProviderName: String? -``` diff --git a/docusaurus/docs/iOS/06-advanced/03-callkit-integration.mdx b/docusaurus/docs/iOS/06-advanced/03-callkit-integration.mdx deleted file mode 100644 index 3fa064790..000000000 --- a/docusaurus/docs/iOS/06-advanced/03-callkit-integration.mdx +++ /dev/null @@ -1,279 +0,0 @@ ---- -title: CallKit integration -description: Setup for VoIP calls ---- - -## Introduction - -[CallKit](https://developer.apple.com/documentation/callkit) allows us to have system-level phone integration. With that, we can use CallKit to present native incoming call screens, even when the app is closed. CallKit integration also enables the calls made through third-party apps be displayed in the phone's recent call list in the Phone app. - -The StreamVideo SDK is compatible with CallKit, enabling a complete calling experience for your users. - -## Setup - -In order to get started, you would need have a paid Apple developer account, and an app id with push notifications enabled. 
- -In the "Signing & Capabilities" section of your target, make sure that in the "Background Modes" section you have selected: - -- "Voice over IP" -- "Remote notifications" -- "Background processing" - -![Screenshot shows the required background modes in Xcode](../assets/callkit_01.png) - -Next, you need to create a VoIP calling certificate. In order to do that, go to your Apple developer account, select "Certificates, Identifiers & Profiles" and create a new certificate. Make sure to select `VoIP Services Certificate`, located under the "Services" section. Follow the steps to create the required certificate. - -![Screenshot shows the creation of a VoIP certificate](../assets/callkit_02.png) - -After you've created the certificate, you would need to convert the `aps.cer` file to a .p12 certificate file using keychain access and upload it to our dashboard. - -:::note -Make sure that you configure no password for the p12 file. -::: - -### Dashboard Configuration - -In order to configure VoIP Push Notifications, you need to visit your app's dashboard. From there, select the `Push Notifications` menu option as you can see in the image below: - -![Screenshot shows the creation of a VoIP certificate](../assets/push-notifications-dashboard-menu.png) - -From there you can create a new configuration by clicking the `New Configuration` button. After selecting the Push Notification Provider you want, you will be asked to provide the following information: - -| Field Name | Usage description | -|---|---| -| `Name` | Used to select this configuration in SDK or API calls. | -| `Description` | Allows you to set a description on the configuration to help identify its usage in your app's context. | -| `Bundle/Topic ID` | Your app's bundle id to which the notification will be pushed to. | -| `TeamID` | The Team ID is generated by Apple for your developer account. Can be found in the top right of your Apple developer account. 
| -| `KeyID` | This is the unique identifier for the p8 authentication key. You can find your Key ID in the keys section of your [Apple developer](https://developer.apple.com/account/) account. | -| `.p8 Token or .p12 Certificate` | The token or certificate that will be used to send the push notification. | - -For our example, we are using `voip` as name and we are filling the remaining information as you can see in the image below: - -![Screenshot shows the creation of a VoIP certificate](../assets/voip-push-configuration-example.png) - -## iOS app integration - -From iOS app perspective, there are two Apple frameworks that we need to integrate in order to have a working CallKit integration: `CallKit` and `PushKit`. [PushKit](https://developer.apple.com/documentation/pushkit) is needed for handling VoIP push notifications, which are different than regular push notifications. - -We have a working CallKit integration in our demo app. Feel free to reference it for more details, while we will cover the most important bits here. - -In order for the CallKit integration to work, you should have a logged in user into your app. For simplicity, we are saving the user in the `UserDefaults`, but we strongly discourage that in production apps, since it's not secure. - -### CallKit integration - -The StreamVideo SDK provides you with the tools for an easy integration with CallKit. Let' see how: - -#### Logging in - -Firstly, whenever our user logs in and we instantiate a new StreamVideo object, we need to pass it to the `CallKitAdapter` object like below: -```swift -@Injected(\.callKitAdapter) var callKitAdapter - -let streamVideo = StreamVideo( - apiKey: apiKey, - user: user, - token: token, - tokenProvider: { _ in } -) -callKitAdapter.streamVideo = streamVideo -``` - -In the next (and final step) we are going to instruct the `CallKitAdapter` to register for VoIP push notifications. 
We can do that in our SwiftUI `onAppear` block as below: -```swift -struct MyCustomView: View { - @Injected(\.callKitAdapter) var callKitAdapter - - var body: some View { - EmptyView() // Your content goes here. - .onAppear { - callKitAdapter.registerForIncomingCalls() - - // Here we can also inject an image (e.g. a logo) that will be used - // by CallKit's UI. - callKitAdapter.iconTemplateImageData = UIImage(named: "logo")?.pngData() - } - } -} -``` - -or if you are using `UIKit`, you can do the same in your `UIViewController` as below: -```swift -final class MyCustomViewController: UIViewController { - @Injected(\.callKitAdapter) var callKitAdapter - - override func viewDidLoad() { - super.viewDidLoad() - callKitAdapter.registerForIncomingCalls() - callKitAdapter.iconTemplateImageData = UIImage(named: "logo")?.pngData() - } -} -``` - -#### Logging out - -In order to unregister the device from receiving VoIP push notifications we need to simply call the `CallKitAdapter` and update its `streamVideo` property with `nil` -```swift -struct MyCustomView: View { - @Injected(\.streamVideo) var streamVideo - @Injected(\.callKitAdapter) var callKitAdapter - @Injected(\.callKitPushNotificationAdapter) var callKitPushNotificationAdapter - - var body: some View { - Button { - let deviceToken = callKitPushNotificationAdapter.deviceToken - if !deviceToken.isEmpty { - Task { - // Unregister the device token - try await streamVideo.deleteDevice(id: deviceToken) - } - } - // Perform any other logout operations - callKitAdapter.streamVideo = nil - } label: { - Text("Logout") - } - } -} -``` - -By doing that, the `CallKitAdapter` will make sure to unregister the `VoIP` token from receiving notifications._createMdxContent - -#### Call display name - -The Stream backend fills 2 properties in the VoIP push notification payload that can be used as the display name of the call. 
-- **call_display_name** -The `call_display_name` is a calculated property that evaluates the following customData fields on the Call object, in the order they are being presented: -- `display_name` -- `name` -- `title` - -```swift -let call = streamVideo.call(callType: "default", callId: UUID().uuidString) -let result = try await call.create( - memberIds: members, - custom: ["display_name": .string("My awesome group")], - ring: true -) -``` -If none of the fields above are being set, the property will be an empty string. - -- **created_by_display_name** -The property is always set and contains the name of the user who created the call. - -#### Call Settings when accepting a call - -Depending on your business logic, you may need users to join call with different `CallSettings`(e.g auioOn=true while videoOn=false). In order to achieve that when using the `CallKitAdapter` you can provide your custom `CallSettings` at any point before you receive a call: - -```swift -@Injected(\.callKitAdapter) var callKitAdapter - -callKitAdapter.callSettings = CallSettings(audioOn: true, videoOn: false) -``` - -#### Call's type suffix - -Depending on the `Call` type `CallKit` adds a suffix in the push notification's subtitle (which contains the application name). That suffix can either be `Audio` or `Video`. `CallKitService` allows you to configure what the supported call types are, by setting the `CallKitService.supportsVideo` property like below: - -```swift -@Injected(\.callKitService) var callKitService - -// Setting the `supportsVideo` property to `true` will -// make the subtitle's format be: ` Video` -callKitService.supportsVideo = true - -// Setting the `supportsVideo` property to `false` will -// make the subtitle's format be: ` Audio` -callKitService.supportsVideo = false -``` - -`CallKitService.supportsVideo` default value is `false`. 
- -### Registering for VoIP Push Notifications - -Even though using the `CallKitAdapter` abstracts most of the `CallKit` & `PushKit` complexity from you, there is still a need for you to register/unregister the deviceToken. - -You can observe the `voIP` token value by accessing the `deviceToken` property on the `CallKitPushNotificationAdapter`. Whenever the value changes, you should register it with StreamVideo, like below: -```swift -@Injected(\.streamVideo) var streamVideo -@Injected(\.callKitPushNotificationAdapter) var callKitPushNotificationAdapter -var lastVoIPToken: String? -var voIPTokenObservationCancellable: AnyCancellable? - -voIPTokenObservationCancellable = callKitPushNotificationAdapter.$deviceToken.sink { [streamVideo] updatedDeviceToken in - Task { - if let lastVoIPToken { - try await streamVideo.deleteDevice(id: lastVoIPToken) - } - try await streamVideo.setVoipDevice(id: updatedDeviceToken) - lastVoIPToken = updatedDeviceToken - } -} -``` - -By using the provided `CallKit` integration tools you can start receiving VoIP push notifications in a breeze. - -If you woud like to use your integration, there is no need to worry about `StreamVideo` SDK's integration. The integration is only initialized when you set the `CallKitAdapter.streamVideo` property. - -### Starting a call from Recents - -When a call is started via `CallKit`, it appears in the "Recents" section in the native iOS phone app. Usually, when you tap on a recents entry, you should be able to call the person again. - -For this, we need to add a `INStartCallIntent` intent extension. To do this, go to your targets in Xcode and add a new "Intents Extension". 
- -After the extension is created, go to its `IntentHandler` and add the following code: - -```swift -import Intents - -class IntentHandler: INExtension, INStartCallIntentHandling { - override func handler(for intent: INIntent) -> Any { - return self - } - - func handle(intent: INStartCallIntent, completion: @escaping (INStartCallIntentResponse) -> Void) { - let userActivity = NSUserActivity(activityType: NSStringFromClass(INStartCallIntent.self)) - let response = INStartCallIntentResponse(code: .continueInApp, userActivity: userActivity) - - completion(response) - } -} -``` - -In the `Info.plist` file of the extension, add the `INStartCallIntent` value in `IntentsSupported`, under `NSExtension` -> `NSExtensionAttributes`. - -With this setup, our app has the ability to react to `INStartCallIntent`s. Next, let's handle these intents in our SwiftUI code. - -In the `CallView` in our DemoApp, we are adding the following code: - -```swift -var body: some View { - HomeView(viewModel: viewModel) - .modifier(CallModifier(viewModel: viewModel)) - .onContinueUserActivity( - NSStringFromClass(INStartCallIntent.self), - perform: { - userActivity in - let interaction = userActivity.interaction - if let callIntent = interaction?.intent as? INStartCallIntent { - - let contact = callIntent.contacts?.first - - guard let name = contact?.personHandle?.value else { return } - viewModel.startCall( - callType: .default, - callId: UUID().uuidString, - members: [.init(userId: name)], - ring: true - ) - } - } - ) -} -``` - -The important part is the `onContinueUserActivity`, where we listen to `INStartCallIntent`s. In the closure, we are extracting the first contact and take their name, which is the user id. We use that name to start a ringing call. - -Additionally, if you have integration with the native contacts on iOS (`Contacts` framework), you can extract the full name, phone number etc, and use those to provide more details for the members. 
Alternatively, you can call our `queryUsers` method to get more user information that's available on the Stream backend. - -If you are using UIKit, you should implement the method `application(_ application: UIApplication, continue userActivity: NSUserActivity, restorationHandler: @escaping ([UIUserActivityRestoring]?) -> Void)` in your `AppDelegate`, and provide a similar handling as in the SwiftUI sample. diff --git a/docusaurus/docs/iOS/06-advanced/04-screensharing.mdx b/docusaurus/docs/iOS/06-advanced/04-screensharing.mdx deleted file mode 100644 index 550670ca0..000000000 --- a/docusaurus/docs/iOS/06-advanced/04-screensharing.mdx +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: Screen sharing -description: Setup for screen sharing ---- - -## Introduction - -The StreamVideo iOS SDK has support for displaying screensharing tracks, as well as screensharing from an iOS device. There are two options for screensharing from an iOS device: -- in-app screensharing - the screen is shared only while the app is active. -- broadcasting - the device shares the screen even when the app goes into the background. - -Both of these options use Apple's framework `ReplayKit` for broadcasting the user's screen. - -In order for a user to be able to share their screen, they must have the `screenshare` capability configured for the call they are in. - -This can be configured on the dashboard for your call type: - -![Screenshot shows screensharing dashboard setting](../assets/screensharing-dashboard.png) - -## In-app screensharing - -In-app screensharing broadcasts only the app's screens. 
When you are in a call, and you have the required capability, you can start screensharing by calling the `startScreensharing` method, with the `inApp` screensharing type: - -```swift -Task { - let call = streamVideo.call(callType: "default", callId: "123") - try await call.join() - try await call.startScreensharing(type: .inApp) -} -``` - -If you use our UI components and the `CallViewModel`, the same method is also available from there. - -When the method is invoked, `ReplayKit` will ask for the user's consent that their screen will be shared. Only after the permission is granted, the screensharing starts. - -If you want to stop screensharing, you need to call the method `stopScreensharing`, available from both the `Call` object and the `CallViewModel`: - -```swift -Task { - try await call.stopScreensharing() -} -``` - -When the current user shares their screen, the `screenshareTrack` property can be used to present the track into a video rendering view. For example, you can use our `VideoRendererView`: - -```swift -VideoRendererView( - id: "\(participant.id)-screenshare", - size: videoSize, - contentMode: .scaleAspectFit -) { view in - if let track = participant.screenshareTrack { - log.debug("adding screensharing track to a view \(view)") - view.add(track: track) - } -} -``` - -If you use our default UI components, this logic is already handled for you. 
In that case, you can customize the look and feel of the screensharing view, by implementing the `makeScreensharingView` in our `ViewFactory`: - -```swift -public func makeScreenSharingView( - viewModel: CallViewModel, - screensharingSession: ScreenSharingSession, - availableFrame: CGRect -) -> some View { - CustomScreenSharingView( - viewModel: viewModel, - screenSharing: screensharingSession, - availableFrame: availableFrame - ) -} -``` - -We also have a component called `ScreenshareIconView` that you can integrate into your video controls, to directly start screensharing: - -```swift -ScreenshareIconView(viewModel: viewModel) -``` - -## Broadcasting - -In most cases, you would need to share your screen while you are in the background, to be able to open other apps. For this, you need to create a Broadcast Upload Extension: - -![Screenshot shows how to create broadcast upload extension in Xcode](../assets/broadcast-extension.png) - -After you create the extension, there should be a class called `SampleHandler`, that implements the `RPBroadcastSampleHandler` protocol. Remove the protocol conformance and the methods, import our `StreamVideo` SDK, and make the `SampleHandler` a subclass of our class called `BroadcastSampleHandler`, that internally handles the broadcasting. - -The resulting file should look like this: - -```swift -import ReplayKit -import StreamVideo - -class SampleHandler: BroadcastSampleHandler {} -``` - -Next, you should create an app group for your app id. You can find more details on how to create app groups on Apple's developer [website](https://developer.apple.com/documentation/xcode/configuring-app-groups#Create-App-Groups-for-all-other-platforms). Make sure that both the app and its extension have the same app group id configured, since that one will be used for passing data between them. 
- -Finally, you should add a new entry in the `Info.plist` files in both the app and the broadcast extension, with a key `BroadcastAppGroupIdentifier` and a value of the app group id. - -With that, the setup for the broadcast upload extension is done. - -### Starting screensharing - -After you have done the setup above, you can start screensharing. For this, you would need to use Apple's [RPSystemBroadcastPickerView](https://developer.apple.com/documentation/replaykit/rpsystembroadcastpickerview), which presents a system UI for starting the screensharing. - -For easier integration, we offer a `BroadcastIconView`, which is a already setup component that triggers the system UI, listens to events when interacting with it, and starts the screensharing accordingly. - -```swift -BroadcastIconView( - viewModel: viewModel, - preferredExtension: "bundle_id_of_broadcast_upload_extension" -) -``` - -The `preferredExtension` parameter should match the bundle id of the broadcast upload extension you created above. 
- -If you want to implement your own UI component for broadcasting, you can use our building blocks, `BroadcastPickerView` (a SwiftUI wrapper for `RPSystemBroadcastPickerView`) and the `BroadcastObserver` observable object, which provides information about the `BroadcastState`, with the following values: - -```swift -public enum BroadcastState { - case notStarted - case started - case finished -} -``` - -Here's an example implementation: - -```swift -public struct BroadcastIconView: View { - - var call: Call - @StateObject var broadcastObserver = BroadcastObserver() - let size: CGFloat - let preferredExtension: String - - public init( - call: Call, - preferredExtension: String, - size: CGFloat = 50 - ) { - self.call = call - self.preferredExtension = preferredExtension - self.size = size - } - - public var body: some View { - BroadcastPickerView( - preferredExtension: preferredExtension - ) - .onChange(of: broadcastObserver.broadcastState, perform: { newValue in - if newValue == .started { - startScreensharing() - } else if newValue == .finished { - stopScreensharing() - } - }) - .onAppear { - broadcastObserver.observe() - } - } - - private func startScreensharing() { - Task { - try await call.startScreensharing(type: .broadcast) - } - } - - private func stopScreensharing() { - Task { - try await call.stopScreensharing() - } - } -} -``` diff --git a/docusaurus/docs/iOS/06-advanced/04-troubleshooting-calls.mdx b/docusaurus/docs/iOS/06-advanced/04-troubleshooting-calls.mdx deleted file mode 100644 index 045afa7b7..000000000 --- a/docusaurus/docs/iOS/06-advanced/04-troubleshooting-calls.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Troubleshooting Guide ---- - -There are several possible integration issues that can lead to calls not being established. This section will cover the most frequent ones. - -### Connection issues - -Connection issues usually happen when you provide an invalid token during the SDK setup. 
When this happens, a web socket connection can't be established with our backend, resulting in errors when trying to connect to a call. - -#### Expired tokens - -When you initialize the `StreamVideo` object, you provide a token, as described [here](https://getstream.io/video/docs/ios/guides/client-auth/). The tokens generated in the docs have an expiry date, therefore please make sure to always use a token with a valid expiry date. You can check the contents of a JWT token on websites like [this](https://jwt.io) one. - -Additionally, when expiring tokens are used, you need to provide a `tokenProvider` when creating `StreamVideo`, that will be invoked when the existing token expires. This is your chance to update the token by calling your backend. - -#### Wrong secret for token generation - -When you start integrating the SDK into your app, you might copy-paste the token from the docs into your project. However, that will not work. Tokens are generated with the help of the app secret (available in your dashboard), and are unique per app id. Your app id is different than the demo apps we have as examples in our docs. - -On website like [this](https://jwt.io) one, you can verify if the token is signed with the correct signature. - -While developing, you can manually generate tokens by providing your secret and the user's ID [here](https://getstream.io/chat/docs/ios-swift/tokens_and_authentication/?language=swift#manually-generating-tokens). However, note that for production usage, your backend would need to generate these tokens. - -#### User-token mismatch - -The token can be valid and correct, but for the wrong user. Make sure that the token you provide matches the id of the user that is used when creating the `StreamVideo` object. - -#### Third-party network debuggers - -There are network debuggers like [Wormholy](https://github.com/pmusolino/Wormholy), that allow you to see all the network requests done with your app. 
However, some of them can interfere and block our web socket connection, like in this [case](https://github.com/pmusolino/Wormholy/issues/118). In order to prevent this, you need to exclude our hosts from debugger tools, as described on the linked issue. - -### Ringing calls issues - -Ringing calls issues usually present themselves in a failure to show the incoming call screen to the user we're trying to call. There are 2 scenarios when an incoming screen is shown: -- your app is in the foreground and the web socket connection is active. In this case, an in-app ringing screen is shown to the other user. -- your app is killed or in the background. In this case, you need `CallKit` integration in order to show the iPhone's native calling screen. Follow [these docs](https://getstream.io/video/docs/ios/advanced/callkit-integration/) for more details on how to accomplish that. - -#### Members in a call - -One common issue is that you only specify one user and try to call the same user on another device. This will not work, if you are the caller, you will not receive a notification that you're being called - you can't call yourself. - -As you would do it in the real world, you would need to specify another member (or members) that you want to call. Another important note - that member should also exist in Stream's platform (it must have connected at least once). This is needed because we need to know the user's device and where to send the call notification. - -#### Reusing a call id - -Call IDs in general can be reused - you can join a call with the same id many times. However, the ringing is done only once per call ID. Therefore, if you implement calls with ringing, make sure that you provide a unique ID every time, in order for the ring functionality to work. One option is to use a `UUID` as a call ID. 
- -#### CallKit integration issues - -If you followed the CallKit [guide](https://getstream.io/video/docs/ios/advanced/callkit-integration/), and still have issues, here are some troubleshooting steps: -- make sure there are no connection issues (see points above) -- check if the generated VoIP certificate matches the bundle id specified in the dashboard -- check if the app is using the correct bundle id -- check if you have created push providers and you specified their correct names when creating the SDK -- check if you registered the device correctly, by examining whether it's returned in the `me` response in the `connection.ok` event -- check the "Webhook & Push Logs" section on the dashboard to see if there are any push notification failures -- try sending a hardcoded VoIP notification using a [third-party service](https://apnspush.com/), to make sure your app integration is correct - -Note that if you have failed to report a VoIP notification to `CallKit`, the operating system may stop sending you notifications. In those cases, you need to re-install the app and try again. - -### Logs - -For further debugging, you can turn on more detailed logging. In order to do that, add the following code before the `StreamVideo` instance is created: - -```swift -LogConfig.level = .debug -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/06-advanced/05-picture-in-picture.mdx b/docusaurus/docs/iOS/06-advanced/05-picture-in-picture.mdx deleted file mode 100644 index 23970b320..000000000 --- a/docusaurus/docs/iOS/06-advanced/05-picture-in-picture.mdx +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Picture-in-Picture mode -description: How we support picture in picture for video calls ---- - -Picture-in-Picture (PiP) is an essential part of video calls on mobile. It provides users the possibility to perform other actions on their phones, while still being on a call. The StreamVideo iOS SDK has PiP support out of the box, on devices running iOS 15 and above. 
- -If you use our view components, as soon as you enter background, the native iOS PiP view will appear. It will show the first participant (based on the participant sorting criteria), that is not the current user. If a user is screen sharing, the screen sharing track would be shown instead. - -### Toggling Picture-in-Picture support - -You have control over when the Picture-in-Picture is enabled by accessing the `isPictureInPictureEnabled` property on the `CallViewModel`. By default, the value is set to `true`. - -### Picture-in-Picture on custom Views - -In case where you implement your own `CallView` but still want StreamVideo SDK to manage Picture in Picture, you can use the `enablePictureInPicture` `ViewModifier` on your `CallView`. Internally that will hool StreamVideo's PictureInPicture logic on your View. Below you can see an example usage of the `ViewModifier` from our DemoApp: - -```swift -public struct CallView: View { - - ... - - @ObservedObject var viewModel: CallViewModel - - ... - - public var body: some View { - ... - .enablePictureInPicture(viewModel.isPictureInPictureEnabled) - } - - ... -} -``` - -### Current user camera - -By default, iOS does not allow access to the user's camera, while in background. There are two possibilities to display the user's camera in video calls and PiP in this state: -- Your app has the [multitasking camera access](https://developer.apple.com/documentation/bundleresources/entitlements/com_apple_developer_avfoundation_multitasking-camera-access?changes=__8) entitlement. This entitlement can be requested directly from Apple. -- iPads with Stage Manager support, starting from iOS 16 can access the camera in the background, without the entitlement above. Our SDK already does the required steps to setup the capture session with background support. More details about this topic can be found [here](https://developer.apple.com/documentation/avkit/accessing_the_camera_while_multitasking_on_ipad?changes=__8). 
- -:::note -Note that the second option has limited devices support. If you are building an app for mass usage, you would need Apple's entitlement. However, the approval of such entitlement can take a longer time. -::: \ No newline at end of file diff --git a/docusaurus/docs/iOS/06-advanced/06-apply-video-filters.mdx b/docusaurus/docs/iOS/06-advanced/06-apply-video-filters.mdx deleted file mode 100644 index e54ee0950..000000000 --- a/docusaurus/docs/iOS/06-advanced/06-apply-video-filters.mdx +++ /dev/null @@ -1,383 +0,0 @@ ---- -title: Video & Audio filters -description: How to build video or audio filters ---- - -## Video Filters - -Some calling apps allow filters to be applied to the current user's video, such as blurring the background, adding AR elements (glasses, moustaches, etc) or applying image filters (such as sepia, bloom etc). StreamVideo's iOS SDK has support for injecting your custom filters into the calling experience. - -How does this work? If you initialized the SDK with custom filters support and the user selected a filter, you will receive each frame of the user's local video as `CIImage`, allowing you to apply the filters. This way you have complete freedom over the processing pipeline. - -You can find a working example of the filters (together with other great example projects) in our `VideoWithChat` [sample project](https://github.com/GetStream/stream-video-ios-examples/tree/main/VideoWithChat). Here is how the project you are about to build will look like in the end: - -## Adding a Video Filter - -The `VideoFilter` class allows you to create your own filters. It contains the `id` and `name` of the filter, along with an `async` function that converts the original `CIImage` to an output `CIImage`. If no filter is selected, the same input image is returned. 
- -For example, let's add a simple "Sepia" filter, from the default `CIFilter` options by Apple: - -```swift -let sepia: VideoFilter = { - let sepia = VideoFilter(id: "sepia", name: "Sepia") { input in - let sepiaFilter = CIFilter(name: "CISepiaTone") - sepiaFilter?.setValue(input.originalImage, forKey: kCIInputImageKey) - return sepiaFilter?.outputImage ?? input.originalImage - } - return sepia -}() -``` - -You can now create a helper `FilterService`, that will keep track of the available filters, as well as hold state information about the selected filter and whether the filters picker is shown: - -```swift -class FiltersService: ObservableObject { - @Published var filtersShown = false - @Published var selectedFilter: VideoFilter? - - static let supportedFilters = [sepia] -} -``` - -Next, you need to pass the supported filters to the `StreamVideo` object, via its `VideoConfig` and connect the user: - -```swift -let streamVideo = StreamVideo( - apiKey: apiKey, - user: userCredentials.user, - token: token, - // highlight-start - videoConfig: VideoConfig( - videoFilters: FiltersService.supportedFilters - ), - // highlight-end - tokenProvider: { result in - // Unrelated code skipped. Check repository for complete code: - // https://github.com/GetStream/stream-video-ios-examples/blob/main/VideoWithChat/VideoWithChat/StreamWrapper.swift - } -) -connectUser() - -private func connectUser() { - Task { - try await streamVideo.connect() - } -} -``` - -Now, let's enable the filter selection in the user interface. One option is to include the filters in the call controls shown at the bottom of the call view. For this, the first step is to override the `makeCallControlsView` function in your custom implementation of the `ViewFactory`: - -```swift -class VideoViewFactory: ViewFactory { - - /* ... Previous code skipped. 
*/ - - // highlight-start - func makeCallControlsView(viewModel: CallViewModel) -> some View { - ChatCallControls(viewModel: viewModel) - } - // highlight-end -} -``` - -You will now create the `ChatCallControls` view that does two things. It will first place an icon to toggle the filters menu (via the `filtersService.filtersShown` property) and allows users to select the filter they want to apply. - -Second, it will conditionally show a list of the filters with a button for each one to (de-)select it. - -In this section, only the code to show the filters is added. You can see the full code [here](https://github.com/GetStream/stream-video-ios-examples/blob/main/VideoWithChat/VideoWithChat/Sources/ChatCallControls.swift), but let's have a look at the simplified version: - -```swift -struct ChatCallControls: View { - var body: some View { - VStack { - HStack { - /* Skip unrelated code */ - // highlight-next-line - // 1. Button to toggle filters view - Button { - withAnimation { - filtersService.filtersShown.toggle() - } - } label: { - CallIconView( - icon: Image(systemName: "camera.filters"), - size: size, - iconStyle: filtersService.filtersShown ? .primary : .transparent - ) - } - /* Skip unrelated code */ - } - - if filtersService.filtersShown { - HStack(spacing: 16) { - // highlight-next-line - // 2. Show a button for each filter - ForEach(FiltersService.supportedFilters, id: \.id) { filter in - Button { - withAnimation { - // highlight-next-line - // 3. Select or de-select filter on tap - if filtersService.selectedFilter?.id == filter.id { - filtersService.selectedFilter = nil - } else { - filtersService.selectedFilter = filter - } - viewModel.setVideoFilter(filtersService.selectedFilter) - } - } label: { - Text(filter.name) - .background(filtersService.selectedFilter?.id == filter.id ? Color.blue : Color.gray) - /* more modifiers */ - } - - } - } - } - } - /* more modifiers */ - } -} -``` - -Here are the three things this code does: - -1. 
Adding the icon for the filters, that will control the `filtersShown` state. -2. Whenever the `filtersShown` is true, you're showing the list of the available filters. -3. When a user taps on a filter, the `CallViewModel`'s `setVideoFilter` method is called. This will enable or disable the video filter for the ongoing call. - -That is everything that is needed for a basic video filter support. - -## Adding AI Filters -### Face filters - -In some cases, you might also want to apply AI filters. That can be an addition to the user's face (glasses, moustaches, etc), or an ML filter. In this section this use-case will be covered. Specifically, you will show Stream's logo over the user's face. Whenever the user moves along, you will update the logo's location. - -:::tip -The code will be slightly simplified for the sake of this guide. If you want to see the entire example with the full code, you can see the [sample on GitHub](https://github.com/GetStream/stream-video-ios-examples/tree/main/VideoWithChat). -::: - -To do this, you will use the [Vision framework](https://developer.apple.com/documentation/vision) and the `VNDetectFaceRectanglesRequest`. First, let's create the method that will detect the faces: - -```swift -func detectFaces(image: CIImage) async throws -> CGRect { - return try await withCheckedThrowingContinuation { continuation in - let detectFaceRequest = VNDetectFaceRectanglesRequest { (request, error) in - if let result = request.results?.first as? VNFaceObservation { - continuation.resume(returning: result.boundingBox) - } else { - continuation.resume(throwing: ClientError.Unknown()) - } - } - let vnImage = VNImageRequestHandler(ciImage: image, orientation: .downMirrored) - try? vnImage.perform([detectFaceRequest]) - } -} -``` - -:::note -The `VNDetectFaceRectanglesRequest` does not support the `async/await` syntax yet, so it is converted using the `withCheckedThrowingContinuation` mechanism ([see Apple documentation](). 
-::: - -Next, let's add some helper methods, that will allow conversion between `CIImage` and `UIImage`, as well as the possibility to draw over an image: - -```swift -func convert(ciImage: CIImage) -> UIImage { - let context = CIContext(options: nil) - let cgImage = context.createCGImage(ciImage, from: ciImage.extent)! - let image = UIImage(cgImage: cgImage, scale: 1, orientation: .up) - return image -} - -@MainActor -func drawImageIn(_ image: UIImage, size: CGSize, _ logo: UIImage, inRect: CGRect) -> UIImage { - let format = UIGraphicsImageRendererFormat() - format.scale = 1 - format.opaque = true - let renderer = UIGraphicsImageRenderer(size: size, format: format) - return renderer.image { context in - image.draw(in: CGRect(origin: CGPoint.zero, size: size)) - logo.draw(in: inRect) - } -} -``` - -With those two helpers in place, you can now implement your custom AI filter. The same principle applies when creating a `VideoFilter` as in [the first part](#adding-a-video-filter) of this guide. - -```swift -let stream: VideoFilter = { - let stream = VideoFilter(id: "stream", name: "Stream") { input in - // highlight-next-line - // 1. detect, where the face is located (if there's any) - guard let faceRect = try? await detectFaces(image: input.originalImage) else { return input.originalImage } - let converted = convert(ciImage: input.originalImage) - let bounds = input.originalImage.extent - let convertedRect = CGRect( - x: faceRect.minX * bounds.width - 80, - y: faceRect.minY * bounds.height, - width: faceRect.width * bounds.width, - height: faceRect.height * bounds.height - ) - // highlight-next-line - // 2. Overlay the rectangle onto the original image - let overlayed = drawImageIn(converted, size: bounds.size, streamLogo, inRect: convertedRect) - - // highlight-next-line - // 3. convert the created image to a CIImage - let result = CIImage(cgImage: overlayed.cgImage!) - return result - } - return stream -}() -``` - -Here's what this code does: - -1. 
It detects the face rectangle using the `detectFaces` method you defined earlier and converts the `CIImage` to a `UIImage`. The face rectangle is then converted to the real screen dimensions (since Vision returns normalized coordinates as percentages).
Finally we pass the filter to the call similarly to how we did before with the blur filter.
- -```swift -final class RobotVoiceFilter: AudioFilter { - - let pitchShift: Float - - init(pitchShift: Float) { - self.pitchShift = pitchShift - } - - // MARK: - AudioFilter - - var id: String { "robot-\(pitchShift)" } - - func applyEffect(to buffer: inout RTCAudioBuffer) { - let frameSize = 256 - let hopSize = 128 - let scaleFactor = Float(frameSize) / Float(hopSize) - - let numFrames = (buffer.frames - frameSize) / hopSize - - for channel in 0..= 0 && shiftedIndex < frameSize && originalIndex >= 0 && originalIndex < buffer.frames { - outputFrame[shiftedIndex] = channelBuffer[originalIndex] - } - } - - // Copy back to the input buffer - for j in 0..= 0 && outputIndex < buffer.frames { - channelBuffer[outputIndex] = outputFrame[j] - } - } - } - } - } -} -``` - -The filter is initalized with pitch shift, which configures how much the channel buffer's indexes should be shifted for the effect. We use a default value of 0.8, but you can configure it depending on how "robotic" the voice should be. - -This is a simple algorithm that just does shifting of the indexes. For a more complex one, you can also use some voice processing library. The important part is that you update the `channelBuffer` with the filtered values. - -Finally, we apply the audio filter on our call, like below: - -```swift -// Get a call object -let call = streamVideo.call(callType: "default", callId: UUID().uuidString) - -// Create our audio filter -let filter = RobotVoiceFilter(pitchShift: 0.8) - -// Apply the audio filter on the call. To deactivate the filter we can simply pass `nil`. 
-call.setAudioFilter(filter) -``` diff --git a/docusaurus/docs/iOS/06-advanced/06-chat-integration.mdx b/docusaurus/docs/iOS/06-advanced/06-chat-integration.mdx deleted file mode 100644 index ee9f94c0b..000000000 --- a/docusaurus/docs/iOS/06-advanced/06-chat-integration.mdx +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: Chat Integration -description: How to integrate chat & video ---- - -## Introduction - -It's common for calling apps to have chat, as well as the opposite - chat apps to have a calling functionality. Stream's Chat and Video SDKs are perfectly compatible between each other, and can easily be integrated into an app. - -:::tip -You can find an example chat integration with our video product, in our samples [repo](https://github.com/GetStream/stream-video-ios-examples/tree/main/VideoWithChat/VideoWithChat). -::: - -## Adding chat into video - -In this guide you will take a video-based application and add chat functionality with the Stream Chat SDK on top of it. Here is an example of what the end result will look like: - -![Watch the gif](../assets/chat-integration.gif) - -:::info -The starting point for this guide is a functioning video calling application. If you don't have one and want to follow along, feel free to do our [step-by-step tutorial](https://getstream.io/video/docs/ios/tutorials/video-calling/) first. -::: - -### Connecting the Video and Chat Client - -You should have a setup for both the video and chat client. They both require the same API key, as well as the same user token. - -The sample repo contains a simple [`StreamWrapper`](https://github.com/GetStream/stream-video-ios-examples/blob/main/VideoWithChat/VideoWithChat/StreamWrapper.swift), that can help you initialize and connect both clients. 
- -Here's an example on how to initialize the `StreamWrapper`: - -```swift -let streamWrapper = StreamWrapper( - apiKey: "YOUR_API_KEY", - userCredentials: user, - tokenProvider: { result in - let token = \\ Fetch the token from your backend - result(.success(token)) - } -) -``` - -Note that you can also setup the clients in a different way, without using this wrapper. More information on how to setup the Stream Chat SwiftUI SDK can be found [here](https://getstream.io/chat/docs/sdk/ios/swiftui/getting-started/). - -## Adding Chat to the Call Controls - -The simplest way to add chat to an existing video calling app is to extend the call controls with an additional chat icon. To do this, implement the `makeCallControlsView` in your custom implementation of the `ViewFactory` from the Stream Video SDK (in our case it's called `VideoWithChatViewFactory`, see [here](https://github.com/GetStream/stream-video-ios-examples/blob/main/VideoWithChat/VideoWithChat/Sources/VideoWithChatViewFactory.swift)): - -```swift -class VideoWithChatViewFactory: ViewFactory { - - /* ... Previous code skipped. */ - - // highlight-start - func makeCallControlsView(viewModel: CallViewModel) -> some View { - ChatCallControls(viewModel: viewModel) - } - // highlight-end -} - -``` - -Create a new SwiftUI view called `ChatCallControls` and add the code for the `ToggleChatButton` to the file (for example at the bottom): - -```swift -struct ToggleChatButton: View { - - @ObservedObject var chatHelper: ChatHelper - - var body: some View { - Button { - // highlight-next-line - // 1. Toggle chat window - withAnimation { - chatHelper.chatShown.toggle() - } - } - label: { - // highlight-next-line - // 2. Show button - CallIconView( - icon: Image(systemName: "message"), - size: 50, - iconStyle: chatHelper.chatShown ? .primary : .transparent - ) - // highlight-next-line - // 3. Overlay unread indicator - .overlay( - chatHelper.unreadCount > 0 ? 
- TopRightView(content: { - UnreadIndicatorView(unreadCount: chatHelper.unreadCount) - }) - : nil - ) - } - } -} -``` - -The code does three interesting things (see the numbered comments): - -1. On tapping the button it toggles the chat window -2. Showing a button that indicates that there is a chat to open -3. It overlays an unread indicator when there's new chat messages - -Here's the (simplified, [see full version](https://github.com/GetStream/stream-video-ios-examples/blob/main/VideoWithChat/VideoWithChat/Sources/ChatCallControls.swift)) implementation of the `ChatCallControls` itself, that handles the display of the chat inside it. - -```swift -struct ChatCallControls: View { - - @ObservedObject var viewModel: CallViewModel - - @StateObject private var chatHelper = ChatHelper() - - public var body: some View { - // highlight-next-line - // 1. Arrange code in VStack - VStack { - HStack { - ToggleChatButton(chatHelper: chatHelper) - - // Unrelated code skipped. Check repository for complete code: - // https://github.com/GetStream/stream-video-ios-examples/blob/main/VideoWithChat/VideoWithChat/Sources/ChatCallControls.swift - } - - // highlight-next-line - // 2. If chat is activated, show the ChatChannelView - if chatHelper.chatShown { - if let channelController = chatHelper.channelController { - ChatChannelView( - viewFactory: ChatViewFactory.shared, - channelController: channelController - ) - .frame(height: UIScreen.main.bounds.height / 3 + 50) - .onAppear { - chatHelper.markAsRead() - } - } else { - Text("Chat not available") - } - } - } - .frame(maxWidth: .infinity) - .frame(height: chatHelper.chatShown ? (UIScreen.main.bounds.height / 3 + 50) + 100 : 100) - /* more modifiers */ - // highlight-next-line - // 3. 
Listen to changes in call participants and update the UI accordingly - .onReceive(viewModel.$callParticipants, perform: { output in - if viewModel.callParticipants.count > 1 { - chatHelper.update(memberIds: Set(viewModel.callParticipants.map(\.key))) - } - }) - } -} -``` - -The lines that are marked do the following: - -1. The entire code is wrapped in a `VStack` to show content vertically, with chat being slid in from the bottom, once shown. The buttons on the other hand are wrapped in a `HStack`. -2. If `chatHelper.chatShown` is true and a `channelController` can be retrieved, the `ChatChannelView` from the Stream Chat SDK is used to display chat. -3. Subscribing to changes in the `callParticipants` allows to make sure the UI is always up-to-date. - -Note that both the Video and Chat SDK should be setup with the same API key and token, before displaying this view. - -:::tip -Not sure how to do this? Start [here for video](https://getstream.io/video/docs/ios/guides/client-auth/) and [here for chat](https://getstream.io/chat/docs/sdk/ios/swiftui/getting-started/#creating-the-swiftui-context-provider-object). -::: - -That's everything that's needed to add a button in the call controls, that will show a chat during the call. - -### Conclusion - -In this article, we have seen how the two SDKs for chat and video can work together. - -For more examples on how to create enganging video experiences, please check our video [cookbook](https://getstream.io/video/docs/ios/ui-cookbook/overview/) and [UI components](https://getstream.io/video/docs/ios/ui-components/overview/). - -You can find more details about our chat SDK in the [chat docs](https://getstream.io/chat/docs/sdk/ios/swiftui/). 
\ No newline at end of file diff --git a/docusaurus/docs/iOS/06-advanced/06-events.mdx b/docusaurus/docs/iOS/06-advanced/06-events.mdx deleted file mode 100644 index e017d49d5..000000000 --- a/docusaurus/docs/iOS/06-advanced/06-events.mdx +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: Events -description: How to listen to events ---- - -In most cases, you should rely on the `StreamVideo` or `CallViewModel` and its events for building and updating your UI. However for some customizations you'll want to listen to the underlying events that power these objects. - -## Listening to events -Both the call and streamVideo object allow you to subscribe to events. You can listen to a specific event or all of them. This example shows how to listen to all events. - -```swift -Task { - for await event in streamVideo.subscribe() { - print(event) - } -} -``` - -You can also subscribe for a specific call. -```swift -Task { - let call = streamVideo.call("default", "123") - for await event in call.subscribe() { - print(event) - } -} -``` - -:::note -When subscribing to all events either on the client or a call, you will be receiving events of type `VideoEvent`. `VideoEvent` is the discriminator object for all websocket events, you should use this to map event payloads to their own type. 
- -```swift -Task { - let call = streamVideo.call("default", "123") - for await event in call.subscribe() { - switch event { - case .typeBlockedUserEvent(let blockedUserEvent): - print(blockedUserEvent) - case .typeCallAcceptedEvent(let callAcceptedEvent): - print(callAcceptedEvent) - default: - break - } - } -} -``` -::: - -Or listen to a specific event -```swift -Task { - let call = streamVideo.call("default", "123") - for await event in call.subscribe(for: ConnectedEvent.self) { - print(event) - } -} -``` - -## Events - -The following events are triggered by the client: - - -| Event Name | Description | -| ---------- | ----------- | -|`BlockedUserEvent`|This event is sent to call participants to notify when a user is blocked on a call, clients can use this event to show a notification. If the user is the current user, the client should leave the call screen as well| -|`CallAcceptedEvent`|This event is sent when a user accepts a notification to join a call.| -|`CallBroadcastingStartedEvent`|This event is sent when call broadcasting has started| -|`CallBroadcastingStoppedEvent`|This event is sent when call broadcasting has stopped| -|`CallCreatedEvent`|This event is sent when a call is created. Clients receiving this event should check if the ringing field is set to true and if so, show the call screen| -|`CallEndedEvent`|This event is sent when a call is mark as ended for all its participants. Clients receiving this event should leave the call screen| -|`CallLiveStartedEvent`|This event is sent when a call is started. 
Clients receiving this event should start the call.| -|`CallMemberAddedEvent`|This event is sent when one or more members are added to a call| -|`CallMemberRemovedEvent`|This event is sent when one or more members are removed from a call| -|`CallMemberUpdatedEvent`|This event is sent when one or more members are updated| -|`CallMemberUpdatedPermissionEvent`|This event is sent when one or more members get its role updated| -|`CallNotificationEvent`|This event is sent to all call members to notify they are getting called| -|`CallReactionEvent`|This event is sent when a reaction is sent in a call, clients should use this to show the reaction in the call screen| -|`CallRecordingStartedEvent`|This event is sent when call recording has started| -|`CallRecordingStoppedEvent`|This event is sent when call recording has stopped| -|`CallRejectedEvent`|This event is sent when a user rejects a notification to join a call.| -|`CallRingEvent`|This event is sent to all call members to notify they are getting called| -|`CallSessionEndedEvent`|This event is sent when a call session ends| -|`CallSessionParticipantJoinedEvent`|This event is sent when a participant joins a call session| -|`CallSessionParticipantLeftEvent`|This event is sent when a participant leaves a call session| -|`CallSessionStartedEvent`|This event is sent when a call session starts| -|`CallUpdatedEvent`|This event is sent when a call is updated, clients should use this update the local state of the call. 
This event also contains the capabilities by role for the call; clients should update the `own_capability` for the current user.
- -#### Recording state - -The recording state of the call is available via the `CallViewModel`'s `recordingState` published property. It's an `enum`, which has the following values: -- `noRecording` - default value, there's no recording on the call. -- `requested` - recording was requested by the current user. -- `recording` - recording is in progress. - -If you are not using our `CallViewModel`, you can also listen to this state via the `Call`'s property `recordingState`. - -#### Start a recording - -To start a recording, you need to call the `startRecording` method of the call: - -```swift -func startRecording() { - Task { - try await call.startRecording() - } -} -``` - -This will change the current recording state of the call to `requested`. Since it takes several seconds before the recording is started, it's best to handle this state by presenting a progress indicator to provide a better user experience. - -After the recording is started, the `recordingState` changes to `recording`. - -#### Stop a recording - -To stop a recording, you need to call the `stopRecording` method of the `Call`: - -```swift -func stopRecording() { - Task { - try await call.stopRecording() - } -} -``` - -This will change the current recording state of the call to `noRecording`. 
- -#### Recording events - -You can listen to the recording events and show visual indications to the users based on these events, by subscribing to the async stream of the `recordingEvents`: - -```swift -func subscribeToRecordingEvents() { - Task { - for await event in call.subscribe() { - switch event { - case .typeCallRecordingStartedEvent(let recordingStartedEvent): - log.debug("received an event \(recordingStartedEvent)") - /* handle recording event */ - case .typeCallRecordingStoppedEvent(let recordingStoppedEvent): - log.debug("received an event \(recordingStoppedEvent)") - /* handle recording event */ - default: - break - } - } - } -} -``` - -#### Search recordings - -You can search for recordings in a video call, using the `Call`'s `listRecordings` method: - -```swift -func loadRecordings() { - Task { - self.recordings = try await call.listRecordings() - } -} -``` - -This will return a list of recordings, that contains information about the filename, URL, as well as the start and end time. You can use the URL to present the recording in a player. Here's an example in SwiftUI: - -```swift -import SwiftUI -import StreamVideo -import AVKit - -struct PlayerView: View { - - let recording: CallRecording - - var body: some View { - Group { - if let url = URL(string: recording.url) { - VideoPlayer(player: AVPlayer(url: url)) - } else { - Text("Video can't be loaded") - } - } - } -} -``` \ No newline at end of file diff --git a/docusaurus/docs/iOS/06-advanced/09-broadcasting.mdx b/docusaurus/docs/iOS/06-advanced/09-broadcasting.mdx deleted file mode 100644 index 1e8e0df11..000000000 --- a/docusaurus/docs/iOS/06-advanced/09-broadcasting.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: Broadcasting -description: Broadcasting to HLS & RTMP ---- - -The StreamVideo SDK has support for broadcasting calls. You can find a tutorial on how to implement broadcasting calls in our [livestreaming tutorial](https://getstream.io/video/sdk/ios/tutorial/livestreaming/). 
- -### Creating a call - -First, you need to create a call with a user that has the capability to start broadcasting calls (`start-broadcast-call`). - -```swift -let call = streamVideo.call(callType: .default, callId: callId) -Task { - try await call.join() -} -``` - -The code above will create and join the call. If you just want to create it (and not join it), you can use the method `create`: - -```swift -let call = try await call.create(members: members, custom: [:], startsAt: Date(), ring: false) -``` - -### Start HLS - -You can start HLS broadcasting a call, by calling the method `startHLS`: - -```swift -try await call.startHLS() -``` - -After few seconds of setup, the call would be broadcasted, and you will receive an event called `BroadcastingStartedEvent`. Also, the `state` of the call would be updated - the `broadcasting` value would become true. - -You can listen to the broadcasting events on the call by subscribing to its `AsyncStream`: - -```swift -for await event in call.subscribe() { - switch event { - case .typeCallHLSBroadcastingStartedEvent(let broadcastingStartedEvent): - log.debug("received an event \(broadcastingStartedEvent)") - /* handle recording event */ - case .typeCallHLSBroadcastingStoppedEvent(let broadcastingStoppedEvent): - log.debug("received an event \(broadcastingStoppedEvent)") - /* handle recording event */ - default: - break - } -} -``` - -In the `BroadcastingStartedEvent`, you will receive the `hlsPlaylistUrl`, a URL that can be used by other participants to watch the broadcasting. - -### Stop HLS - -When you are done with broadcasting, you should call the method `stopHLS`. - -```swift -try await call.stopHLS() -``` - -This action will also send `BroadcastingStoppedEvent` to all participants who are watching the call. 
\ No newline at end of file diff --git a/docusaurus/docs/iOS/06-advanced/10-background-modes.mdx b/docusaurus/docs/iOS/06-advanced/10-background-modes.mdx deleted file mode 100644 index 16709c900..000000000 --- a/docusaurus/docs/iOS/06-advanced/10-background-modes.mdx +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Background Modes -description: How to configure and use background modes ---- - -## What Are Background Modes? - -Background modes are services an app offers that require it to execute tasks when it is not active or running. In the case of VoIP (audio and video) calling apps, enabling background modes can cause the app to update, and execute tasks in the background when the user launches another app or transitions to the home screen. An app may require several background capabilities for different tasks and services, such as audio, video, location, fetch, Bluetooth central, and processing. Check out our [CallKit integration guide](./03-callkit-integration.mdx) for more information. - -### How Background Modes Work in Your VoIP App - -In the case of your app, assuming user **A** has audio unmuted, video on, and is in an active call with user **B.** When user **A** suspends the app to go into the background, iOS device capabilities, such as the camera and microphone, will not be accessible. In this case, the system will mute user **A**'s audio, and the picture-in-picture (video) feature will not be available when the app is in the background. The app's inability to access audio and PIP from the background is a default behavior on iOS. To override this default behavior, specify the background modes below so that audio and picture-in-picture become accessible when the app goes to the background. - -1. Click the app's name in the Project Navigator, select your target, and go to the **Signing & Capabilities** tab. -2. Enable these capabilities by selecting the following checkboxes. 
- -![Configure background modes](../assets/callkit_01.png) - -After enabling these background mode capabilities, unmuted audio will remain unmuted when the app goes into the background. Also, picture-in-picture will be available to the call participant while the app remains in the background. diff --git a/docusaurus/docs/iOS/06-advanced/11-custom-data.mdx b/docusaurus/docs/iOS/06-advanced/11-custom-data.mdx deleted file mode 100644 index 960eff0fe..000000000 --- a/docusaurus/docs/iOS/06-advanced/11-custom-data.mdx +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: Custom Data -description: How can you use custom data in your applications? ---- - -Custom data is additional information that can be added to the default data of Stream. It is a dictionary of key-value pairs that can be attached to users, events, and pretty much almost every domain model in the Stream SDK. - -On iOS, custom data is represented by the following dictionary, `[String: RawJSON]`. The `RawJSON` is an `enum` that can be represented by different types of values. It can be a String, Number, Boolean, Array, Dictionary, or null. In the end, this is to make the dictionary strongly typed so that it is safer and easier to use. The code snippet below shows the simplified implementation of `RawJSON`. - -```swift -indirect enum RawJSON: Codable, Hashable { - case number(Double) - case string(String) - case bool(Bool) - case dictionary([String: RawJSON]) - case array([RawJSON]) -} -``` - -## Adding Custom Data - -Adding extra data can be done through the Server-Side SDKs or through the Client SDKs. In the iOS Stream Video SDK, you can add extra data when creating/updating a user, event, reaction and other models. -As a simple example, let's see how you can add a new email field to the user. 
- -```swift -let userInfo = User( - id: id, - name: name, - imageURL: imageURL, - customData: ["email": .string("test@test.com")] -) -``` - -## Reading Custom Data - -All of the most important domain models in the SDK have an `customData` property that you can read the additional information added by your app. - -The following code snippet shows how to get an email from a user's custom data. - -```swift -let email = user.customData["email"]?.stringValue ?? "" -print(email) -``` - -:::tip -In order to access the email even more easily, you can extend our models to provide an extra property, in this case, you can add an `email` property to the `User` model like this: - -```swift -extension User { - var email: String? { - customData["email"]?.stringValue - } -} -``` - -::: - -To see how you can get data with different types from custom data, we can pick the example of the ticket information again and see how you can get it from custom data. - -```swift -let ticket = user.customData["ticket"]?.dictionaryValue -let name = ticket?["name"]?.stringValue ?? "" -let price = ticket?["price"]?.doubleValue ?? 0.0 -``` - -As you can see above, each type of value can be easily accessible from an custom data property. The SDK will try to convert the raw type to a strongly typed value and return it if the property exists, and if the type is correct. 
Below is the list of all values supported: - -- `stringValue: String?` -- `numberValue: Double?` -- `boolValue: Bool?` -- `dictionaryValue: [String: RawJSON]?` -- `arrayValue: [RawJSON]?` -- `stringArrayValue: [String]?` -- `numberArrayValue: [Double]?` -- `boolArrayValue: [Bool]?` diff --git a/docusaurus/docs/iOS/06-advanced/12-interrupt-handling.mdx b/docusaurus/docs/iOS/06-advanced/12-interrupt-handling.mdx deleted file mode 100644 index 04291dcea..000000000 --- a/docusaurus/docs/iOS/06-advanced/12-interrupt-handling.mdx +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Interrupt Handling -description: How to handle interrupts on your calls. ---- - -### Video Calls Interruptions - -During a video call, there can be network issues. For example, the internet connection on the user's device is lost, or the SFU that hosts the call is no longer available. The StreamVideo iOS SDK has a reconnection mechanism that tries to recover from any interruptions during the call. - -#### No Network Connection - -When the web socket connection to the SFU is lost, the SDK tries to reconnect. If the failure is due to lost network connection on the device, that is considered an unrecoverable error and after retrying for 30 seconds, the call will be closed. - -#### Recoverable errors - -When the users connect to an SFU, they receive a token with an expiry date. If the call is long, the token could expire in the meantime. When that happens, the SDK automatically fetches new token and reconnects the user. Usually that happens fast and there are no visible changes to the user's experience. - -If the SFU that hosts the call becomes unavailable, and the user has internet connection, the client SDK tries to recover from this failure. It asks our edge infrastructure for a new server to connect to, and usually reconnects after few seconds. By default, the UI SDKs present a reconnection popup while this process is happening. 
- -### Reading the reconnection state - -If you are using our `CallViewModel`, you can refer to the `callingState`'s value of `reconnecting` to listen and react to this state. If you are not using our `CallViewModel`, you can read this state via the `Call`'s `reconnecting` variable, which is also `@Published`. - -### Changing the ReconnectionView - -If you are using our SwiftUI SDK, and the default calling experience, in the case of a reconnection, the `ReconnectionView` is shown. You can provide your custom view to be presented instead, by implementing the `makeReconnectionView` in the `ViewFactory`: - -```swift -public func makeReconnectionView(viewModel: CallViewModel) -> some View { - CustomReconnectionView(viewModel: viewModel, viewFactory: self) -} -``` - -Additionally, the `ReconnectionView` is public and you can reuse it in your custom calling use-cases. diff --git a/docusaurus/docs/iOS/06-advanced/14-text-localization.mdx b/docusaurus/docs/iOS/06-advanced/14-text-localization.mdx deleted file mode 100644 index 8d8eff7a5..000000000 --- a/docusaurus/docs/iOS/06-advanced/14-text-localization.mdx +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Localization ---- - -## Introduction - -If your app supports multiple languages, the StreamVideo SDK has support for localizations. For example, you can add more languages, or you can change translations for the existing texts used throughout the SDK. - -## Adding a New Language - -1. If you don't have `strings` or `stringsdict` files in your project, add those new files to `Localizable.strings` and `Localizable.stringsdict`. -2. Next [add new language to the project](https://developer.apple.com/documentation/xcode/adding-support-for-languages-and-regions). -3. Copy the StreamVideoUI localization keys into your `strings` and `stringsdict` files. -4. 
Set the `localizationProvider` to provide your `Bundle` instead of the one provided by `StreamVideoSwiftUI` SDK (as early as possible in the App life-cylce, for example in the `AppDelegate`): - -```swift -Appearance.localizationProvider = { key, table in - Bundle.main.localizedString(forKey: key, value: nil, table: table) -} -``` - -5. Now, you're ready to implement your `strings` and `stringsdict` files for different languages. - -:::tip -We recommend naming your `strings` and `stringsdict` files: `Localizable.strings` and `Localizable.stringsdict`. -::: - -## Override Existing Languages - -Overriding the existing language works in the same as adding a new language. - -## Resources - -Every string included in StreamVideo can be changed and translated to a different language. All strings used by UI components are in these two files: - -- [`Localizable.strings`](https://github.com/GetStream/stream-video-swift/blob/main/Sources/StreamVideoSwiftUI/Resources/en.lproj/Localizable.strings) -- [`Localizable.stringsdict`](https://github.com/GetStream/stream-video-swift/blob/main/Sources/StreamVideoSwiftUI/Resources/en.lproj/Localizable.stringsdict) diff --git a/docusaurus/docs/iOS/06-advanced/15-sdk-size-impact.mdx b/docusaurus/docs/iOS/06-advanced/15-sdk-size-impact.mdx deleted file mode 100644 index fb845a3ca..000000000 --- a/docusaurus/docs/iOS/06-advanced/15-sdk-size-impact.mdx +++ /dev/null @@ -1,7 +0,0 @@ -# SDK Size Impact - -When developing a mobile app, one crucial performance metric is app size. An app’s size can be difficult to accurately measure with multiple variants and device spreads. Once measured, it’s even more difficult to understand and identify what’s contributing to size bloat. - -We track and update the SDK size on every commit to our `develop` branch. The sizes of all SDKs that are part of our video product are shown with badges, at the top of our GitHub [repo](https://github.com/GetStream/stream-video-swift). 
Therefore, please check our repo to get the up-to-date sizes of our video SDKs.
-- Find solutions to common challenges and answers to frequently asked questions during the transition. - -By the end of this guide, you will be equipped with a thorough understanding of how Stream SDK can enhance your audio conferencing applications, providing a clear rationale for its adoption as a superior alternative to Dolby.io in your technology stack. In case you are looking for another video or audio appication type, you can look into our tutorials of [Audio Room](https://getstream.io/video/sdk/ios/tutorial/audio-room/), [Video calling](https://getstream.io/video/sdk/ios/tutorial/video-calling/) and [Livestreaming](https://getstream.io/video/sdk/ios/tutorial/livestreaming/) for help. - -## Prerequisites -Before diving into the migration process, it's essential to establish a clear starting point and outline the capabilities of the existing SwiftUI Audio room application using Dolby.io SDK. The application currently includes the following features: - -- **Join/Leave a Call**: Users can join or leave audio calls, facilitating easy participation in audio conferences. -- **View Participants**: The app displays a list or grid of participants currently in the call, making it easy to see who is involved in the conference. -- **Mute/Unmute Functionality**: Users have the ability to mute and unmute themselves, providing control over their participation in the conversation. -- **Speaking Indicator**: When a participant speaks, their avatar is highlighted, visually indicating who is actively contributing to the conversation. - -As we migrate to Stream SDK, we will not only replicate these existing features but also introduce additional functionalities to enhance the user experience and leverage the advanced capabilities of the Stream SDK. The added features will include: - -1. **Improved Audio Quality**: Leveraging Stream's superior audio processing to provide clearer and more reliable audio communication. -2. 
**Enhanced User Interface**: A more intuitive and user-friendly interface, taking advantage of SwiftUI's capabilities. -3. **Simpler code**: Implementing an Audio room app with the Stream SDK should be easy and the code should be clear. -4. **Request to speak**: We will implement a simple permission system, where a participants can request to talk and a host can approve or reject their request. - -This guide assumes that you have the following setup and knowledge: - -- A working SwiftUI Audio room application built with Dolby.io SDK (We have built demo that can be found [here](https://github.com/GetStream/audio-room-migration-from-dolby-to-stream)). -- Basic knowledge of audio streaming concepts and audio room functionalities. -- Familiarity with Swift and SwiftUI for iOS app development. -- Access to both Dolby.io and Stream SDKs, along with necessary developer accounts and permissions. - -With these prerequisites in place, you'll be well-prepared to embark on the migration process, transforming your app with enhanced features and capabilities offered by the Stream SDK. - -## Key Differences -Grasping the core differences between Dolby.io SDK and Stream SDK greatly aids in ensuring a smooth migration process. This section emphasizes the significant distinctions that will influence the adaptation of your existing AudioRoom application as you transition to the Stream SDK. - -1. **Latest technologies**: - - **Dolby.io SDK**: This SDK does not natively support Swift concurrency, and its APIs are not fully optimized for SwiftUI. This can result in more complex integration with SwiftUI-based applications and may require additional workarounds to manage asynchronous operations. - - **Stream SDK**: In contrast, Stream SDK is designed with native support for Swift concurrency, offering a more seamless integration with SwiftUI. This results in a more straightforward, efficient coding experience, particularly for developers working with modern Swift development practices. 
- -2. **Terminology: Conference vs. Call**: - - **Dolby.io SDK**: In Dolby.io, the primary feature of connecting users in an audio session is referred to as a 'Conference'. This terminology is consistent across their documentation and API. - - **Stream SDK**: Stream SDK, on the other hand, uses the term 'Call' to describe similar functionality. This is more than just a semantic difference; it reflects in the API naming conventions and documentation. Being aware of this terminology change is crucial for understanding the Stream SDK's structure and documentation. - -3. **Extensibility and Permission System**: - - **Dolby.io SDK**: While Dolby.io SDK offers a range of features for audio conferencing, it may not provide as much flexibility when it comes to extending the SDK's capabilities, particularly regarding building a custom permission system. - - **Stream SDK**: Stream SDK stands out with its extensibility, especially in implementing advanced features like a permission system. This flexibility allows for more granular control over user roles and permissions, which is particularly beneficial for creating more sophisticated or customized audio room experiences. - -In summary, the Stream SDK offers a more modern and streamlined approach, focusing ease of integration and stability. Its terminology and extensible nature also make it a more versatile choice for building advanced audio and video applications. - -## Step-by-Step Migration -The migration from Dolby.io SDK to Stream SDK involves a series of structured steps designed to ensure a seamless transition of your Audio room application. This section breaks down the migration process into manageable phases, guiding you through each critical aspect of the transition. Follow these steps closely to adapt your existing SwiftUI application to leverage the capabilities of Stream SDK effectively. 
- -### 5.1 Setup and Initialization -Both SDKs are readily accessible through all major package managers, simplifying their integration into your project. The setup and initialization process for both SDKs involves similar steps, ensuring a smooth transition. You need to make sure that you have updated your `Info.plist` with the required Microphone access key: -- `Privacy - Microphone Usage Description` - "Audio rooms requires microphone access in order to capture and transmit audio - -![Screenshot shows permissions in the .plist file](../assets/permissions.png) - -### 5.2 User Authentication -User authentication is a critical component of both Dolby.io and Stream SDKs. Although both provide demo tokens (from [Dolby.io dasboard](https://dashboard.dolby.io/dashboard/applications/) or the [Audio room tutorial page](https://getstream.io/video/sdk/ios/tutorial/audio-room/#step-3---create--join-a-call)), they require slightly different configurations. Below, we present the configuration steps for each SDK side by side, allowing for an easy comparison and understanding of the differences and similarities. - -Dolby.io SDK utilizes a specific method for user authentication. Follow these steps to configure authentication in your implementation. 
Here's the code block for setting up user authentication with Dolby.io SDK: - -```swift -let token = "THE_YOU_GOT_TOKEN_FROM_DASHBOARD" -VoxeetSDK.shared.initialize(accessToken: token) { closure, isExpired in - closure(token) -} - -let user = VTParticipantInfo( - externalID: UUID().uuidString, - name: "Obi-Wan Kenobi", - avatarURL: "https://picsum.photos/120" -) - -VoxeetSDK.shared.session.open(info: user) { error in - if let error { - /* Do something with the error */ - } else { - /* Do something with the user */ - } -} -``` - -While Stream's SDK does that with the following code: - -```swift -private var client: StreamVideo -private let apiKey: String = "" // The API key can be found in the Credentials section -private let token: String = "" // The Token can be found in the Credentials section -private let userId: String = "" // The User Id can be found in the Credentials section -private let callId: String = "" // The CallId can be found in the Credentials section - -let user = User( - id: userId, - name: "Obi-Wan Kenobi", // name and imageURL are used in the UI - imageURL: .init(string: "https://picsum.photos/120") -) - - // Initialize Stream Video client -self.client = StreamVideo( - apiKey: apiKey, - user: user, - token: .init(stringLiteral: token) -) -``` - - - -### 5.3 Conference/Call Management -Managing conferences in Dolby.io SDK and calls in Stream SDK involves different approaches, particularly in how the primary object that manages the conference or call is handled and interacted with. - -In the Dolby.io SDK, the management of a conference is centered around the SDK's singleton, which houses a `ConferenceService`. This service is the key to interacting with the currently active conference. Here's a brief overview: - -- **Singleton Pattern**: Dolby.io SDK uses a singleton pattern to manage conferences which handles the Global shared state. Unfortunately, this pattern greatly reduces testability. 
-- **Conference Service**: The `ConferenceService` within the singleton is responsible for all conference-related operations. -- **Active Conference Interaction**: The `ConferenceService` allows for interaction with the active conference, handling tasks like joining, leaving, or managing the conference. -- **Outdated API**: The API relies on completionHandlers to inform calling points about the result of each call. - -```swift -// Create a new conference -VoxeetSDK.shared - .conference - .create(options: options) { conference in - // Conference was created succesfully - } fail: { - // Conference creation failed - } - -// Join a conference -VoxeetSDK.shared - .conference - .join(conference: conference) { response in - // Succefully joined the conference - } fail: { - // Failed to join the conference - } -``` - -Conversely, the Stream SDK adopts a different approach. It creates Call objects from the StreamVideo client, and these call objects are then used throughout the lifecycle of the call for various operations. - -- **Direct Object Creation**: Stream SDK allows the creation of call objects directly from the StreamVideo client. -- **Lifecycle Management**: The Call object in Stream SDK manages all aspects of the call's lifecycle. -- **Operation Handling**: This object is used for performing operations during the call, including joining, leaving, and managing call features. -- **Swift Concurrency**: Call's API is built with Swift Concurrency in mind, providing full support on all operations using the convenient `try await` syntax. - -```swift - -// Create the call object -self.call = client.call(callType: "audio_room", callId: callId) - -// Join the call -// - Allows you to define the members of this Audio room. -// - Allows you to define custom fields that will be appended on the call object. 
-try await call.join( - create: true, - options: .init( - members: [ - .init(userId: "john_smith"), - .init(userId: "jane_doe"), - ], - custom: [ - "title": .string("SwiftUI heads"), - "description": .string("Talking about SwiftUI") - ] - ) -) -``` - -For details on joining or creating a call you can visit our docs [here](../../guides/joining-creating-calls). - -### 5.4 Advanced Features -The Stream SDK comes with a built-in permission system, providing a foundational layer for implementing custom user interaction features in your application. This system is particularly useful for managing user roles and permissions dynamically during an audio call. - -- **Built-In Functionality**: The permission system is an integral part of the Stream SDK, eliminating the need for external dependencies or complex custom implementations. -- **Flexibility and Control**: It offers granular control over user roles and actions within a call, enabling a more tailored user experience. - -#### Implementing "Request to Speak" Feature: - -With the Stream SDK's permission system, you can implement a "Request to Speak" feature, where participants can request permission to speak, and the host or moderator can grant or deny this request. This feature enhances the interactivity and orderliness of your Audio room, especially in scenarios with multiple participants. - -- **User Request Handling**: Participants can send a request to speak, which is managed by the SDK's permission system. -- **Moderator Control**: The host or moderator can easily review and respond to these requests, maintaining a smooth flow of conversation. - -Requesting permission to speak is easy. Let's first have a quick look at how the SDK call object exposes this: - -```swift -let response = try await call.request(permissions: [.sendAudio]) -``` - -Permission requests are exposed from `call.state` on the `permissionRequests` published variable. 
-```swift -if let request = call.state.permissionRequests.first { - // reject it - request.reject() - - // grant it - try await call.grant(request: request) -} -``` - -By integrating these advanced features, your Audio room application not only gains enhanced functionality but also provides a more engaging and controlled environment for users. This makes the Stream SDK a powerful tool for developing sophisticated audio conferencing applications. You can find more details about the permissions in the Stream SDK [here](../../guides/permissions-and-moderation). - -## Sample Code Comparison - -The end result for the app built with Stream SDK is 220 lines (for a fully working Audio room app) -```swift -import StreamVideoSwiftUI -import SwiftUI -import StreamVideo - -@main -struct AudioroomsApp: App { - @State var call: Call - @ObservedObject var state: CallState - @State private var callCreated: Bool = false - - private var client: StreamVideo - private let apiKey: String = "" // The API key can be found in the Credentials section - private let userId: String = "" // The User Id can be found in the Credentials section - private let token: String = "" // The Token can be found in the Credentials section - private let callId: String = "" // The CallId can be found in the Credentials section - - init() { - let user = User( - id: userId, - name: "Obi-Wan Kenobi", // name and imageURL are used in the UI - imageURL: .init(string: "https://picsum.photos/120") - ) - - // Initialize Stream Video client - self.client = StreamVideo( - apiKey: apiKey, - user: user, - token: .init(stringLiteral: token) - ) - - // Initialize the call object - let call = client.call(callType: "audio_room", callId: callId) - - self.call = call - self.state = call.state - } - - var body: some Scene { - WindowGroup { - VStack { - if callCreated { - DescriptionView( - title: call.state.custom["title"]?.stringValue, - description: call.state.custom["description"]?.stringValue, - participants: 
call.state.participants - ) - ParticipantsView( - participants: call.state.participants - ) - Spacer() - ControlsView(call: call, state: state) - } else { - Text("loading...") - } - }.task { - Task { - guard !callCreated else { return } - try await call.join( - create: true, - options: .init( - members: [ - .init(userId: "john_smith"), - .init(userId: "jane_doe"), - ], - custom: [ - "title": .string("SwiftUI heads"), - "description": .string("talking about SwiftUI") - ] - ) - ) - callCreated = true - } - } - } - } -} - -struct ControlsView: View { - var call: Call - @ObservedObject var state: CallState - - var body: some View { - HStack { - MicButtonView(microphone: call.microphone) - LiveButtonView(call: call, state: state) - } - } -} - -struct DescriptionView: View { - var title: String? - var description: String? - var participants: [CallParticipant] - - var body: some View { - VStack { - VStack { - Text("\(title ?? "")") - .font(.title) - .frame(maxWidth: .infinity, alignment: .leading) - .lineLimit(1) - .padding([.bottom], 8) - - Text("\(description ?? 
"")") - .font(.body) - .frame(maxWidth: .infinity, alignment: .leading) - .lineLimit(1) - .padding([.bottom], 4) - - Text("\(participants.count) participants") - .font(.caption) - .frame(maxWidth: .infinity, alignment: .leading) - }.padding([.leading, .trailing]) - } - } -} - -struct LiveButtonView: View { - var call: Call - @ObservedObject var state: CallState - - var body: some View { - if state.backstage { - Button { - Task { - try await call.goLive() - } - } label: { - Text("Go Live") - } - .buttonStyle(.borderedProminent).tint(.green) - } else { - Button { - Task { - try await call.stopLive() - } - } label: { - Text("Stop live") - } - .buttonStyle(.borderedProminent).tint(.red) - } - } -} - -struct MicButtonView: View { - @ObservedObject var microphone: MicrophoneManager - - var body: some View { - Button { - Task { - try await microphone.toggle() - } - } label: { - Image(systemName: microphone.status == .enabled ? "mic.circle" : "mic.slash.circle") - .foregroundColor(microphone.status == .enabled ? .red : .primary) - .font(.title) - } - } -} - -struct ParticipantsView: View { - var participants: [CallParticipant] - - var body: some View { - LazyVGrid(columns: [GridItem(.adaptive(minimum: 100))], spacing: 20) { - ForEach(participants) { - ParticipantView(participant: $0) - } - } - } -} - -struct ParticipantView: View { - var participant: CallParticipant - - var body: some View { - VStack{ - ZStack { - Circle() - .fill(participant.isSpeaking ? 
.green : .white) - .frame(width: 68, height: 68) - AsyncImage( - url: participant.profileImageURL, - content: { image in - image.resizable() - .aspectRatio(contentMode: .fit) - .frame(maxWidth: 64, maxHeight: 64) - .clipShape(Circle()) - }, - placeholder: { - Image(systemName: "person.crop.circle").font(.system(size: 60)) - } - ) - } - Text("\(participant.name)") - } - } -} - -struct PermissionRequestsView: View { - var call: Call - @ObservedObject var state: CallState - - var body: some View { - if let request = state.permissionRequests.first { - HStack { - Text("\(request.user.name) requested to \(request.permission)") - Button { - Task { - try await call.grant(request: request) - } - } label: { - Label("", systemImage: "hand.thumbsup.circle").tint(.green) - } - Button(action: request.reject) { - Label("", systemImage: "hand.thumbsdown.circle.fill").tint(.red) - } - } - } - } -} -``` - -While for the one built with Dolby SDK it's close to 400 lines: - -```swift -import SwiftUI -import VoxeetSDK -import Combine - -@main -struct DolbyAudio roomApp: App { - private static let token: String = "YOUR_DOLBY_TOKEN" - private static let callId: String = "A_CALL_ID" - - @State private var callCreated = false - @StateObject private var viewModel: VoxeetSDKViewModel - - init() { - self._viewModel = .init(wrappedValue: .init(Self.token, callId: Self.callId)) - } - - var body: some Scene { - WindowGroup { - VStack { - DescriptionView( - viewModel: viewModel, - title: viewModel.callId, - description: "Call Description n/a", - participants: viewModel.participants - ) - ParticipantsView( - viewModel: viewModel - ) - Spacer() - ControlsView(viewModel: viewModel) - } - } - } -} - -final class VoxeetSDKViewModel: ObservableObject, VTConferenceDelegate { - - @Published var participants: [VTParticipant] = [] - @Published var isMuted: Bool = false - @Published var speakingParticipants: [VTParticipant] = [] - @Published var hasActiveCall = false { - didSet { - if hasActiveCall { - 
speakingParticipantsCancellable = Timer - .publish(every: 1, on: .main, in: .default) - .autoconnect() - .receive(on: DispatchQueue.global(qos: .userInteractive)) - .sink { [weak self] _ in self?.updateSpeakingParticipants() } - } else { - speakingParticipantsCancellable?.cancel() - } - } - } - - var localParticipant: VTParticipant? { VoxeetSDK.shared.session.participant } - - private var speakingParticipantsCancellable: AnyCancellable? - let callId: String - - init(_ token: String, callId: String) { - self.callId = callId - VoxeetSDK.shared.initialize(accessToken: token) { [token] closure, isExpired in - closure(token) - } - - VoxeetSDK.shared.notification.push.type = .none - VoxeetSDK.shared.conference.defaultBuiltInSpeaker = true - VoxeetSDK.shared.conference.defaultVideo = false - - VoxeetSDK.shared.conference.delegate = self - } - - @discardableResult - func connectUser(name: String, avatarURL: String? = nil) async throws -> VTParticipantInfo { - try await withUnsafeThrowingContinuation { continuation in - let user = VTParticipantInfo( - externalID: UUID().uuidString, - name: name, - avatarURL: avatarURL - ) - - VoxeetSDK.shared.session.open(info: user) { error in - if let error { - continuation.resume(throwing: error) - } else { - continuation.resume(returning: user) - } - } - } - } - - @discardableResult - func joinCall() async throws -> VTConference { - try await withUnsafeThrowingContinuation { continuation in - let options = VTConferenceOptions() - options.params.dolbyVoice = true - options.alias = callId - - VoxeetSDK.shared - .conference - .create(options: options) { conference in - debugPrint(conference) - VoxeetSDK.shared - .conference - .join(conference: conference) { response in - debugPrint(response) - DispatchQueue.main.async { [weak self] in - self?.isMuted = VoxeetSDK.shared.conference.isMuted() - self?.hasActiveCall = true - } - continuation.resume(returning: conference) - } fail: { continuation.resume(throwing: $0) } - - } fail: { 
continuation.resume(throwing: $0) } - } - } - - func toggleAudio() { - let newValue = !isMuted - VoxeetSDK.shared.conference.mute(newValue) { [weak self] error in - DispatchQueue.main.async { - if let error { - debugPrint("\(error)") - } else { - self?.isMuted = newValue - } - } - } - } - - func leave() { - VoxeetSDK.shared.conference.leave { error in - if let error { - debugPrint("\(error)") - } else { - DispatchQueue.main.async { [weak self] in - self?.hasActiveCall = false - } - } - } - } - - func statusUpdated(status: VTConferenceStatus) { - - } - - func permissionsUpdated(permissions: [Int]) { - - } - - func participantAdded(participant: VTParticipant) { - updateParticipants() - } - - func participantUpdated(participant: VTParticipant) { - updateParticipants() - } - - func streamAdded(participant: VTParticipant, stream: MediaStream) { - updateParticipants() - } - - func streamUpdated(participant: VTParticipant, stream: MediaStream) { - updateParticipants() - } - - func streamRemoved(participant: VTParticipant, stream: MediaStream) { - updateParticipants() - } - - private func updateParticipants() { - participants = VoxeetSDK.shared.conference - .current? - .participants - .filter({ $0.streams.isEmpty == false }) ?? [] - } - - private func updateSpeakingParticipants() { - let localParticipant = self.localParticipant - let speakingParticipants = participants - .filter { - if $0.id != nil, $0.id == localParticipant?.id { - if !isMuted { - return VoxeetSDK.shared.conference.isSpeaking(participant: $0) - } else { - return false - } - } else { - return VoxeetSDK.shared.conference.isSpeaking(participant: $0) - } - } - - DispatchQueue.main.async { [weak self] in - self?.speakingParticipants = speakingParticipants - } - } -} - -struct DescriptionView: View { - @ObservedObject var viewModel: VoxeetSDKViewModel - var title: String? - var description: String? - var participants: [VTParticipant] - - var body: some View { - VStack { - VStack { - Text("\(title ?? 
"")") - .font(.title) - .frame(maxWidth: .infinity, alignment: .leading) - .lineLimit(1) - .padding([.bottom], 8) - - if viewModel.hasActiveCall { - Text("\(description ?? "")") - .font(.body) - .frame(maxWidth: .infinity, alignment: .leading) - .lineLimit(1) - .padding([.bottom], 4) - - Text("\(participants.count) participants") - .font(.caption) - .frame(maxWidth: .infinity, alignment: .leading) - } - }.padding([.leading, .trailing]) - } - } -} - -struct ControlsView: View { - @ObservedObject var viewModel: VoxeetSDKViewModel - - var body: some View { - HStack { - MicButtonView(viewModel: viewModel) - LiveButtonView(viewModel: viewModel) - } - } -} - -struct MicButtonView: View { - - @ObservedObject var viewModel: VoxeetSDKViewModel - - var body: some View { - if viewModel.hasActiveCall { - Button { - viewModel.toggleAudio() - } label: { - Image(systemName: viewModel.isMuted ? "mic.slash.circle" : "mic.circle") - .foregroundColor(viewModel.isMuted ? .red : .primary) - .font(.title) - } - } else { - EmptyView() - } - } -} - -struct LiveButtonView: View { - - @ObservedObject var viewModel: VoxeetSDKViewModel - - var body: some View { - if viewModel.hasActiveCall { - Button { - Task { - viewModel.leave() - } - } label: { - Text("Leave") - } - .buttonStyle(.borderedProminent).tint(.red) - } else { - Button { - Task { - do { - try await viewModel.connectUser( - name: "Obi-Wan Kenobi", - avatarURL: "https://picsum.photos/120" - ) - try await viewModel.joinCall() - } catch { - fatalError("\(error)") - } - } - } label: { - Text("Join") - } - } - } -} - -struct ParticipantView: View { - var participant: VTParticipant - let viewModel: VoxeetSDKViewModel - - @State private var isSpeaking = false - - var body: some View { - VStack{ - ZStack { - Circle() - .fill(isSpeaking ? .green : .white) - .frame(width: 68, height: 68) - .overlay( - AsyncImage( - url: .init(string: participant.info.avatarURL ?? 
""), - content: { image in - image.resizable() - .aspectRatio(contentMode: .fit) - .frame(maxWidth: 64, maxHeight: 64) - .clipShape(Circle()) - }, - placeholder: { - Image(systemName: "person.crop.circle").font(.system(size: 60)) - } - ) - ) - } - if participant == viewModel.localParticipant { - Text("\(participant.info.name ?? "N/A")(You)") - } else { - Text("\(participant.info.name ?? "N/A")") - } - } - .onReceive(viewModel.$speakingParticipants) { speakingParticipants in - isSpeaking = speakingParticipants.first { $0.id != nil && $0.id == participant.id } != nil - } - } -} - -struct ParticipantsView: View { - @ObservedObject var viewModel: VoxeetSDKViewModel - - var body: some View { - if viewModel.hasActiveCall { - LazyVGrid(columns: [GridItem(.adaptive(minimum: 100))], spacing: 20) { - ForEach(viewModel.participants, id: \.info) { - ParticipantView(participant: $0, viewModel: viewModel) - } - } - } - } -} -``` - -## Support and Resources -For comprehensive guidance on working with the Stream SDK, we recommend exploring the following tutorials provided by Stream. These resources offer in-depth insights into building various types of applications, including Audio room, Livestream, and Video Calling, using the Stream SDK. - -1. **Audio room Tutorial**: - - **Link**: [Stream Audio room Tutorial](https://getstream.io/video/sdk/ios/tutorial/audio-room/) - - **Overview**: This tutorial focuses on building an Audio room application. It covers everything from setup and configuration to implementing key features specific to audio conferencing. - - **Useful For**: Developers looking to build or migrate audio conferencing applications. - -2. **Livestream Tutorial**: - - **Link**: [Stream Livestream Tutorial](https://getstream.io/video/sdk/ios/tutorial/livestreaming/) - - **Overview**: This resource guides you through creating a livestreaming application. It includes details on setting up live video streams and managing interactive features. 
- - **Useful For**: Those interested in integrating live video streaming functionalities into their applications. - -3. **Video Calling Tutorial**: - - **Link**: [Stream Video Calling Tutorial](https://getstream.io/video/sdk/ios/tutorial/video-calling/) - - **Overview**: This tutorial provides insights into developing a video calling feature using Stream SDK. It details the implementation of video calls, handling call states, and UI integration. - - **Useful For**: Developers aiming to add or enhance video calling capabilities in their apps. - -Leveraging these tutorials will not only assist you in the migration process but also expand your understanding and skills in building diverse applications with the Stream SDK. Each tutorial is designed to address specific use cases and functionalities, making them valuable resources for developers at any skill level. Additionally, for any specific queries or support, the Stream developer community and our support team are available to assist you. - - -## Conclusion - -As we reach the end of this migration guide, it's clear that the Stream SDK stands out as a more feature-rich and user-friendly option compared to Dolby.io SDK, especially for developers working in the modern iOS ecosystem. Stream's commitment to leveraging the latest technologies in Swift and SwiftUI makes it not only a future-proof choice but also ensures a smoother and more efficient development experience. - -Stream SDK's rich features, including a built-in permission system and advanced call management, along with SwiftUI optimization, offer a comprehensive toolkit for sophisticated voice and video applications, enhancing functionality and user engagement. Its use of modern Swift technologies like concurrency streamlines coding, reduces development time, and boosts app performance, making it a highly beneficial choice for developers. 
- -In summary, migrating to the Stream SDK complements your existing tech stack, seamlessly integrating to enhance your audio conferencing experience. This SDK empowers you to develop more dynamic, scalable, and feature-rich applications with ease, blending effortlessly with both modern and traditional technologies. This transition represents an opportunity to elevate the experience of you Audio room app. - -We hope this guide has been a valuable resource in your migration journey and look forward to seeing the innovative applications you will create with the Stream SDK. diff --git a/docusaurus/docs/iOS/06-advanced/_category_.json b/docusaurus/docs/iOS/06-advanced/_category_.json deleted file mode 100644 index 0e23d1f4f..000000000 --- a/docusaurus/docs/iOS/06-advanced/_category_.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "label": "Advanced Guides" -} diff --git a/docusaurus/docs/iOS/assets/adding-removing-call-buttons.png b/docusaurus/docs/iOS/assets/adding-removing-call-buttons.png deleted file mode 100644 index 18f02a25e..000000000 Binary files a/docusaurus/docs/iOS/assets/adding-removing-call-buttons.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/audio-rooms-add-dependency-step-1.png b/docusaurus/docs/iOS/assets/audio-rooms-add-dependency-step-1.png deleted file mode 100644 index 00123a005..000000000 Binary files a/docusaurus/docs/iOS/assets/audio-rooms-add-dependency-step-1.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/audio-rooms-add-dependency-step-2.png b/docusaurus/docs/iOS/assets/audio-rooms-add-dependency-step-2.png deleted file mode 100644 index af881c688..000000000 Binary files a/docusaurus/docs/iOS/assets/audio-rooms-add-dependency-step-2.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/audio-rooms-create-project-step-1.png b/docusaurus/docs/iOS/assets/audio-rooms-create-project-step-1.png deleted file mode 100644 index 71bc7ae97..000000000 Binary files 
a/docusaurus/docs/iOS/assets/audio-rooms-create-project-step-1.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/audio-rooms-create-project-step-2.png b/docusaurus/docs/iOS/assets/audio-rooms-create-project-step-2.png deleted file mode 100644 index 4c75e29e6..000000000 Binary files a/docusaurus/docs/iOS/assets/audio-rooms-create-project-step-2.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/audio-rooms-create-project-step-3.png b/docusaurus/docs/iOS/assets/audio-rooms-create-project-step-3.png deleted file mode 100644 index 98a4f58f2..000000000 Binary files a/docusaurus/docs/iOS/assets/audio-rooms-create-project-step-3.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/audio-volume-indicator.png b/docusaurus/docs/iOS/assets/audio-volume-indicator.png deleted file mode 100644 index 812431679..000000000 Binary files a/docusaurus/docs/iOS/assets/audio-volume-indicator.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/audioroom_tutorial/basic.png b/docusaurus/docs/iOS/assets/audioroom_tutorial/basic.png deleted file mode 100644 index e12ac1d07..000000000 Binary files a/docusaurus/docs/iOS/assets/audioroom_tutorial/basic.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/audioroom_tutorial/description.png b/docusaurus/docs/iOS/assets/audioroom_tutorial/description.png deleted file mode 100644 index 738c8c130..000000000 Binary files a/docusaurus/docs/iOS/assets/audioroom_tutorial/description.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/audioroom_tutorial/finish.png b/docusaurus/docs/iOS/assets/audioroom_tutorial/finish.png deleted file mode 100644 index bcd86f5d7..000000000 Binary files a/docusaurus/docs/iOS/assets/audioroom_tutorial/finish.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/audioroom_tutorial/live.png b/docusaurus/docs/iOS/assets/audioroom_tutorial/live.png deleted file mode 100644 index 84169e1dc..000000000 Binary files 
a/docusaurus/docs/iOS/assets/audioroom_tutorial/live.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/audioroom_tutorial/participants.png b/docusaurus/docs/iOS/assets/audioroom_tutorial/participants.png deleted file mode 100644 index 5ab1c7848..000000000 Binary files a/docusaurus/docs/iOS/assets/audioroom_tutorial/participants.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/audioroom_tutorial/permissions.png b/docusaurus/docs/iOS/assets/audioroom_tutorial/permissions.png deleted file mode 100644 index 8cab8d84b..000000000 Binary files a/docusaurus/docs/iOS/assets/audioroom_tutorial/permissions.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/audioroom_tutorial/split.png b/docusaurus/docs/iOS/assets/audioroom_tutorial/split.png deleted file mode 100644 index 40ff44fa8..000000000 Binary files a/docusaurus/docs/iOS/assets/audioroom_tutorial/split.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/broadcast-extension.png b/docusaurus/docs/iOS/assets/broadcast-extension.png deleted file mode 100644 index 8382a2e7b..000000000 Binary files a/docusaurus/docs/iOS/assets/broadcast-extension.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/call-quality-rating.png b/docusaurus/docs/iOS/assets/call-quality-rating.png deleted file mode 100644 index 268014a54..000000000 Binary files a/docusaurus/docs/iOS/assets/call-quality-rating.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/call_member-grant-joincall.png b/docusaurus/docs/iOS/assets/call_member-grant-joincall.png deleted file mode 100644 index d950f9b30..000000000 Binary files a/docusaurus/docs/iOS/assets/call_member-grant-joincall.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/callkit_01.png b/docusaurus/docs/iOS/assets/callkit_01.png deleted file mode 100644 index 08a072a3f..000000000 Binary files a/docusaurus/docs/iOS/assets/callkit_01.png and /dev/null differ diff --git 
a/docusaurus/docs/iOS/assets/callkit_02.png b/docusaurus/docs/iOS/assets/callkit_02.png deleted file mode 100644 index e330dd1bc..000000000 Binary files a/docusaurus/docs/iOS/assets/callkit_02.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/chat-integration.gif b/docusaurus/docs/iOS/assets/chat-integration.gif deleted file mode 100644 index 1827eb8c4..000000000 Binary files a/docusaurus/docs/iOS/assets/chat-integration.gif and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/connection-unstable.png b/docusaurus/docs/iOS/assets/connection-unstable.png deleted file mode 100644 index 4b141e1bb..000000000 Binary files a/docusaurus/docs/iOS/assets/connection-unstable.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/cookbook-call-controls.jpg b/docusaurus/docs/iOS/assets/cookbook-call-controls.jpg deleted file mode 100644 index ad903b0e2..000000000 Binary files a/docusaurus/docs/iOS/assets/cookbook-call-controls.jpg and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/cookbook_01.png b/docusaurus/docs/iOS/assets/cookbook_01.png deleted file mode 100644 index 6d562b8fb..000000000 Binary files a/docusaurus/docs/iOS/assets/cookbook_01.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/cookbook_1.png b/docusaurus/docs/iOS/assets/cookbook_1.png deleted file mode 100644 index 61505ec8a..000000000 Binary files a/docusaurus/docs/iOS/assets/cookbook_1.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/cookbook_gradient.png b/docusaurus/docs/iOS/assets/cookbook_gradient.png deleted file mode 100644 index 8dd9bd7b0..000000000 Binary files a/docusaurus/docs/iOS/assets/cookbook_gradient.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/cookbook_incoming.png b/docusaurus/docs/iOS/assets/cookbook_incoming.png deleted file mode 100644 index a90f13355..000000000 Binary files a/docusaurus/docs/iOS/assets/cookbook_incoming.png and /dev/null differ diff --git 
a/docusaurus/docs/iOS/assets/cookbook_label.png b/docusaurus/docs/iOS/assets/cookbook_label.png deleted file mode 100644 index 13d429268..000000000 Binary files a/docusaurus/docs/iOS/assets/cookbook_label.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/custom-video-layout.png b/docusaurus/docs/iOS/assets/custom-video-layout.png deleted file mode 100644 index 4177b379c..000000000 Binary files a/docusaurus/docs/iOS/assets/custom-video-layout.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/developer-console-teamid-keyid-location.png b/docusaurus/docs/iOS/assets/developer-console-teamid-keyid-location.png deleted file mode 100644 index adca2271f..000000000 Binary files a/docusaurus/docs/iOS/assets/developer-console-teamid-keyid-location.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/incoming-call.png b/docusaurus/docs/iOS/assets/incoming-call.png deleted file mode 100644 index 6c51a3cf7..000000000 Binary files a/docusaurus/docs/iOS/assets/incoming-call.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/livestream-live-label.png b/docusaurus/docs/iOS/assets/livestream-live-label.png deleted file mode 100644 index 828e55b8a..000000000 Binary files a/docusaurus/docs/iOS/assets/livestream-live-label.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/livestream-player.png b/docusaurus/docs/iOS/assets/livestream-player.png deleted file mode 100644 index 89ab3c08b..000000000 Binary files a/docusaurus/docs/iOS/assets/livestream-player.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/lobby-preview.png b/docusaurus/docs/iOS/assets/lobby-preview.png deleted file mode 100644 index b33e28908..000000000 Binary files a/docusaurus/docs/iOS/assets/lobby-preview.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/manual-video-quality-preview.png b/docusaurus/docs/iOS/assets/manual-video-quality-preview.png deleted file mode 100644 index 7d77208e3..000000000 Binary files 
a/docusaurus/docs/iOS/assets/manual-video-quality-preview.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/network-quality.png b/docusaurus/docs/iOS/assets/network-quality.png deleted file mode 100644 index e4ae86f16..000000000 Binary files a/docusaurus/docs/iOS/assets/network-quality.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/new_project.png b/docusaurus/docs/iOS/assets/new_project.png deleted file mode 100644 index 7ec9da47f..000000000 Binary files a/docusaurus/docs/iOS/assets/new_project.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/no-video-fallback-avatar.png b/docusaurus/docs/iOS/assets/no-video-fallback-avatar.png deleted file mode 100644 index e4168bb9b..000000000 Binary files a/docusaurus/docs/iOS/assets/no-video-fallback-avatar.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/permission-requests.png b/docusaurus/docs/iOS/assets/permission-requests.png deleted file mode 100644 index ffd2b323a..000000000 Binary files a/docusaurus/docs/iOS/assets/permission-requests.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/permissions.png b/docusaurus/docs/iOS/assets/permissions.png deleted file mode 100644 index fe120b60c..000000000 Binary files a/docusaurus/docs/iOS/assets/permissions.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/push-notifications-dashboard-menu.png b/docusaurus/docs/iOS/assets/push-notifications-dashboard-menu.png deleted file mode 100644 index 1a8204276..000000000 Binary files a/docusaurus/docs/iOS/assets/push-notifications-dashboard-menu.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/reactions.png b/docusaurus/docs/iOS/assets/reactions.png deleted file mode 100644 index 7053a340b..000000000 Binary files a/docusaurus/docs/iOS/assets/reactions.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/regular-push-configuration-example.png b/docusaurus/docs/iOS/assets/regular-push-configuration-example.png deleted 
file mode 100644 index aa7463016..000000000 Binary files a/docusaurus/docs/iOS/assets/regular-push-configuration-example.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/removing-label-and-indicators.png b/docusaurus/docs/iOS/assets/removing-label-and-indicators.png deleted file mode 100644 index dbcd403fb..000000000 Binary files a/docusaurus/docs/iOS/assets/removing-label-and-indicators.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/replacing-call-controls.png b/docusaurus/docs/iOS/assets/replacing-call-controls.png deleted file mode 100644 index 8ec1192de..000000000 Binary files a/docusaurus/docs/iOS/assets/replacing-call-controls.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/screensharing-dashboard.png b/docusaurus/docs/iOS/assets/screensharing-dashboard.png deleted file mode 100644 index 36ce4d42b..000000000 Binary files a/docusaurus/docs/iOS/assets/screensharing-dashboard.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/speaking-while-muted.png b/docusaurus/docs/iOS/assets/speaking-while-muted.png deleted file mode 100644 index ec4058898..000000000 Binary files a/docusaurus/docs/iOS/assets/speaking-while-muted.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/spm.png b/docusaurus/docs/iOS/assets/spm.png deleted file mode 100644 index b82153edd..000000000 Binary files a/docusaurus/docs/iOS/assets/spm.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/spm_select.png b/docusaurus/docs/iOS/assets/spm_select.png deleted file mode 100644 index a09856c2f..000000000 Binary files a/docusaurus/docs/iOS/assets/spm_select.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/stream_filter.jpg b/docusaurus/docs/iOS/assets/stream_filter.jpg deleted file mode 100644 index 18ee35745..000000000 Binary files a/docusaurus/docs/iOS/assets/stream_filter.jpg and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/tutorial-livestream.png 
b/docusaurus/docs/iOS/assets/tutorial-livestream.png deleted file mode 100644 index b04ba089b..000000000 Binary files a/docusaurus/docs/iOS/assets/tutorial-livestream.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/user-revoke-joincall.png b/docusaurus/docs/iOS/assets/user-revoke-joincall.png deleted file mode 100644 index 1aee4570d..000000000 Binary files a/docusaurus/docs/iOS/assets/user-revoke-joincall.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/video-calling-preview-01.png b/docusaurus/docs/iOS/assets/video-calling-preview-01.png deleted file mode 100644 index 2c10a0bac..000000000 Binary files a/docusaurus/docs/iOS/assets/video-calling-preview-01.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/video-calling-preview-02.png b/docusaurus/docs/iOS/assets/video-calling-preview-02.png deleted file mode 100644 index aeda9b33b..000000000 Binary files a/docusaurus/docs/iOS/assets/video-calling-preview-02.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/video-rendering-preview-01.png b/docusaurus/docs/iOS/assets/video-rendering-preview-01.png deleted file mode 100644 index 0f8464324..000000000 Binary files a/docusaurus/docs/iOS/assets/video-rendering-preview-01.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/video-rendering-preview-02.png b/docusaurus/docs/iOS/assets/video-rendering-preview-02.png deleted file mode 100644 index 9fb1923c9..000000000 Binary files a/docusaurus/docs/iOS/assets/video-rendering-preview-02.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/video_tutorial/permissions.png b/docusaurus/docs/iOS/assets/video_tutorial/permissions.png deleted file mode 100644 index 9f6149612..000000000 Binary files a/docusaurus/docs/iOS/assets/video_tutorial/permissions.png and /dev/null differ diff --git a/docusaurus/docs/iOS/assets/voip-push-configuration-example.png b/docusaurus/docs/iOS/assets/voip-push-configuration-example.png deleted file mode 100644 index 
0544731c9..000000000 Binary files a/docusaurus/docs/iOS/assets/voip-push-configuration-example.png and /dev/null differ diff --git a/docusaurus/sidebars-ios.js b/docusaurus/sidebars-ios.js deleted file mode 100644 index d0275ca76..000000000 --- a/docusaurus/sidebars-ios.js +++ /dev/null @@ -1,54 +0,0 @@ -module.exports = { - mySidebar: [ - { - type: "category", - label: "Setup", - items: [ - { - type: "autogenerated", - dirName: "01-basics", - }, - ], - }, - { - type: "category", - label: "Core Concepts", - items: [ - { - type: "autogenerated", - dirName: "03-guides", - }, - ], - }, - { - type: "category", - label: "UI Components", - items: [ - { - type: "autogenerated", - dirName: "04-ui-components", - }, - ], - }, - { - type: "category", - label: "UI Cookbook", - items: [ - { - type: "autogenerated", - dirName: "05-ui-cookbook", - }, - ], - }, - { - type: "category", - label: "Advanced Guides", - items: [ - { - type: "autogenerated", - dirName: "06-advanced", - }, - ], - }, - ], -}; \ No newline at end of file diff --git a/fastlane/Fastfile b/fastlane/Fastfile index a85822165..5b14922a4 100644 --- a/fastlane/Fastfile +++ b/fastlane/Fastfile @@ -681,7 +681,7 @@ lane :sources_matrix do uikit: ['Sources', 'StreamVideoUIKitTests', xcode_project], swiftui_sample_apps: ['Sources', 'DemoApp', xcode_project], uikit_sample_apps: ['Sources', 'DemoAppUIKit', xcode_project], - documentation_tests: ['Sources', 'DocumentationTests', 'docusaurus', xcode_project], + documentation_tests: ['Sources', 'DocumentationTests', xcode_project], size: ['Sources', xcode_project], ruby: ['fastlane'] }