From c06d74b210f96986078cccc02f3aff754db40662 Mon Sep 17 00:00:00 2001 From: <> Date: Mon, 21 Oct 2024 14:35:59 +0000 Subject: [PATCH] Deployed fd497ae with MkDocs version: 1.6.1 --- .nojekyll | 0 404.html | 2101 ++++++ Basics/Choosing-A-Network.html | 2389 ++++++ Basics/Getting-Started.html | 2249 ++++++ Basics/Introduction-To-Chat-Rooms.html | 2265 ++++++ Basics/Licensing.html | 2144 ++++++ Basics/Other-Integrations.html | 2210 ++++++ Basics/Quick-Start-DR2.html | 2252 ++++++ Basics/Quick-Start-Forge-Remastered.html | 2237 ++++++ Basics/Quick-Start-Fusion.html | 2256 ++++++ Basics/Quick-Start-Mirror.html | 2259 ++++++ Basics/Quick-Start-Photon-Bolt.html | 2177 ++++++ Basics/Quick-Start-Photon.html | 2244 ++++++ Basics/Quick-Start-PureP2P.html | 2259 ++++++ Basics/Quick-Start-Steamworks.Net-P2P.html | 2166 ++++++ Basics/Quick-Start-TNet3.html | 2272 ++++++ Basics/Quick-Start-UNet-HLAPI.html | 2261 ++++++ Basics/Quick-Start-Unity-NFGO.html | 2241 ++++++ Platforms/Android.html | 2147 ++++++ Platforms/Linux.html | 2145 ++++++ Platforms/MacOS.html | 2155 ++++++ Platforms/Magic Leap.html | 2146 ++++++ Platforms/Oculus OVR.html | 2150 ++++++ Platforms/Windows Desktop.html | 2187 ++++++ Platforms/Windows UWP & Hololens.html | 2145 ++++++ Platforms/iOS.html | 2155 ++++++ Reference/Audio/BaseMicrophoneSubscriber.html | 2201 ++++++ Reference/Audio/IMicrophoneCapture.html | 2272 ++++++ Reference/Audio/IMicrophoneSubscriber.html | 2201 ++++++ Reference/Audio/VoiceSettings.html | 2307 ++++++ Reference/Components/Dissonance-Comms.html | 2326 ++++++ .../Components/Voice-Broadcast-Trigger.html | 2427 ++++++ .../Voice-Proximity-Broadcast-Trigger.html | 2323 ++++++ .../Voice-Proximity-Receipt-Trigger.html | 2247 ++++++ .../Components/Voice-Receipt-Trigger.html | 2198 ++++++ Reference/Networking/Network-Protocol.html | 2482 ++++++ Reference/Other/PlayerChannel.html | 2257 ++++++ Reference/Other/PlayerChannels.html | 2223 ++++++ Reference/Other/RemoteChannel.html | 2261 ++++++ 
Reference/Other/RoomChannel.html | 2257 ++++++ Reference/Other/RoomChannels.html | 2223 ++++++ Reference/Other/Rooms.html | 2219 ++++++ Reference/Other/TextChat.html | 2208 ++++++ Reference/Other/VoicePlayerState.html | 2463 ++++++ Reference/Other/VoiceSettings.html | 2312 ++++++ Tutorials/Access-Control-Tokens.html | 2211 ++++++ Tutorials/Acoustic-Echo-Cancellation.html | 2320 ++++++ Tutorials/Audio-Mixing.html | 2212 ++++++ Tutorials/Channel-Priority.html | 2217 ++++++ Tutorials/Channel-Volume.html | 2210 ++++++ Tutorials/Collider-Chat-Room.html | 2246 ++++++ Tutorials/Custom-Microphone-Capture.html | 2239 ++++++ Tutorials/Custom-Networking.html | 2652 +++++++ Tutorials/Custom-Position-Tracking.html | 2298 ++++++ Tutorials/Direct-Player-Transmit.html | 2202 ++++++ Tutorials/Directly-Using-Channels.html | 2223 ++++++ Tutorials/Global-Chat-Room.html | 2144 ++++++ Tutorials/Playback-Prefab.html | 2248 ++++++ Tutorials/Player-State.html | 2199 ++++++ Tutorials/Position-Tracking-For-Bolt.html | 2262 ++++++ Tutorials/Position-Tracking.html | 2254 ++++++ Tutorials/Proximity-Chat.html | 2212 ++++++ Tutorials/Push-to-Talk.html | 2193 ++++++ Tutorials/Script-Controlled-Speech.html | 2273 ++++++ Tutorials/Spatializer-Plugin.html | 2137 ++++++ Tutorials/Team-Chat-Rooms.html | 2202 ++++++ Tutorials/Text-Chat.html | 2230 ++++++ Tutorials/UsingIMicrophoneSubscriber.html | 2210 ++++++ assets/images/favicon.png | Bin 0 -> 1870 bytes assets/javascripts/bundle.83f73b43.min.js | 16 + assets/javascripts/bundle.83f73b43.min.js.map | 7 + assets/javascripts/lunr/min/lunr.ar.min.js | 1 + assets/javascripts/lunr/min/lunr.da.min.js | 18 + assets/javascripts/lunr/min/lunr.de.min.js | 18 + assets/javascripts/lunr/min/lunr.du.min.js | 18 + assets/javascripts/lunr/min/lunr.el.min.js | 1 + assets/javascripts/lunr/min/lunr.es.min.js | 18 + assets/javascripts/lunr/min/lunr.fi.min.js | 18 + assets/javascripts/lunr/min/lunr.fr.min.js | 18 + assets/javascripts/lunr/min/lunr.he.min.js | 1 + 
assets/javascripts/lunr/min/lunr.hi.min.js | 1 + assets/javascripts/lunr/min/lunr.hu.min.js | 18 + assets/javascripts/lunr/min/lunr.hy.min.js | 1 + assets/javascripts/lunr/min/lunr.it.min.js | 18 + assets/javascripts/lunr/min/lunr.ja.min.js | 1 + assets/javascripts/lunr/min/lunr.jp.min.js | 1 + assets/javascripts/lunr/min/lunr.kn.min.js | 1 + assets/javascripts/lunr/min/lunr.ko.min.js | 1 + assets/javascripts/lunr/min/lunr.multi.min.js | 1 + assets/javascripts/lunr/min/lunr.nl.min.js | 18 + assets/javascripts/lunr/min/lunr.no.min.js | 18 + assets/javascripts/lunr/min/lunr.pt.min.js | 18 + assets/javascripts/lunr/min/lunr.ro.min.js | 18 + assets/javascripts/lunr/min/lunr.ru.min.js | 18 + assets/javascripts/lunr/min/lunr.sa.min.js | 1 + .../lunr/min/lunr.stemmer.support.min.js | 1 + assets/javascripts/lunr/min/lunr.sv.min.js | 18 + assets/javascripts/lunr/min/lunr.ta.min.js | 1 + assets/javascripts/lunr/min/lunr.te.min.js | 1 + assets/javascripts/lunr/min/lunr.th.min.js | 1 + assets/javascripts/lunr/min/lunr.tr.min.js | 18 + assets/javascripts/lunr/min/lunr.vi.min.js | 1 + assets/javascripts/lunr/min/lunr.zh.min.js | 1 + assets/javascripts/lunr/tinyseg.js | 206 + assets/javascripts/lunr/wordcut.js | 6708 +++++++++++++++++ .../workers/search.6ce7567c.min.js | 42 + .../workers/search.6ce7567c.min.js.map | 7 + assets/stylesheets/main.0253249f.min.css | 1 + assets/stylesheets/main.0253249f.min.css.map | 1 + assets/stylesheets/palette.06af60db.min.css | 1 + .../stylesheets/palette.06af60db.min.css.map | 1 + css/header_tweaks.css | 11 + css/pygment_tweaks.css | 9 + dissonance.png | Bin 0 -> 281 bytes images/AecSettings.png | Bin 0 -> 12677 bytes images/AecStatus.png | Bin 0 -> 17566 bytes images/AudioMixer_WithAecFilter.png | Bin 0 -> 21071 bytes images/AudioMixing_Distortion.png | Bin 0 -> 27391 bytes images/AudioMixing_Ducking.png | Bin 0 -> 37365 bytes images/AudioPluginDissonance.so.png | Bin 0 -> 38876 bytes images/AudioSource_OutputHighlighted.png | Bin 0 -> 20903 
bytes images/AudioSource_SpatializeHighlighted.png | Bin 0 -> 20935 bytes images/Bolt-State.png | Bin 0 -> 11034 bytes images/BoxCollider.png | Bin 0 -> 7498 bytes images/BroadcastToSelf_Inspector.png | Bin 0 -> 31087 bytes images/DissonanceComms_Inspector.png | Bin 0 -> 24583 bytes images/DissonanceComms_Tokens.png | Bin 0 -> 24172 bytes images/GlobalChatRoom_Inspector.png | Bin 0 -> 46776 bytes images/HLAPI_QoS_Channels.png | Bin 0 -> 57542 bytes images/OVR Local Avatar.png | Bin 0 -> 35052 bytes images/PlaybackPrefab_OutputHighlighted.png | Bin 0 -> 31074 bytes images/PlayerPrefab_PositionalAudio.png | Bin 0 -> 28500 bytes images/PlayerProximityChat_Inspector.png | Bin 0 -> 64996 bytes .../ProximityBroadcastTrigger_Inspector.png | Bin 0 -> 68386 bytes images/ProximityReceiptTrigger_Inspector.png | Bin 0 -> 63456 bytes images/RoomConfiguration_Lobby.png | Bin 0 -> 15287 bytes images/RoomTriggerVolume_Inspector.png | Bin 0 -> 18992 bytes images/TeamChat_Inspector.png | Bin 0 -> 61972 bytes images/VAD_States.png | Bin 0 -> 10664 bytes images/VadDebugUI.png | Bin 0 -> 25009 bytes .../VoiceBroadcastTrigger-AmplitudeFaders.png | Bin 0 -> 18702 bytes images/VoiceBroadcastTrigger_Default.png | Bin 0 -> 28406 bytes images/VoiceBroadcastTrigger_DifferentPTT.png | Bin 0 -> 20683 bytes images/VoiceBroadcastTrigger_LobbyRoom.png | Bin 0 -> 28356 bytes images/VoiceBroadcastTrigger_Overview.png | Bin 0 -> 32333 bytes images/VoiceBroadcastTrigger_PTT.png | Bin 0 -> 37104 bytes images/VoiceBroadcastTrigger_Player.png | Bin 0 -> 31125 bytes images/VoiceBroadcastTrigger_Positional.png | Bin 0 -> 31454 bytes images/VoiceBroadcastTrigger_Priority.png | Bin 0 -> 35744 bytes ...eBroadcastTrigger_Section_AccessTokens.png | Bin 0 -> 18770 bytes ...roadcastTrigger_Section_ActivationMode.png | Bin 0 -> 22100 bytes ...oadcastTrigger_Section_AmplitudeFaders.png | Bin 0 -> 18702 bytes ...oadcastTrigger_Section_ChannelMetadata.png | Bin 0 -> 25051 bytes 
...ceBroadcastTrigger_Section_ChannelType.png | Bin 0 -> 17451 bytes ...VoiceBroadcastTrigger_SectionsOverview.png | Bin 0 -> 12089 bytes images/VoiceBroadcastTrigger_Targets.png | Bin 0 -> 38751 bytes images/VoiceReceiptTrigger_Default.png | Bin 0 -> 15617 bytes images/VoiceReceiptTrigger_LobbyRoom.png | Bin 0 -> 15315 bytes images/VoiceReceiptTrigger_Overview.png | Bin 0 -> 15020 bytes images/VoiceReceiptTrigger_Rooms.png | Bin 0 -> 17968 bytes images/VoiceReceiptTrigger_Tokens.png | Bin 0 -> 23091 bytes images/VoiceSettings.webp | Bin 0 -> 53780 bytes images/VoiceSettings2.webp | Bin 0 -> 47822 bytes images/VoiceSettings_Editor.png | Bin 0 -> 138102 bytes index.html | 2219 ++++++ javascript/redirector.js | 12 + search/search_index.json | 1 + sitemap.xml | 271 + sitemap.xml.gz | Bin 0 -> 890 bytes 169 files changed, 160013 insertions(+) create mode 100644 .nojekyll create mode 100644 404.html create mode 100644 Basics/Choosing-A-Network.html create mode 100644 Basics/Getting-Started.html create mode 100644 Basics/Introduction-To-Chat-Rooms.html create mode 100644 Basics/Licensing.html create mode 100644 Basics/Other-Integrations.html create mode 100644 Basics/Quick-Start-DR2.html create mode 100644 Basics/Quick-Start-Forge-Remastered.html create mode 100644 Basics/Quick-Start-Fusion.html create mode 100644 Basics/Quick-Start-Mirror.html create mode 100644 Basics/Quick-Start-Photon-Bolt.html create mode 100644 Basics/Quick-Start-Photon.html create mode 100644 Basics/Quick-Start-PureP2P.html create mode 100644 Basics/Quick-Start-Steamworks.Net-P2P.html create mode 100644 Basics/Quick-Start-TNet3.html create mode 100644 Basics/Quick-Start-UNet-HLAPI.html create mode 100644 Basics/Quick-Start-Unity-NFGO.html create mode 100644 Platforms/Android.html create mode 100644 Platforms/Linux.html create mode 100644 Platforms/MacOS.html create mode 100644 Platforms/Magic Leap.html create mode 100644 Platforms/Oculus OVR.html create mode 100644 Platforms/Windows Desktop.html 
create mode 100644 Platforms/Windows UWP & Hololens.html create mode 100644 Platforms/iOS.html create mode 100644 Reference/Audio/BaseMicrophoneSubscriber.html create mode 100644 Reference/Audio/IMicrophoneCapture.html create mode 100644 Reference/Audio/IMicrophoneSubscriber.html create mode 100644 Reference/Audio/VoiceSettings.html create mode 100644 Reference/Components/Dissonance-Comms.html create mode 100644 Reference/Components/Voice-Broadcast-Trigger.html create mode 100644 Reference/Components/Voice-Proximity-Broadcast-Trigger.html create mode 100644 Reference/Components/Voice-Proximity-Receipt-Trigger.html create mode 100644 Reference/Components/Voice-Receipt-Trigger.html create mode 100644 Reference/Networking/Network-Protocol.html create mode 100644 Reference/Other/PlayerChannel.html create mode 100644 Reference/Other/PlayerChannels.html create mode 100644 Reference/Other/RemoteChannel.html create mode 100644 Reference/Other/RoomChannel.html create mode 100644 Reference/Other/RoomChannels.html create mode 100644 Reference/Other/Rooms.html create mode 100644 Reference/Other/TextChat.html create mode 100644 Reference/Other/VoicePlayerState.html create mode 100644 Reference/Other/VoiceSettings.html create mode 100644 Tutorials/Access-Control-Tokens.html create mode 100644 Tutorials/Acoustic-Echo-Cancellation.html create mode 100644 Tutorials/Audio-Mixing.html create mode 100644 Tutorials/Channel-Priority.html create mode 100644 Tutorials/Channel-Volume.html create mode 100644 Tutorials/Collider-Chat-Room.html create mode 100644 Tutorials/Custom-Microphone-Capture.html create mode 100644 Tutorials/Custom-Networking.html create mode 100644 Tutorials/Custom-Position-Tracking.html create mode 100644 Tutorials/Direct-Player-Transmit.html create mode 100644 Tutorials/Directly-Using-Channels.html create mode 100644 Tutorials/Global-Chat-Room.html create mode 100644 Tutorials/Playback-Prefab.html create mode 100644 Tutorials/Player-State.html create mode 100644 
Tutorials/Position-Tracking-For-Bolt.html create mode 100644 Tutorials/Position-Tracking.html create mode 100644 Tutorials/Proximity-Chat.html create mode 100644 Tutorials/Push-to-Talk.html create mode 100644 Tutorials/Script-Controlled-Speech.html create mode 100644 Tutorials/Spatializer-Plugin.html create mode 100644 Tutorials/Team-Chat-Rooms.html create mode 100644 Tutorials/Text-Chat.html create mode 100644 Tutorials/UsingIMicrophoneSubscriber.html create mode 100644 assets/images/favicon.png create mode 100644 assets/javascripts/bundle.83f73b43.min.js create mode 100644 assets/javascripts/bundle.83f73b43.min.js.map create mode 100644 assets/javascripts/lunr/min/lunr.ar.min.js create mode 100644 assets/javascripts/lunr/min/lunr.da.min.js create mode 100644 assets/javascripts/lunr/min/lunr.de.min.js create mode 100644 assets/javascripts/lunr/min/lunr.du.min.js create mode 100644 assets/javascripts/lunr/min/lunr.el.min.js create mode 100644 assets/javascripts/lunr/min/lunr.es.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.he.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hu.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hy.min.js create mode 100644 assets/javascripts/lunr/min/lunr.it.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ja.min.js create mode 100644 assets/javascripts/lunr/min/lunr.jp.min.js create mode 100644 assets/javascripts/lunr/min/lunr.kn.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ko.min.js create mode 100644 assets/javascripts/lunr/min/lunr.multi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.nl.min.js create mode 100644 assets/javascripts/lunr/min/lunr.no.min.js create mode 100644 assets/javascripts/lunr/min/lunr.pt.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ro.min.js 
create mode 100644 assets/javascripts/lunr/min/lunr.ru.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sa.min.js create mode 100644 assets/javascripts/lunr/min/lunr.stemmer.support.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sv.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ta.min.js create mode 100644 assets/javascripts/lunr/min/lunr.te.min.js create mode 100644 assets/javascripts/lunr/min/lunr.th.min.js create mode 100644 assets/javascripts/lunr/min/lunr.tr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.vi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.zh.min.js create mode 100644 assets/javascripts/lunr/tinyseg.js create mode 100644 assets/javascripts/lunr/wordcut.js create mode 100644 assets/javascripts/workers/search.6ce7567c.min.js create mode 100644 assets/javascripts/workers/search.6ce7567c.min.js.map create mode 100644 assets/stylesheets/main.0253249f.min.css create mode 100644 assets/stylesheets/main.0253249f.min.css.map create mode 100644 assets/stylesheets/palette.06af60db.min.css create mode 100644 assets/stylesheets/palette.06af60db.min.css.map create mode 100644 css/header_tweaks.css create mode 100644 css/pygment_tweaks.css create mode 100644 dissonance.png create mode 100644 images/AecSettings.png create mode 100644 images/AecStatus.png create mode 100644 images/AudioMixer_WithAecFilter.png create mode 100644 images/AudioMixing_Distortion.png create mode 100644 images/AudioMixing_Ducking.png create mode 100644 images/AudioPluginDissonance.so.png create mode 100644 images/AudioSource_OutputHighlighted.png create mode 100644 images/AudioSource_SpatializeHighlighted.png create mode 100644 images/Bolt-State.png create mode 100644 images/BoxCollider.png create mode 100644 images/BroadcastToSelf_Inspector.png create mode 100644 images/DissonanceComms_Inspector.png create mode 100644 images/DissonanceComms_Tokens.png create mode 100644 images/GlobalChatRoom_Inspector.png create mode 100644 
images/HLAPI_QoS_Channels.png create mode 100644 images/OVR Local Avatar.png create mode 100644 images/PlaybackPrefab_OutputHighlighted.png create mode 100644 images/PlayerPrefab_PositionalAudio.png create mode 100644 images/PlayerProximityChat_Inspector.png create mode 100644 images/ProximityBroadcastTrigger_Inspector.png create mode 100644 images/ProximityReceiptTrigger_Inspector.png create mode 100644 images/RoomConfiguration_Lobby.png create mode 100644 images/RoomTriggerVolume_Inspector.png create mode 100644 images/TeamChat_Inspector.png create mode 100644 images/VAD_States.png create mode 100644 images/VadDebugUI.png create mode 100644 images/VoiceBroadcastTrigger-AmplitudeFaders.png create mode 100644 images/VoiceBroadcastTrigger_Default.png create mode 100644 images/VoiceBroadcastTrigger_DifferentPTT.png create mode 100644 images/VoiceBroadcastTrigger_LobbyRoom.png create mode 100644 images/VoiceBroadcastTrigger_Overview.png create mode 100644 images/VoiceBroadcastTrigger_PTT.png create mode 100644 images/VoiceBroadcastTrigger_Player.png create mode 100644 images/VoiceBroadcastTrigger_Positional.png create mode 100644 images/VoiceBroadcastTrigger_Priority.png create mode 100644 images/VoiceBroadcastTrigger_Section_AccessTokens.png create mode 100644 images/VoiceBroadcastTrigger_Section_ActivationMode.png create mode 100644 images/VoiceBroadcastTrigger_Section_AmplitudeFaders.png create mode 100644 images/VoiceBroadcastTrigger_Section_ChannelMetadata.png create mode 100644 images/VoiceBroadcastTrigger_Section_ChannelType.png create mode 100644 images/VoiceBroadcastTrigger_SectionsOverview.png create mode 100644 images/VoiceBroadcastTrigger_Targets.png create mode 100644 images/VoiceReceiptTrigger_Default.png create mode 100644 images/VoiceReceiptTrigger_LobbyRoom.png create mode 100644 images/VoiceReceiptTrigger_Overview.png create mode 100644 images/VoiceReceiptTrigger_Rooms.png create mode 100644 images/VoiceReceiptTrigger_Tokens.png create mode 100644 
images/VoiceSettings.webp create mode 100644 images/VoiceSettings2.webp create mode 100644 images/VoiceSettings_Editor.png create mode 100644 index.html create mode 100644 javascript/redirector.js create mode 100644 search/search_index.json create mode 100644 sitemap.xml create mode 100644 sitemap.xml.gz diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/404.html b/404.html new file mode 100644 index 0000000..7ee5004 --- /dev/null +++ b/404.html @@ -0,0 +1,2101 @@ + + + +
+ + + + + + + + + + + + + + +The core Dissonance package does not include any network - instead Dissonance relies on integrations with other network systems to send and receive data. This gives you a lot of flexibility in choosing how you want voice data to be sent over the network. If none of the existing integrations are suitable for you, you can also write a custom network integration.
+Dissonance has support for 10 network systems. All of these packages can be downloaded from the asset store for free and receive support as part of Dissonance.
+There is also a community developed and maintained package for Fish Networking, available from GitHub.
+If you already have a network system set up in your application then simply sending voice through that system is the easiest option. If there is an integration package listed above for your networking system it is recommended to use that.
+However, if there is no available integration package then there are two options. The first option is to build your own custom network integration. Dissonance includes base classes which can be extended to create a new network integration relatively easily - all that is required is writing the code to send packets, receive packets and inform Dissonance about session events (leave/join/connect/disconnect etc). This requires that your networking system supports Unreliable & Unordered packets (e.g. UDP), TCP is not suitable for high quality voice chat.
+The second option is to establish another network session just for voice, using one of the existing integrations. Any integration can be used for this.
+If you do not have any network system already set up in your application then you can choose any supported network integration.
+Mirror is a community built replacement for UNet. If you are just starting out with Unity networking, this is our recommendation.
+Fish Networking is a free open source, easy to use, feature rich networking library for Unity. Dissonance has support for Fish networking through a community developed integration package, available here.
+Netcode for GameObjects is the new multiplayer solution developed by Unity.
+Forge Remastered is a free networking system available on the asset store.
+Dark Rift 2 is a free networking system available on the asset store.
+TNet3 is a networking and serialization system available on the asset store.
+Photon Unity Networking 2 is the upgrade to the very popular Photon Unity Networking asset. It is a free (up to 20 CCU) networking system available on the asset store.
+Photon Fusion is a new network package from the developer of the very popular Photon Unity Networking asset.
+WebRTC Video Chat is a p2p networking, voice and video chat application. This integration uses just the networking part of the asset to carry Dissonance voice traffic - it does not integrate in a way that allows WebRTC Voice Chat
and Dissonance Voice Chat
to understand each other. This can be used to quickly set up a fully p2p chat session requiring only a very lightweight session server (which carries no voice traffic).
UNet is the deprecated Unity networking system. It is not recommended to use this for new applications.
+ + + + + + + + + + + + + +In this tutorial you will create a new project, import Dissonance and change some settings required for Dissonance to work properly.
+Import the Dissonance asset into the project. This will install two folders into your project: Assets/Plugins/Dissonance
contains the main source code of Dissonance, Assets/Dissonance
will contain any integration packages which you install.
When you import a new version of Dissonance a window will pop up with a list of available integrations, you can launch this window again by navigating to Windows > Dissonance > Download Integrations
.
To use Dissonance you must install a network backend integration - without this Dissonance cannot send anything over the network! Refer to these docs for help on choosing which one to use. Each integration package includes a demo scene, you should run this demo scene once you have installed the package to verify that Dissonance is properly installed and working in your project.
+You may also wish to use some of our other integrations. Refer to these docs for a list of what's available.
+Multiplayer games need to keep running (and processing network packets) even when the game window does not have focus. To do this navigate to Edit -> Project Settings -> Player
and enable Run In Background.
Some platforms have special setup requirements, make sure to read the documentation for the platforms you want to work with:
+That's all you need to get a project set up and ready for Dissonance. Next, follow the appropriate Quick Start tutorial for the network system you plan to use:
+By default when a player speaks no one will hear them - before players can communicate you need to set up where to send voice to on the speaking end and where to receive voice from on the listening end. Where to send to is controlled by a "Voice Broadcast Trigger" component and where to receive from is controlled by a "Voice Receipt Trigger" component.
+The "Voice Broadcast Trigger" does not only control who sends to where, it also controls when voice is transmitted to the given target. This is referred to as "Activation" and is divided into two further sections: does the user want to speak and is the user allowed to speak.
+The "Activation Mode" setting on the "Voice Broadcast Trigger" determines how the user indicates if they want to speak, this can be set to: "None", "Voice Activation" and "Push To Talk" (see the Voice Broadcast Trigger reference documentation for further details).
+The "Trigger Activation" setting is the setting for if the user is allowed to speak, an associated trigger volume can enable and disable the broadcast as the player moves in and out of the volume. This can be used to create areas in the scene the player needs to stand inside to be heard (e.g. proximity chat).
+The broadcast trigger component supports three types of target: Room, Player and Self. The setting for this is highlighted in the image above.
+When the target of a broadcaster is set to "Room" then the local voice will be sent to the given room. Other players who have subscribed to the same room will hear what is said. If a player is both sending and receiving from the same room they will not hear themselves speaking.
+When the target of a broadcaster is set to "Player" then the local voice will be sent only to the player specified by the "Recipient Player Name" field. The receiving player will automatically receive this without setting up a "Voice Receipt Trigger".
+When the target of a broadcaster is set to "Self" the broadcaster will look for a "Dissonance Player" component attached to the same game object and will send the local voice to the player represented by that component. This is equivalent to the player mode. The receiving player will automatically receive this without setting up a "Voice Receipt Trigger".
+If the sending target is "Player" or "Self" then the receiving player automatically hears anything transmitted to them. However this is not the case for rooms, receiving players need to subscribe to rooms they wish to listen to, this is controlled by the "Voice Receipt Trigger". When the trigger component is activated voice will be received from the given room.
+This system of broadcasters and receivers is very flexible and allows for a variety of different setups. This documentation includes some specific examples but if you have a specific design in mind which is not covered here feel free to raise an issue or discuss it with the community.
+ + + + + + + + + + + + + +Dissonance uses some open source libraries to provide audio preprocessing, postprocessing, encoding and decoding. The distribution requirements for the projects used are all very simple - you must include copies of the license files in distributions of your project.
+ + + + + + + + + + + + + + +Dissonance has optional integrations with some non-networking assets to add/improve certain features.
+SALSA Lip Sync provides real-time lip synchronisation. The SalsaDissonanceLink
integration connects the Dissonance audio system to the lip sync system to provide real-time lip synchronisation for other speakers in the VoIP session.
For a download link and more information, see the full documentation on the SALSA docs.
+FMOD is a powerful alternative audio system for Unity. The FMOD Playback integration package outputs Dissonance audio into the FMOD audio system. This allows you to mix Dissonance audio in the FMOD mixer and to completely disable the Unity audio system.
+If you completely disable the Unity audio system you must also use the FMOD Recording package.
+FMOD is a powerful alternative audio system for Unity. The FMOD Recording integration package provides higher quality and lower latency audio to Dissonance through FMOD.
+Using this integration does not require that you are using FMOD for audio playback. You can install FMOD just for the higher quality audio recording and continue to use the normal Unity audio systems.
+++This Quick Start guide is for those of you integrating Dissonance into a game with Dark Rift 2.
+
This tutorial will guide you through the steps required to get a basic Dissonance setup working in your project. By the end of this tutorial, you will having working voice comms with all users talking in a global chat room.
+Before beginning this tutorial, please refer to the installation guide to learn how to install Dissonance into your project.
+A demo scene for this tutorial can be found in the Dissonance/Integrations/DarkRift2/Demo
folder. Note that to use the demo scene you must install the server plugins (see section 1a).
++Dissonance runs mostly from a single game object, which should be placed somewhere near the root of your scene. This game object contains the main "Dissonance Comms" behaviour, together with the Dark Rift networking script.
+
To place the default Dissonance game object into your scene, drag and drop the DissonanceSetup
prefab from the Dissonance/Integrations/DarkRift2
folder into your scene. This should create a game object with two scripts attached: "Dissonance Comms" and "Dark Rift 2 Comms Network".
Dark Rift has a system of server side plugins to process packets, Dissonance includes two plugins. For the demo scene there is a DissonanceDemoPlugin.dll
, this is a very basic plugin to synchronise the positions of characters in the demo scene. There is also the DissonanceServerPlugin.dll
which runs the Dissonance server logic. The precompiled plugin DLL files are included in the package, simply drop them into the plugins folder on your dark rift server. Sometimes these plugins can confuse Unity because they are not Unity plugins but Unity might try to load them anyway - if you have an error which mentions ... ambiguous between the following methods or properties
then you should remove the server plugins from with the Unity project to prevent Unity from trying to load them.
If you need to modify the code of the plugins contact admin@placeholder-software.co.uk
with your invoice number to request the source code.
You now have a functional Dissonance comms system, but you are not yet transmitting anything.
+Before you can speak to anyone, you need to add a "Voice Broadcast Trigger" script to your scene. This script can be placed anywhere, but for this tutorial, you should simply add it to the DissonanceSetup game object you created in step 1.
+The "Voice Broadcast Trigger" controls when the user's microphone is being transmitted to other players, and to whom the user is talking. There are many configuration options on this script to provide more advanced control of under what situations we should be transmitting and who to, but for this tutorial simply leave the settings at default.
+To set up the broadcast trigger, change the following two settings: +1. Transmit on Voice Activation. This means Dissonance will transmit whenever it detects that the user is speaking. +2. Transmit to the 'Global' chat room.
+Now you are talking into the 'Global' room automatically whenever you speak. However, you still can't hear anyone speaking. This is because you are not listening to the 'Global' room and so you are not receiving any of these transmissions.
+To listen to the 'Global' room, add a "Voice Receipt Trigger" to the scene. Like the "Voice Broadcast Trigger", this script can be placed anywhere, but for this tutorial you should simply add it to the DissonanceSetup game object.
+Again, leave this on the default configuration, which should have trigger activation disabled and be listening to the 'Global' chat room.
+Congratulations, you have now added voice comms to your game! What to do next?
+ + + + + + + + + + + + + + +This tutorial will guide you through the steps required to get a basic Dissonance setup working in your project. By the end of this tutorial, you will having working voice comms with all users talking in a global chat room.
+Before beginning this tutorial, please refer to the installation guide to learn how to install Dissonance into your project.
+A demo scene for this tutorial can be found in the Dissonance/Integrations/ForgeNetworkingRemastered/Demo
folder. Please make sure to read the include readme file before trying the demo scene.
++Dissonance runs mostly from a single game object, which should be placed somewhere near the root of your scene. This game object contains the main "Dissonance Comms" behaviour, together with the Forge networking script.
+
To place the default Dissonance game object into your scene, drag and drop the DissonanceSetup
prefab from the Dissonance/Integrations/ForgeNetworkingRemastered
folder into your scene. This should create a game object with two scripts attached: "Dissonance Comms" and "Forge Remastered Comms Network".
You now have a functional Dissonance comms system, but you are not yet transmitting anything.
+Before you can speak to anyone, you need to add a "Voice Broadcast Trigger" script to the scene. This script can be placed anywhere, but for this tutorial, you should simply add it to the DissonanceSetup game object you created in step 1.
+The "Voice Broadcast Trigger" controls when the user's microphone is being transmitted to other players, and to whom the user is talking. There are many configuration options on this script to provide more advanced control of under what situations we should be transmitting and who to, but for this tutorial simply leave the settings at default.
+To set up the broadcast trigger, change the following two settings: +1. Transmit on Voice Activation. This means Dissonance will transmit whenever it detects that the user is speaking. +2. Transmit to the 'Global' chat room.
+Now you are talking into the 'Global' room automatically whenever you speak. However, you still can't hear anyone speaking. This is because you are not listening to the 'Global' room and so you are not receiving any of these transmissions.
+To listen to the 'Global' room, add a "Voice Receipt Trigger" to the scene. Like the "Voice Broadcast Trigger", this script can be placed anywhere, but for this tutorial you should simply add it to the DissonanceSetup game object.
+Again, leave this on the default configuration, which should have trigger activation disabled and be listening to the 'Global' chat room.
+Congratulations, you have now added voice comms to your game! What to do next?
+ + + + + + + + + + + + + + +++This Quick Start guide is for those of you integrating Dissonance into a game with Photon Fusion.
+
This tutorial will guide you through the steps required to get a basic Dissonance setup working in your project. By the end of this tutorial, you will have working voice comms with all users talking in a global chat room.
+Before beginning this tutorial, please refer to the installation guide to learn how to install Dissonance into your project.
+A demo scene for this tutorial can be found in the Dissonance/Integrations/PhotonFusion/Demo
folder.
Photon Fusion Multi Peer
+Dissonance does not support Fusion Multi Peer Mode.
+Photon Fusion supports multi peer mode which allows multiple clients to run in one Unity editor instance, this is great for rapid testing. However, Dissonance uses some resources that fundamentally cannot be shared (e.g. the microphone) and does not support this mode.
+++Dissonance runs mostly from a single game object, which should be created as a child of your Photon Fusion "Network Runner". This object contains the main "Dissonance Comms" behaviour, together with the Photon Fusion networking script.
+
To place the default Dissonance object into your scene, drag and drop the DissonanceSetup
prefab from the Dissonance/Integrations/PhotonFusion
folder into your "Network Runner" GameObject.
Once you have instantiated the DissonanceSetup
prefab, you should have an object with two scripts attached: DissonanceComms
and FusionCommsNetwork
.
You now have a functional Dissonance comms system, but you are not yet transmitting anything.
+Before you can speak to anyone, you need to add a "Voice Broadcast Trigger" script to the scene. This script can be placed anywhere, but for this tutorial, you should simply add it to the DissonanceSetup game object you created in step 1.
+The "Voice Broadcast Trigger" controls when the user's microphone is being transmitted to other players, and to whom the user is talking. There are many configuration options on this script to provide more advanced control of under what situations we should be transmitting and who to, but for this tutorial simply leave the settings at default.
+To set up the broadcast trigger, change the following two settings: +1. Transmit on Voice Activation. This means Dissonance will transmit whenever it detects that the user is speaking. +2. Transmit to the 'Global' chat room.
+Now you are talking into the 'Global' room automatically whenever you speak. However, you still can't hear anyone speaking. This is because you are not listening to the 'Global' room and so you are not receiving any of these transmissions.
+To listen to the 'Global' room, add a "Voice Receipt Trigger" to the scene. Like the "Voice Broadcast Trigger", this script can be placed anywhere, but for this tutorial you should simply add it to the DissonanceSetup game object.
+Again, leave this on the default configuration, which should have trigger activation disabled and be listening to the 'Global' chat room.
+Congratulations, you have now added voice comms to your game! What to do next?
+ + + + + + + + + + + + + + +++This Quick Start guide is for those of you integrating Dissonance into a game with Mirror Networking. You must use a network backend which supports unreliable networking such as Ignorance.
+
This tutorial will guide you through the steps required to get a basic Dissonance setup working in your project. By the end of this tutorial, you will have working voice comms with all users talking in a global chat room.
+Before beginning this tutorial, please refer to the installation guide to learn how to install Dissonance into your project.
+A demo scene for this tutorial can be found in the Dissonance/Integrations/MirrorIgnorance/Demo
folder.
++Dissonance runs mostly from a single game object, which should be placed somewhere near the root of your scene. This object contains the main "Dissonance Comms" behaviour, together with the Mirror networking script.
+
To place the default Dissonance object into your scene, drag and drop the DissonanceSetup
prefab from the Dissonance/Integrations/MirrorIgnorance
folder into your scene.
Once you have instantiated the DissonanceSetup
prefab, you should have an object with two scripts attached: "Dissonance Comms" and MirrorIgnoranceCommsNetwork
.
In this configuration Dissonance sends its network packets through Mirror - this means you need a Mirror session set up for Dissonance to use.
+To create a high level network session add a Network Manager
to your scene, this is a Mirror component which will handle setting up your network. If you need a basic UI for test purposes also add a Network Manager HUD
to your scene, this is another Mirror component which shows a simple UI for creating and joining sessions.
You now have a functional Dissonance comms system, but you are not yet transmitting anything.
+Before you can speak to anyone, you need to add a "Voice Broadcast Trigger" script to the scene. This script can be placed anywhere, but for this tutorial, you should simply add it to the DissonanceSetup game object you created in step 1.
+The "Voice Broadcast Trigger" controls when the user's microphone is being transmitted to other players, and to whom the user is talking. There are many configuration options on this script to provide more advanced control of under what situations we should be transmitting and who to, but for this tutorial simply leave the settings at default.
+To set up the broadcast trigger, change the following two settings: +1. Transmit on Voice Activation. This means Dissonance will transmit whenever it detects that the user is speaking. +2. Transmit to the 'Global' chat room.
+Now you are talking into the 'Global' room automatically whenever you speak. However, you still can't hear anyone speaking. This is because you are not listening to the 'Global' room and so you are not receiving any of these transmissions.
+To listen to the 'Global' room, add a "Voice Receipt Trigger" to the scene. Like the "Voice Broadcast Trigger", this script can be placed anywhere, but for this tutorial you should simply add it to the DissonanceSetup game object.
+Again, leave this on the default configuration, which should have trigger activation disabled and be listening to the 'Global' chat room.
+Congratulations, you have now added voice comms to your game! What to do next?
+ + + + + + + + + + + + + + +++This Quick Start guide is for those of you integrating Dissonance into a game with the Photon BOLT networking asset
+
This tutorial will guide you through the steps required to get a basic Dissonance setup working in your project. By the end of this tutorial, you will have working voice comms with all users talking in a global chat room.
+Before beginning this tutorial, please refer to the installation guide to learn how to install Dissonance into your project.
+A demo scene for this tutorial can be found in the Dissonance/Integrations/PhotonBolt/Demo
folder.
Photon BOLT requires defining the packet types it can send and receive in the Unity editor. Add two new Events (both with no properties):
+For the Demo scene to correctly synchronise player positions you will also need to add a new "State", details of this are in the README in the demo folder.
+++Dissonance runs mostly from a single game object, which should be placed somewhere near the root of your scene. This object contains the main "Dissonance Comms" behaviour, together with the Photon networking script.
+
To place the default Dissonance object into your scene, drag and drop the DissonanceSetup
prefab from the Dissonance/Integrations/PhotonBolt
folder into your scene.
Once you have instantiated the DissonanceSetup
prefab, you should have an object with two scripts attached: "Dissonance Comms" and Bolt Comms Network
.
++The Photon BOLT integration will automatically route Dissonance traffic through an established BOLT session.
+
You now have a functional Dissonance comms system, but you are not yet transmitting anything.
+Before you can speak to anyone, you need to add a "Voice Broadcast Trigger" script to the scene. This script can be placed anywhere, but for this tutorial, you should simply add it to the DissonanceSetup game object you created in step 1.
+The "Voice Broadcast Trigger" controls when the user's microphone is being transmitted to other players, and to whom the user is talking. There are many configuration options on this script to provide more advanced control of under what situations we should be transmitting and who to, but for this tutorial simply leave the settings at default.
+To set up the broadcast trigger, change the following two settings: +1. Transmit on Voice Activation. This means Dissonance will transmit whenever it detects that the user is speaking. +2. Transmit to the 'Global' chat room.
+Now you are talking into the 'Global' room automatically whenever you speak. However, you still can't hear anyone speaking. This is because you are not listening to the 'Global' room and so you are not receiving any of these transmissions.
+To listen to the 'Global' room, add a "Voice Receipt Trigger" to the scene. Like the "Voice Broadcast Trigger", this script can be placed anywhere, but for this tutorial you should simply add it to the DissonanceSetup game object.
+Again, leave this on the default configuration, which should have trigger activation disabled and be listening to the 'Global' chat room.
+Congratulations, you have now added voice comms to your game! What to do next?
+Note that setting up positional audio for bolt requires some extra steps, see this article for more detail:
+ + + + + + + + + + + + + + +++This Quick Start guide is for those of you integrating Dissonance into a game with Photon Unity Networking.
+
This tutorial will guide you through the steps required to get a basic Dissonance setup working in your project. By the end of this tutorial, you will have working voice comms with all users talking in a global chat room.
+Before beginning this tutorial, please refer to the installation guide to learn how to install Dissonance into your project.
+A demo scene for this tutorial can be found in the Dissonance/Integrations/Photon/Demo
folder.
++Dissonance runs mostly from a single game object, which should be placed somewhere near the root of your scene. This object contains the main "Dissonance Comms" behaviour, together with the Photon networking script.
+
To place the default Dissonance object into your scene, drag and drop the DissonanceSetup
prefab from the Dissonance/Integrations/Photon
folder into your scene.
Once you have instantiated the DissonanceSetup
prefab, you should have an object with two scripts attached: "Dissonance Comms" and PhotonCommsNetwork
.
++The Photon integration will automatically route Dissonance traffic through the Photon cloud network.
+
You now have a functional Dissonance comms system, but you are not yet transmitting anything.
+Before you can speak to anyone, you need to add a "Voice Broadcast Trigger" script to the scene. This script can be placed anywhere, but for this tutorial, you should simply add it to the DissonanceSetup game object you created in step 1.
+The "Voice Broadcast Trigger" controls when the user's microphone is being transmitted to other players, and to whom the user is talking. There are many configuration options on this script to provide more advanced control of under what situations we should be transmitting and who to, but for this tutorial simply leave the settings at default.
+To set up the broadcast trigger, change the following two settings: +1. Transmit on Voice Activation. This means Dissonance will transmit whenever it detects that the user is speaking. +2. Transmit to the 'Global' chat room.
+Now you are talking into the 'Global' room automatically whenever you speak. However, you still can't hear anyone speaking. This is because you are not listening to the 'Global' room and so you are not receiving any of these transmissions.
+To listen to the 'Global' room, add a "Voice Receipt Trigger" to the scene. Like the "Voice Broadcast Trigger", this script can be placed anywhere, but for this tutorial you should simply add it to the DissonanceSetup game object.
+Again, leave this on the default configuration, which should have trigger activation disabled and be listening to the 'Global' chat room.
+Congratulations, you have now added voice comms to your game! What to do next?
+ + + + + + + + + + + + + + +++This Quick Start guide is for those of you integrating Dissonance into a game with the WebRTC Network asset.
+This integration requires Dissonance 6.2.5 or greater.
+
This tutorial will guide you through the steps required to get a basic Dissonance setup working in your project. By the end of this tutorial, you will have working voice comms with all users talking in a global chat room.
+Before beginning this tutorial, please refer to the installation guide to learn how to install Dissonance into your project.
+A demo scene for this tutorial can be found in the Assets/Dissonance/Integrations/PureP2P/Demo
folder.
++Dissonance runs mostly from a single game object, which should be placed somewhere near the root of your scene. This object contains the main "Dissonance Comms" behaviour, together with the PureP2P networking script.
+
To place the default Dissonance object into your scene, drag and drop the DissonanceSetup
prefab from the Dissonance/Integrations/PureP2P
folder into your scene.
Once you have instantiated the DissonanceSetup
prefab, you should have an object with two scripts attached: Dissonance Comms
and Pure P2P Comms Network
.
Dissonance internally manages the WebRTC network session, automatically hosting a session and connecting to other peers as they join. From your script you simply need to call InitializeAsServer
or InitializeAsClient
on the PureP2PCommsNetwork
component and supply the same session ID to both calls. If the server peer leaves the session you will need to start a new server with a new session ID and connect all the clients again.
You now have a functional Dissonance comms system, but you are not yet transmitting anything.
+Before you can speak to anyone, you need to add a "Voice Broadcast Trigger" script to the scene. This script can be placed anywhere, but for this tutorial, you should simply add it to the DissonanceSetup game object you created in step 1.
+The "Voice Broadcast Trigger" controls when the user's microphone is being transmitted to other players, and to whom the user is talking. There are many configuration options on this script to provide more advanced control of under what situations we should be transmitting and who to, but for this tutorial simply leave the settings at default.
+To set up the broadcast trigger, change the following two settings: +1. Transmit on Voice Activation. This means Dissonance will transmit whenever it detects that the user is speaking. +2. Transmit to the 'Global' chat room.
+Now you are talking into the 'Global' room automatically whenever you speak. However, you still can't hear anyone speaking. This is because you are not listening to the 'Global' room and so you are not receiving any of these transmissions.
+To listen to the 'Global' room, add a "Voice Receipt Trigger" to the scene. Like the "Voice Broadcast Trigger", this script can be placed anywhere, but for this tutorial you should simply add it to the DissonanceSetup game object.
+Again, leave this on the default configuration, which should have trigger activation disabled and be listening to the 'Global' chat room.
+Congratulations, you have now added voice comms to your game! What to do next?
+ + + + + + + + + + + + + + +++This Quick Start guide is for those of you integrating Dissonance into a game with the Steamworks.NET P2P API
+
This tutorial will guide you through the steps required to get a basic Dissonance setup working in your project. By the end of this tutorial, you will have working voice comms with all users talking in a global chat room.
+Before beginning this tutorial, please refer to the installation guide to learn how to install Dissonance into your project.
+A demo scene for this tutorial can be found in the Dissonance/Integrations/SteamworksP2P/Demo
folder.
++Dissonance runs mostly from a single game object, which should be placed somewhere near the root of your scene. This object contains the main "Dissonance Comms" behaviour, together with the Steamworks P2P networking script.
+
To place the default Dissonance object into your scene, drag and drop the DissonanceSetup
prefab from the Dissonance/Integrations/SteamworksP2P
folder into your scene.
Once you have instantiated the DissonanceSetup
prefab, you should have an object with two scripts attached: "Dissonance Comms" and SteamworksP2PCommsNetwork
.
Dissonance does not manage your steamworks session, instead it uses whatever session you have already setup. This gives you maximum control over how you want the network session to be configured. Refer to the Steamworks Networking Documentation for details on how to setup a session. You can see example code in the SteamworksDemoUi
component in the Assets/Dissonance/Integrations/SteamworksP2P/Demo
folder.
Once you have a Steamworks session running you need to inform Dissonance about the state of the session when it changes. When you have a session running you need to start Dissonance, call one of InitializeAsDedicatedServer
, InitializeAsServer
or InitializeAsClient
on the SteamworksP2PCommsNetwork
component. The server is the central control point of the session, if it leaves the game you must stop Dissonance and pick a new server. When a player joins the session you must call the PeerConnected
method. When a player leaves the session you must call the PeerDisconnected
method.
You now have a functional Dissonance comms system, but you are not yet transmitting anything.
+Before you can speak to anyone, you need to add a "Voice Broadcast Trigger" script to the scene. This script can be placed anywhere, but for this tutorial, you should simply add it to the DissonanceSetup game object you created in step 1.
+The "Voice Broadcast Trigger" controls when the user's microphone is being transmitted to other players, and to whom the user is talking. There are many configuration options on this script to provide more advanced control of under what situations we should be transmitting and who to, but for this tutorial simply leave the settings at default.
+To set up the broadcast trigger, change the following two settings: +1. Transmit on Voice Activation. This means Dissonance will transmit whenever it detects that the user is speaking. +2. Transmit to the 'Global' chat room.
+Now you are talking into the 'Global' room automatically whenever you speak. However, you still can't hear anyone speaking. This is because you are not listening to the 'Global' room and so you are not receiving any of these transmissions.
+To listen to the 'Global' room, add a "Voice Receipt Trigger" to the scene. Like the "Voice Broadcast Trigger", this script can be placed anywhere, but for this tutorial you should simply add it to the DissonanceSetup game object.
+Again, leave this on the default configuration, which should have trigger activation disabled and be listening to the 'Global' chat room.
+Congratulations, you have now added voice comms to your game! What to do next?
+ + + + + + + + + + + + + + +++This Quick Start guide is for those of you integrating Dissonance into a game with the TNet3 asset.
+This integration requires Dissonance 6.4.2 or greater.
+
This tutorial will guide you through the steps required to get a basic Dissonance setup working in your project. By the end of this tutorial, you will have working voice comms with all users talking in a global chat room.
+Before beginning this tutorial, please refer to the installation guide to learn how to install Dissonance into your project.
+A demo scene for this tutorial can be found in the Assets/Dissonance/Integrations/TNet3/Demo
folder.
++Dissonance runs mostly from a single game object, which should be placed somewhere near the root of your scene. This object contains the main "Dissonance Comms" behaviour, together with the TNet3 networking script.
+
To place the default Dissonance object into your scene, drag and drop the DissonanceSetup
prefab from the Dissonance/Integrations/TNet3
folder into your scene.
Once you have instantiated the DissonanceSetup
prefab, you should have an object with two scripts attached: Dissonance Comms
and Tnet3 Comms Network
.
A single TNet3 server hosts multiple channels at once, players can only send and receive packets to channels they have joined. The Dissonance integration automatically hosts a voice session in a separate channel - this means you can potentially host multiple completely independent voice chat sessions on a single TNet3 server. Once your game is started you need to choose which voice session to host/join for each peer that connects:
+To begin hosting a new session in a channel:
+var tcnc = FindObjectOfType<TasharenCommsNetwork>();
+
+tcnc.HostVoiceChannel(channel_id, max_players, "password", is_dedicated_server);
+
This will begin hosting a new Dissonance voice chat session in the channel identified by channel_id
. If is_dedicated_server
is true then the local instance will not be able to send or receive audio - it is simply acting as a network host.
To join an existing session:
+var tcnc = FindObjectOfType<TasharenCommsNetwork>();
+
+tcnc.JoinVoiceChannel(channel_id, "password");
+
This will attempt to join a session in the specified channel_id
.
These methods can be called again at any time to change the mode of Dissonance.
+You now have a functional Dissonance comms system, but you are not yet transmitting anything.
+Before you can speak to anyone, you need to add a "Voice Broadcast Trigger" script to the scene. This script can be placed anywhere, but for this tutorial, you should simply add it to the DissonanceSetup game object you created in step 1.
+The "Voice Broadcast Trigger" controls when the user's microphone is being transmitted to other players, and to whom the user is talking. There are many configuration options on this script to provide more advanced control of under what situations we should be transmitting and who to, but for this tutorial simply leave the settings at default.
+To set up the broadcast trigger, change the following two settings: +1. Transmit on Voice Activation. This means Dissonance will transmit whenever it detects that the user is speaking. +2. Transmit to the 'Global' chat room.
+Now you are talking into the 'Global' room automatically whenever you speak. However, you still can't hear anyone speaking. This is because you are not listening to the 'Global' room and so you are not receiving any of these transmissions.
+To listen to the 'Global' room, add a "Voice Receipt Trigger" to the scene. Like the "Voice Broadcast Trigger", this script can be placed anywhere, but for this tutorial you should simply add it to the DissonanceSetup game object.
+Again, leave this on the default configuration, which should have trigger activation disabled and be listening to the 'Global' chat room.
+Congratulations, you have now added voice comms to your game! What to do next?
+ + + + + + + + + + + + + + +++This Quick Start guide is for those of you integrating Dissonance into a game with the Unity Networking High Level API.
+
This tutorial will guide you through the steps required to get a basic Dissonance setup working in your project. By the end of this tutorial, you will have working voice comms with all users talking in a global chat room.
+Before beginning this tutorial, please refer to the installation guide to learn how to install Dissonance into your project.
+A demo scene for this tutorial can be found in the Dissonance/Integrations/UNet_HLAPI/Demo
folder.
++Dissonance runs mostly from a single game object, which should be placed somewhere near the root of your scene. This object contains the main "Dissonance Comms" behaviour, together with the UNet HLAPI networking script.
+
To place the default Dissonance object into your scene, drag and drop the DissonanceSetup
prefab from the Dissonance/Integrations/UNet_HLAPI
folder into your scene.
Once you have instantiated the DissonanceSetup
prefab, you should have an object with two scripts attached: "Dissonance Comms" and HlapiCommsNetwork
.
In this configuration Dissonance sends its network packets through the UNet High Level API - this means you need a high level network session set up for Dissonance to use.
+To create a high level network session add a Network Manager
to your scene, this is a Unity component which will handle setting up your network. If you need a basic UI for test purposes also add a Network Manager HUD
to your scene, this is another Unity component which shows a simple UI for creating and joining sessions.
Dissonance needs two network channels to send its data through. On the Network Manager
component check the Advanced Configuration
checkbox and add two new channels, configure one as Reliable Sequenced
and the other as Unreliable
. In the Dissonance Hlapi Comms Network
inspector check the Reliable Channel
and Unreliable Channel
fields correspond to the channels numbers in the Network Manager
.
You now have a functional Dissonance comms system, but you are not yet transmitting anything.
+Before you can speak to anyone, you need to add a "Voice Broadcast Trigger" script to the scene. This script can be placed anywhere, but for this tutorial, you should simply add it to the DissonanceSetup game object you created in step 1.
+The "Voice Broadcast Trigger" controls when the user's microphone is being transmitted to other players, and to whom the user is talking. There are many configuration options on this script to provide more advanced control of under what situations we should be transmitting and who to, but for this tutorial simply leave the settings at default.
+To set up the broadcast trigger, change the following two settings: +1. Transmit on Voice Activation. This means Dissonance will transmit whenever it detects that the user is speaking. +2. Transmit to the 'Global' chat room.
+Now you are talking into the 'Global' room automatically whenever you speak. However, you still can't hear anyone speaking. This is because you are not listening to the 'Global' room and so you are not receiving any of these transmissions.
+To listen to the 'Global' room, add a "Voice Receipt Trigger" to the scene. Like the "Voice Broadcast Trigger", this script can be placed anywhere, but for this tutorial you should simply add it to the DissonanceSetup game object.
+Again, leave this on the default configuration, which should have trigger activation disabled and be listening to the 'Global' chat room.
+Congratulations, you have now added voice comms to your game! What to do next?
+ + + + + + + + + + + + + + +++This Quick Start guide is for those of you integrating Dissonance into a game with Unity Netcode For GameObjects.
+
This tutorial will guide you through the steps required to get a basic Dissonance setup working in your project. By the end of this tutorial, you will have working voice comms with all users talking in a global chat room.
+Before beginning this tutorial, please refer to the installation guide to learn how to install Dissonance into your project.
+A demo scene for this tutorial can be found in the Dissonance/Integrations/UNet_NFGO/Demo
folder.
++Dissonance runs mostly from a single game object, which should be placed somewhere near the root of your scene. This object contains the main "Dissonance Comms" behaviour, together with the Netcode For GameObjects networking script.
+
To place the default Dissonance object into your scene, drag and drop the DissonanceSetup
prefab from the Dissonance/Integrations/UNet_NFGO
folder into your scene.
Once you have instantiated the DissonanceSetup
prefab, you should have an object with two scripts attached: DissonanceComms
and NfgoCommsNetwork
.
You now have a functional Dissonance comms system, but you are not yet transmitting anything.
+Before you can speak to anyone, you need to add a "Voice Broadcast Trigger" script to the scene. This script can be placed anywhere, but for this tutorial, you should simply add it to the DissonanceSetup game object you created in step 1.
+The "Voice Broadcast Trigger" controls when the user's microphone is being transmitted to other players, and to whom the user is talking. There are many configuration options on this script to provide more advanced control of under what situations we should be transmitting and who to, but for this tutorial simply leave the settings at default.
+To set up the broadcast trigger, change the following two settings: +1. Transmit on Voice Activation. This means Dissonance will transmit whenever it detects that the user is speaking. +2. Transmit to the 'Global' chat room.
+Now you are talking into the 'Global' room automatically whenever you speak. However, you still can't hear anyone speaking. This is because you are not listening to the 'Global' room and so you are not receiving any of these transmissions.
+To listen to the 'Global' room, add a "Voice Receipt Trigger" to the scene. Like the "Voice Broadcast Trigger", this script can be placed anywhere, but for this tutorial you should simply add it to the DissonanceSetup game object.
+Again, leave this on the default configuration, which should have trigger activation disabled and be listening to the 'Global' chat room.
+Congratulations, you have now added voice comms to your game! What to do next?
+ + + + + + + + + + + + + + +On Android the user must grant permission before the Microphone is accessed. Since Dissonance uses the Unity Microphone
class the Record_Audio
permission should have already been added to the app manifest. When the application is started the user is asked for all of the permissions in the manifest in one dialog.
If Microphone
permission is not granted when Dissonance is started it will operate in listen-only mode.
The Runtime Permission System may be used to request permission from the user again at any time. If permission is granted after Dissonance has already started you should call DissonanceComms.ResetMicrophoneCapture to restart the microphone recording system.
+ + + + + + + + + + + + + +Running Dissonance on a Linux PC has no runtime dependencies.
+ + + + + + + + + + + + + +Running Dissonance on MacOS has no runtime dependencies.
+To record audio a MacOS app requires permission from the user. See the Unity docs here and the Apple docs here on how to request permission.
+All applications running on MacOS Catalina 10.15 require "notarization". See the Apple docs here and a tutorial on permissions and notarization for Unity apps here.
+To access the microphone you will need to add two keys to the entitlements file:
+You may need to add other keys to enable access to networking.
+ + + + + + + + + + + + + +Dissonance includes support for LuminOS on Magic Leap devices. To enable this you must change the import settings of Assets\Plugins\Dissonance\Plugins\Magic Leap\libAudioPluginDissonance.so
and Assets\Plugins\Dissonance\Plugins\Android\libs\ARM64\libopus.so
to be included in Magic Leap builds.
Acoustic Echo Cancellation (AEC) and Noise Suppression (NS) are built into the Magic Leap device. To prevent interference the Dissonance AEC and NS systems are disabled when deployed to a Magic Leap headset; any configuration settings in Dissonance for these two systems will be ignored.
+ + + + + + + + + + + + + +If you are using the Oculus OVR SDK you must disable the Ovr Avatar
component from taking exclusive control of the microphone (which prevents Dissonance from using it).
Find the LocalAvatar
GameObject and untick Can Own Microphone
in the inspector for the Ovr Avatar
component.
Running Dissonance on a Windows PC requires the Visual Studio 2019 Redist. It's recommended that you package this with your application and install it as part of your install process.
+On systems older than Windows 10, users must also install this Windows Update.
+If you are distributing your application through Steam you can set the redistributable as a dependency. It will be installed when the application is installed. This can be done through the app panel: App Panel > Installation > Redistributables > Visual C++ Redist 2019
.
Running Dissonance on a Windows PC within a UWP application requires the Visual Studio 2017 v141 Redist. It's recommended that you package this with your application and install it as part of your install process.
+ + + + + + + + + + + + + +Running Dissonance on iOS has no runtime dependencies.
+In the Player Settings for iOS there are four settings relevant to voice chat:
+Microphone Usage Description
: You must enter the reason for accessing the microphone on the iOS device.Prepare iOS for Recording
: You must enable this setting to enable low latency audio recording.Mute Other Audio Sources
: You may enable this to ensure that background audio does not interfere with voice audio.Force iOS Speakers when Recording
: You may enable this to force audio to the speakers even when headphones are plugged in. If this is not enabled all application audio will be played through the call speakers instead of the main speakers as soon as recording starts.This script makes it easier to directly access the recorded audio data. All methods on this script are called on the main thread.
+Implement a new script with BaseMicrophoneSubscriber
as the base class instead of MonoBehaviour
, once you have done this register an instance of the behaviour to receive data by calling DissonanceComms.SubscribeToRecordedAudio
.
See also IMicrophoneSubscriber which does not have any of the ease-of-use features of this script, providing more direct access.
+This method is called by Dissonance whenever the audio stream is being reset, or the audio format is changing. The waveFormat
argument indicates the format of the next data which will be delivered.
When this is called you should immediately finish any work you were doing with the audio and prepare for more audio to be delivered soon. For example if you are recording audio to a file you would flush the file writer and close the file handle.
+This method is called by Dissonance for every frame of recorded audio data. The data
argument contains raw PCM audio data.
After this method has finished executing you must not hold any references to the data
argument. Any data that you want to store for processing later must be copied out of the data
.
This interface designates a behaviour as a microphone capture system which feeds audio into Dissonance.
+When the DissonanceComms component Start
method is called (just once when the component is first enabled) Dissonance will look for a sibling component which implements the IMicrophoneCapture
interface. If it does not find a suitable component it will create a BasicMicrophoneCapture
component which internally uses the Unity Microphone API. To use your custom microphone script simply drop it onto the same gameObject as DissonanceComms.
This property indicates if the microphone component is currently recording.
+While this is true audio must be delivered to subscribers at approximately realtime rates. Delivering audio too quickly will cause buffer overflows (these are handled by Dissonance, but audio will be lost). If your capture system does not have audio available when Update
is called it is ok to not supply audio for one or two calls (as long as a subsequent call delivers enough audio to make up for that dead time). However, if the capture system has truly stopped supplying audio you must either return true
from Update
(to force a reset) or just return silence (to cover up for the problem).
This property should give the estimated latency of the microphone capture system. This is the total time from audio physically hitting the microphone hardware to the data being passed on to subscribers.
+Attempt to begin recording. The return value indicates what format the captured data will be. If microphone capture cannot be started for any reason this method should return null. The microphone name is passed through from the UI, how it is interpreted depends upon what kind of audio capture system you are using.
+Once this method has returned a non-null value IsRecording
must be set to true
until StopCapture
is called.
Immediately stops microphone capture, discarding any data remaining in internal buffers. IsRecording
must be set to false
after this is called.
Subscribes a new object to receive raw microphone data.
+Attempts to remove a previously subscribed object. Returns whether the object was found (if it was found it is assumed it was successfully removed).
+Pass buffered data on to the subscribers.
+Returns true if the microphone needs to be reset. In this case Stop will immediately be called (and start may be called afterwards).
+ + + + + + + + + + + + + +This interface allows a script to be added into the Dissonance audio recording pipeline, giving you direct access to the audio data.
+Once you have implemented the IMicrophoneSubscriber
on a class you can register an instance of the class to receive data by calling DissonanceComms.SubscribeToRecordedAudio
. See this tutorial for more information.
The methods on this interface are automatically called by Dissonance; they are not called on the main thread. You must be careful in implementations of this interface to handle being called from a background thread safely.
+See also BaseMicrophoneSubscriber which implements this interface in a more convenient package.
+This method is called by Dissonance whenever the audio pipeline is being reset. When this is called you should immediately finish any work you were doing with the audio and prepare for more audio to be delivered soon. For example if you are recording audio to a file you would flush the file writer and close the file handle.
+This method is called by Dissonance for every frame of recorded audio data. The buffer
argument contains raw PCM audio data. The format
argument indicates the format of the data in the buffer, this will only change after Reset
has been called.
After this method has finished executing you must not hold any references to the buffer
argument. Any data that you want to store for processing later must be copied out of the buffer
.
Voice settings is a central place to control various audio settings Dissonance uses. Voice settings can be accessed through Window > Dissonance > Quality Settings
.
These settings are serialized into an asset file at Assets/Plugins/Dissonance/Resources/VoiceSettings.asset
and are also saved into PlayerPrefs. PlayerPrefs
override the values saved in the asset.
Because the settings are saved into an asset the values you choose will be built into your game and will be the default values used by all players.
+Because settings are saved into PlayerPrefs
you can expose the settings to end users in your UI and the values will be saved on a per user basis.
This setting determines how much voice data is sent in a single network packet. There is some overhead associated with each individual packet - using larger values will send less packets-per-second and thus reduce CPU load and network usage. However, larger packets introduce more latency (more delay between speaking and hearing). Latency is a very important aspect of perceived voice quality and lowering this will improve the flow of conversations.
+The Tiny
option (10ms packets) is the lowest latency option. However due to the very high rate of packets (100/second) it is not suitable for use over the internet, only use it in a local area network when latency is very important (e.g. shared space VR).
This setting determines the bitrate the encoder will target - higher values result in higher audio quality but slightly more CPU load and network usage.
+Forward error correction includes extra information in audio packets when network conditions are bad. When a packet is lost due to bad network conditions the audio decoder can use this extra information in other packets to reconstruct a lower quality version of the lost audio. This can almost completely conceal small amounts of packet loss at the cost of ~10% more data used when bad network conditions are detected.
+This setting determines how much noise suppression will be applied to the microphone signal before transmission. Noise in this sense is any sound which is not speech such as computer fans or microphone hiss. Noise suppression will not remove echoes or other voices playing through your speakers.
+Noise suppression is not perfect and may sometimes distort speech, higher levels will remove more background noise but also risk more distortion of speech. However, the risk is fairly low - the distortion is quite minor and the noise suppressor is adaptive so it will only apply really high noise suppression when there is a lot of background noise.
+This setting determines how sensitive the voice detector is. This setting is a tradeoff between accuracy (never classifying non-voice audio as voice) and sensitivity (never classifying voice audio as non-voice). Higher values will send more non-voice background noise.
+These settings control the acoustic echo canceller, which observes sounds coming out of the speakers and then attempts to remove these sounds from the microphone signal after a short delay. It automatically calibrates the delay so expect a short period (10-40 seconds) where no echoes will be cancelled. If there are no sounds coming out of the speakers at all (or the microphone is not detecting those sounds) it will not be able to calibrate the delay. Refer to these docs for a tutorial on correctly setting up the acoustic echo canceller.
+This controls how much the echo canceller tries to cancel echo on mobile devices.
+This controls how much the echo canceller tries to cancel echo on desktop PCs.
+This controls how much remote voices will be attenuated by (reduced in volume) when the local speaker is transmitting.
+ + + + + + + + + + + + + +The Dissonance Comms component is the central place to configure Dissonance. There must be an active one within the scene for Dissonance to work.
+This is a prefab for the audio playback system. For every remote player who is in the voice session Dissonance will instantiate this prefab, and use it to play the voice from that player. If left blank the default playback prefab included with Dissonance will be used. Read more about the playback prefab and how you can customise it here.
+This will prevent the local player from sending any voice.
+This is the set of access tokens which the local player has.
+Clicking this button opens an inspector where audio settings relating to voice may be changed.
+Clicking this button opens an inspector where rooms can be created or deleted.
+Clicking this button opens an inspector where Dissonance diagnostic settings may be changed (e.g. log levels).
+Dissonance Comms is also the central place to access Dissonance from scripts.
+Indicates if the Dissonance network has been successfully initialised yet.
+An object which exposes various properties and methods to do with rooms the local player is listening to. See further documentation here.
+An object which exposes various properties and methods to do with players the local player is speaking to. See further documentation here.
+An object which exposes various properties and methods to do with rooms the local player is speaking to. See further documentation here.
+An object which exposes various properties and methods to do with text chat. See further documentation here.
+A list of VoicePlayerState
objects, one for each remote player currently in the session. See further documentation on VoicePlayerState
here.
The highest priority of all remote players currently speaking in the session.
+The set of tokens which the local player possesses.
+The microphone capture object which Dissonance is currently using. This may be null if Dissonance has not initialised yet or if the local instance is a dedicated server.
+The name of the local player, this will be initialised to a unique ID per player when Dissonance starts. This may not be changed once Dissonance has started.
+The priority of the local player, if a channel is opened with no priority set this priority will be used as a default.
+Get or set the name of the microphone to use to capture voice. This may be changed at any time, if the microphone has already begun recording with a different name it will be reset to use the new name.
+Get or set the playback prefab which Dissonance will use to play back remote voices. This may not be changed once Dissonance has started.
+Get or set if the local player is muted (i.e. prevented from sending any voice transmissions).
+Get or set if the local player is deafened (i.e. prevented from hearing any remote voice transmissions).
+This event runs whenever a new player joins the Dissonance voice chat session. It is passed the object which represents the new player.
+This event runs whenever a player leaves the Dissonance voice chat session. It is passed the object which represents the player. The object will never be touched by Dissonance again - if the player rejoins a new object will be created for them.
+This event runs whenever a remote player begins speaking in a channel which the local player can hear.
+This event runs whenever a remote player stops speaking in all channels which the local player can hear.
+This may not indicate that the remote player has actually stopped talking completely, it is possible that the local player simply stopped listening. For example if you are listening to Room A and they are talking to Room A and Room B, then when you stop listening to Room A you will receive this event (even though they are still talking to Room B) because they have stopped speaking from your point of view.
+This event runs whenever a remote player begins listening to a new room. It is passed the object which represents the player and the name of the room.
+This event runs whenever a remote player stops listening to a room. It is passed the object which represents the player and the name of the room.
+This event runs whenever the local player name is changed. Local player name may only be changed before the DissonanceComms component has been started.
+An event which runs whenever a token is added to the local player.
+An event which runs whenever a token is removed from the local player.
+Attempt to find the player with the given Dissonance ID. Will return null if no such player can be found.
+Subscribes the given listener object to the voice activation detector (VAD) for the local player. When VAD detects speech the VoiceActivationStart
method will be called. When the VAD stops detecting speech the VoiceActivationStop
method will be called.
Unsubscribes a previously subscribed listener object from the VAD.
+Subscribes the given listener object to the microphone recorded audio after it has been preprocessed. This will receive all audio recorded by the mic whether or not it is being sent over the network. Use DissonanceComms.RoomChannels.Count
and DissonanceComms.PlayerChannels.Count
to determine if the audio is being sent anywhere.
Unsubscribes a previously subscribed listener object from the microphone audio stream.
+Begins position tracking for the player represented by the given object.
+Stops position tracking for the player represented by the given object.
+Adds a token to the local player.
+Removes a token from the local player and returns a bool indicating if that token was removed. This will return false if the player never had the token in the first place.
+Returns a boolean value indicating if the local player has the token with the given name.
+Returns a boolean value indicating if the local player has any of the tokens in the given TokenSet.
+Forces a complete restart of the audio capture pipeline.
+Get a list of available microphones. Microphone names will be added to the output
list.
The Voice Broadcast Trigger controls when and where the local voice data is sent to.
+This section controls which channel voice data will be sent to with this trigger. There are three channel type options, which one you choose will change the rest of the channel type UI to match. Channel type can be set from scripts by modifying the ChannelType
property.
When using the "Room" channel type the broadcaster sends voice to the specified room (a single room may have multiple speakers and multiple listeners). Available rooms are listed in a dropdown box and new rooms may be added by clicking the "Config Rooms" button. The target room name can be set from scripts by modifying the RoomName
property.
When set to "Player" the broadcaster sends voice directly to the specified player. The inspector will show a text box to enter the name of the player to send to. The target player name can be configured from a script by modifying the PlayerId
field.
To get the names of other players inspect the DissonanceComms component at runtime - when in a session it will show a list of all players in the session. To get the name of players by script enumerate the DissonanceComms:Players
property.
If you have set up Position Tracking then your player object will have an IDissonancePlayer
component which identifies it to Dissonance. You can take advantage of this to send directly to players without having to know their name. When set to "Self" the broadcast component will look for an IDissonancePlayer
component in this GameObject or any ancestor and will send directly to that player.
This section controls which metadata is sent along with the channel.
+This determines whether the playback of the data sent through this broadcaster should use 3D audio playback (i.e. voice will sound as if it is coming from a certain position in space). Positional audio requires some additional setup (but does not use any additional CPU or network bandwidth at all when enabled). See the Position Tracking tutorial for information about how to set up your project for position tracking of player objects. This option can be set from a script by modifying the BroadcastPosition
field.
This determines the priority which this voice has for playback. Everyone who receives audio will compare the priority of all the audio streams they are receiving and will only play out the streams with the highest priority.
+"None" is a special value which indicates that this broadcast trigger is setting no particular priority - the default priority for this player will be used instead. The default priority is set on the DissonanceComms component with the PlayerPriority
property (if you do not set it it will have the priority Default
). The priority values have this order:
When set to true
this trigger will never activate.
This can be used to create a UI push-to-talk activated broadcast trigger - set the Activation Mode
to Voice Activation
and toggle the IsMuted
property with a button. When muted by the UI no voice will be sent, when unmuted the trigger will automatically transmit when speech is detected.
This section controls how the broadcast trigger decides when to send voice. There are three activation options, which one you choose will change the rest of the activation mode UI to match. Activation mode can be set from scripts by modifying the Mode
property.
When set to "None" the broadcaster will never broadcast any voice.
+When set to "Voice Activation" the broadcaster will automatically transmit when voice is detected in the microphone signal.
+When set to "Push To Talk" the broadcaster will transmit when a given input axis is pressed. You must set the name of a Unity input axis and then configure it in the Unity input manager.
+When set to "Open" the broadcaster will constantly transmit (unless muted).
+Using collider volume activation requires Position Tracking to be set up. When active the broadcast trigger will find a sibling physics trigger volume and will only send voice if the local player (as defined by the IDissonancePlayer
component) is inside the volume.
This section controls which Access Tokens are required to send with this broadcaster.
+The DissonanceComms component keeps a set of tokens which the local player has. The broadcast trigger will only send voice if the player has one or more of the necessary tokens.
+This section controls the amplitude and soft fading of voice sent with this trigger.
+"PushToTalk Fade" (precise name depends upon which activation mode you have chosen) applies a soft fade in and out every time speech is started or stopped (e.g. every time the push to talk key is pressed or released). This setting should be used with care; applying any fade in is inadvisable as it will cut off the start of what is being said. This fader can be configured from scripts by accessing the ActivationFader
property.
"Volume Trigger Fade" (only available when volume trigger is in use) applies a soft fade every time the player enters or exits the volume. This fader can be configured from scripts by accessing the ColliderTriggerFader
property.
Since there are two faders this means the trigger will have two different volumes to use, they will be multiplied together and the result is used as the actual volume value.
+This controls the maximum volume of the fader. This should be used with care; raising the amplification above one will cause clipping, which severely reduces audio quality.
+This controls how long it takes this fader to increase volume from 0 to the Channel Volume
slider level.
This controls how long it takes this fader to decrease volume from Channel Volume
slider level to 0. Note that this means voice will continue to be transmitted for this long even after the user has stopped pressing the push to talk key.
The Voice Proximity Broadcast Trigger sends voice to an infinite grid of "virtual rooms", creating a proximity chat system by placing nearby players into the same rooms.
+This section controls which room the trigger sends to.
+It is possible to have several proximity broadcast systems running simultaneously (e.g. one per team), the room name uniquely identifies this room.
+Set the distance to transmit voice, players within this distance will be considered "near" and will be placed into the same room.
+Hint
+The range must be exactly the same for the broadcast trigger and the receipt trigger!
+This determines the priority which this voice has for playback. Everyone who receives audio will compare the priority of all the audio streams they are receiving and will only play out the streams with the highest priority.
+"None" is a special value which indicates that this broadcast trigger is setting no particular priority - the default priority for this player will be used instead. The default priority is set on the DissonanceComms component with the PlayerPriority
property (if you do not set it it will have the priority Default
). The priority values have this order:
When set to true
this trigger will never activate.
Hint
+This can be used to create a UI push-to-talk activated broadcast trigger - set the Activation Mode
to Voice Activation
and toggle the IsMuted
property with a button. When muted by the UI no voice will be sent, when unmuted the trigger will automatically transmit when speech is detected.
This controls how the broadcast trigger decides when to send voice. Activation mode can be set from scripts by modifying the Mode
property.
None
: Never broadcast any voice.Voice Activation
: Automatically broadcast when voice is detected.Push To Talk
: Broadcast when a given input axis is pressed. You must set the name of a Unity input axis and then configure it in the Unity input manager.Open
: Constantly broadcast unless muted.Only broadcast when the local player is inside a sibling collider volume.
+Add Access Tokens which are required for this broadcaster to broadcast voice. This trigger will only broadcast if the DissonanceComms component has one or more of the necessary tokens.
+ + + + + + + + + + + + + +The Voice Proximity Receipt Trigger receives voice from an infinite grid of "virtual rooms", creating a proximity chat system by placing nearby players into the same rooms.
+This section controls which room the trigger receives from.
+It is possible to have several proximity broadcast systems running simultaneously (e.g. one per team), the room name uniquely identifies this room.
+Set the distance to receive voice, players within this distance will be considered "near" and will be placed into the same room.
+Hint
+The range must be exactly the same for the broadcast trigger and the receipt trigger!
+Add Access Tokens which are required for this receiver to receive voice. This trigger will only receive if the DissonanceComms component has one or more of the necessary tokens.
+Only receive when the local player is inside a sibling collider volume.
+ + + + + + + + + + + + + +The Voice Receipt Trigger controls which rooms are being listened to by the local player.
+This determines whether the receiver will only receive when the local player is within a trigger zone. See the Unity documentation on Trigger Zones. Using trigger activation requires the same basic setup as using Positional Audio. This setting can be configured from a script by modifying the UseTrigger
field.
Determines which room this receiver is for. Available rooms are listed in a dropdown box and new rooms may be added by clicking the "Config Rooms" button. The target room name can be configured from a script by modifying the RoomName
field.
Network Protocol
+Knowledge of the network format is not necessary to work with Dissonance in most cases. This documentation is only required if you want to interact with Dissonance over the network from your own non-Unity code. For example writing a Dissonance server in another language.
+The Dissonance network system manages three main bits of data:
+This document will give you an overview of how the Dissonance network system manages this data. To see the exact packet format look at PacketWriter.cs
and PacketReader.cs
in the Dissonance package, these structs have a method for writing/reading each different packet type.
Every different machine in the session is a peer. This includes both the server and the client.
+A peer which is recording and playing voice.
+A peer which manages the organisation of the session and relays voice to clients.
+A peer which is both a server and a client.
+A server which is not a client (i.e. no audio recording or playback).
+Some packets are sent reliably. This means that the packets will arrive at their destination in the order they were sent, and no packets are lost. This is used for all non-voice packets.
+Some packets are sent unreliably. This means that the packets may be lost in transport or arrive in a different order. This is always used for voice packets.
+Audio is recorded, processed and played back in frames; a frame is a buffer of 10-40ms of audio. Every frame is packed into a single network packet.
+A room is a type of channel which requires the listener to explicitly subscribe to the room to hear any audio sent to that room. Rooms have a name (a string) but on the network rooms are generally referred to by a 16 bit ID which is calculated by the ToRoomId(string name)
method.
All packets contain a header which is used to check that the packet is valid.
+The first 16 bits of every Dissonance packet are a 16 bit magic number 0x8BC7
. This is read from the start of the packet and if it's incorrect the packet is immediately discarded. If something goes wrong and non-Dissonance packets are sent to Dissonance this prevents them from being decoded.
The next 8 bits are the packet type, this tells Dissonance how the contents of the packet should be decoded. The values used for this are defined in MessageTypes.cs
.
After that all packets (except HandshakeRequest
) have a 32 bit session number, this is a unique number randomly generated by the server when it starts a new session. If the session number does not match the packet is immediately discarded. If something goes wrong and packets from one Dissonance session are sent to another Dissonance session this prevents them from being decoded.
If a packet with an incorrect session number is received by the server it will send back an ErrorWrongSession
packet to the client which contains the session number being used by the server. If the client is not using this session number it will disconnect and reconnect to the server.
HandshakeRequest
message to the server. This tells the server the codec settings in use by this client as well as its name. The server replies with a HandshakeResponse
message. This sends the complete state of the server to the client:
the session ID. A unique value prepended to all packets.
+Listeners list. A list of clients and the rooms which they are currently listening to.
+The client replies with a ClientState
message. This tells the server the complete state of the client:
Name
+Info
+The HandshakeResponse
message contains data about all clients currently in the session. In a very large session this can cause a problem with oversize packets. It is valid for the server to send some/none of the client data in the initial HandshakeResponse
packet and instead to send it in individual ClientState
messages immediately after the HandshakeResponse
The server maintains a list of which rooms every client is currently listening to. Sending a complete ClientState
message every time a client joins or leaves a room would be wasteful; instead a DeltaClientState
message is sent. This contains:
joining
or leaving
The update messages ClientState
and DeltaClientState
are sent from clients to the server, which updates its internal state. The server also broadcasts these messages out to all clients which update their own state. This means that every client has exactly the same list of who is listening to which rooms.
It's possible for peers to communicate directly. When this is set up the metadata messages are still sent to the server but voice packets are sent directly from one client to another.
+To set this up a client sends a HandshakeP2P
message to every peer which it knows how to directly contact. The HandshakeP2P
message contains the ID of the sending client. When a client receives a HandshakeP2P
message from another client it can take note of the connection which that message came through, send back a HandshakeP2P
message in response over that connection, and now the two peers can communicate directly.
Each client records audio, preprocesses it to improve audio quality, encodes it (using Opus) and then sends the packet. The client decides who to send the packet to based on its knowledge of who is listening to what. The client sends the voice packet via P2P to as many clients as possible. The remaining packets are relayed via the server.
+The VoiceData
packet contains:
The 8 bit bitfield contains:
+ - The MSB is always set to 1
+ - The remaining 7 bits contain a wrapping counter which increments every time the "channel session" changes. The "channel session" changes whenever all sending channels are closed (i.e. there is an interruption in the voice stream).
When packets cannot be sent directly with P2P they can be relayed via the server. The ServerRelayReliable
and ServerRelayUnreliable
packets are used for this purpose. These packets contain a list of destination client IDs and then an array of bytes.
When the server receives one of these packets it sends the array of bytes out to all of the listed clients. The server will discard attempts to relay HandshakeP2P
packets.
Text packets can be sent through the Dissonance session, unlike voice they are always relayed via the server. The TextData
packet contains:
When a client leaves the session the server sends a RemoveClient
message out to all clients. This simply contains the ID of the client which is leaving the session.
This object represents a single speech channel directly to another player opened with the PlayerChannels API. The other player will receive the local voice without having to take any action.
+Closes this channel.
+Get the unique ID of this channel. This is only unique among the set of open channels - once this channel is closed the ID may be re-used by another channel.
+Get the name of the player this channel is sending voice to.
+Get a value indicating if this channel is currently open. Once a channel is closed you should release the channel struct - it is useless (re-opening the channel will create a new PlayerChannel struct).
+Once IsOpen becomes false then accessing most other properties will immediately throw an exception.
+Get or set whether audio sent through this channel should use positional playback.
+If there are multiple channels open sending the same voice then playback will only be positional if all channels are set to use positional playback.
+Get or set the priority of voice sent with this channel.
+If priority is set to None
then it will fall back to using the priority set on the local DissonanceComms component in the PlayerPriority
property.
If there are multiple channels open sending the same voice data then playback will use the highest priority.
+Get or set the volume to play back the voice sent through this channel. Volume is a direct multiplier on the audio data and should be between 0 and 1.
+If there are multiple channels open sending the same voice then playback will use the loudest volume.
+ + + + + + + + + + + + + +This object exposes properties and methods to do with players that the local player is speaking to.
+The number of players which the local player is speaking to.
+Returns a boolean value indicating if the local player is speaking to the given channel.
+Opens a channel to begin speaking to the given player and returns a PlayerChannel object which represents this open channel (and can be used to close it).
+Takes three optional parameters. +1. A boolean value indicating if this channel should use positional playback +2. A ChannelPriority which indicates the priority of this channel +3. A float which indicates the volume to play back this channel with
+Closes the given channel and returns a boolean indicating if the channel was open in the first place.
+ + + + + + + + + + + + + +A RemoteChannel
struct represents a snapshot of information about a channel which a player is speaking through.
Get the type of this channel. A channel is either to a Room (in which case the local player will only hear voices if they are subscribed to the Room) or to a player (in which case the appropriate player will hear the voice without needing to subscribe to it).
+Get the PlaybackOptions
which have been set for this channel. The actual playback options used are an aggregation of the options set on all the channels the local player is receiving voice from the given player through.
Get whether this channel should be played with positional audio. Actual audio playback will be positional only if all channels from the given player are set to positional playback.
+Get the amplitude multiplier to apply to audio through this channel. The maximum multiplier from all channels from the given player will be used.
+Get the priority of audio through this channel. The maximum priority from all channels from the given player will be used.
+Get the name of the target of this channel. This is either a room name or a player name, depending upon the Type
property.
This object represents a single speech channel to a room opened with the PlayerChannels API. Other players will only receive the voice if they have joined the room.
+Closes this channel.
+Get the unique ID of this channel. This is only unique among the set of open channels - once this channel is closed the ID may be re-used by another channel.
+Get the name of the room this channel is sending voice to.
+Get a value indicating if this channel is currently open. Once a channel is closed you should release the channel struct - it is useless (re-opening the channel will create a new PlayerChannel struct).
+Once IsOpen becomes false then accessing most other properties will immediately throw an exception.
+Get or set whether audio sent through this channel should use positional playback.
+If there are multiple channels open sending the same voice then playback will only be positional if all channels are set to use positional playback.
+Get or set the priority of voice sent with this channel.
+If priority is set to None
then it will fall back to using the priority set on the local DissonanceComms component in the PlayerPriority
property.
If there are multiple channels open sending the same voice data then playback will use the highest priority.
+Get or set the volume to play back the voice sent through this channel. Volume is a direct multiplier on the audio data and should be between 0 and 1.
+If there are multiple channels open sending the same voice then playback will use the loudest volume.
+ + + + + + + + + + + + + +This object exposes properties and methods to do with rooms that the local player is speaking to. For rooms the player is listening to, see this documentation instead.
+The number of rooms which the local player is speaking to.
+Returns a boolean value indicating if the local player is speaking to the given channel.
+Opens a channel to begin speaking to the given room and returns a RoomChannel which represents this open channel (and can be used to close it).
+Takes three optional parameters. +1. A boolean value indicating if this channel should use positional playback +2. A ChannelPriority which indicates the priority of this channel +3. A float which indicates the volume to play back this channel with
+Closes the given channel and returns a boolean indicating if the channel was open in the first place.
+ + + + + + + + + + + + + +This object exposes properties and methods to do with rooms that the local player is listening to. For rooms the player is speaking to, see this documentation instead.
+The number of rooms which the local player is listening to.
+Returns a boolean value indicating if the local player is listening to a room with the given name.
+Begin listening to the room with the given name. Returns a "RoomMembership" object which can be used to stop listening to the room.
+Stop listening to the room represented by the given RoomMembership.
+ + + + + + + + + + + + + +This object exposes properties and method to do with text chat within a Dissonance session.
+Send a message to the given room.
+Send a message to the given player.
+An event which indicates a text message was received from another player. The TextMessage
object contains information about who sent the message, how they sent it and what the message is.
This object exposes properties to do with other players in a Dissonance session. There is one of these objects per player (including the local player) in the Players
property on the DissonanceComms component. You can also get one of these objects for a specific player with the FindPlayer
method on the DissonanceComms component.
//Get your comms component
+DissonanceComms comms;
+
+//Get a specific player
+VoicePlayerState player = comms.FindPlayer("Player ID");
+
+//Enumerate all players in the session
+for (var i = 0; i < comms.Players.Count; i++) {
+ VoicePlayerState player = comms.Players[i];
+}
+
This event is raised every time this player starts speaking. It is passed the state object for this player.
+VoicePlayerState.OnStartedSpeaking += player => {
+ Debug.Log("Player " + player.Name + " Started Speaking");
+}
+
This event is raised every time this player stops speaking. It is passed the state object for this player.
+VoicePlayerState.OnStoppedSpeaking += player => {
+ Debug.Log("Player " + player.Name + " Stopped Speaking");
+}
+
This event is raised every time this player begins listening to a new room. It is passed the state object for this player and the name of the room.
+VoicePlayerState.OnEnteredRoom += (player, room) => {
+ Debug.Log("Player " + player.Name + " began listening to room " + room);
+}
+
This event is raised every time this player stops listening to a room. It is passed the state object for this player and the name of the room.
+VoicePlayerState.OnExitedRoom += (player, room) => {
+ Debug.Log("Player " + player.Name + " stopped listening to room " + room);
+}
+
This event is raised when the player leaves the session. After this the session object will never be used again. Even if the same player re-joins with the same name, they will be assigned a new state object.
+VoicePlayerState.OnLeftSession += player => {
+ Debug.Log("Player " + player.Name + " Left Session");
+}
+
The name of this player. This is the value in the DissonanceComms:LocalPlayerName
property for that player.
DissonanceComms comms;
+VoicePlayerState aPlayer;
+if (aPlayer.Name == comms.LocalPlayerName) {
+ Debug.Log(aPlayer.Name + " is the local player");
+}
+
Get a value indicating if this player is currently in the session.
+Get a value indicating if this player is currently speaking
+Get the current amplitude of the speech from this player. Value is in the range of 0 to 1. When using this value remember that 1 is the loudest value that can possibly be produced by the audio system - in most circumstances a speech signal will be very quiet (0 to 0.05 or less).
+Get the current priority of speech from this speaker. Null if the player is not speaking.
+Get the list of rooms this player is currently listening to.
+Get the estimated packet loss (0 to 1) to/from this player. May be null if the player has disconnected or packet loss has not yet been measured.
+Get the VoicePlayback
component associated with this player. May be null if Dissonance is still setting up playback for this player, or the player has left the session.
Get the IDissonancePlayer
component associated with this player. May be null if Dissonance is still setting up tracking for this player, this player does not have a IDissonancePlayer
component, or the player has left the session.
Get or set the Volume which speech from the player should be played at. The value is a direct multiplier applied to the audio and should be in the range 0 to 1.
+Get or set if this player is locally muted and will not produce any audio on the local machine.
+Get a snapshot of the channels you are hearing this speaker through. If they are not speaking to you then this will return no results. The channels
parameter passed in must not be null, the list will be cleared and then filled with the current snapshot.
Various Dissonance audio settings can be tweaked through the VoiceSettings asset. This asset can be quickly accessed through Window > Dissonance > Quality Settings
.
Warning
+The default settings are usually the best option. Don't change these options without understanding exactly what the trade-off is.
+All of these settings can be accessed at runtime from a script through Dissonance.Config.VoiceSettings.Instance
. Any settings which are changed by script are automatically saved into PlayerPrefs
and override the default settings (stored in the asset). This means you can configure these settings in your menus and they will persist when your application is closed and re-opened.
Controls how much audio is packed into a single network packet. Smaller frames reduce recording latency but send more packets over the network per second, which consumes more network data and slightly more CPU power.
+Warning
+The smallest option (Tiny
) is not suitable for use over the internet or over a wireless network. This option should only be used in very special cases where all clients will be connected to the same wired local area network.
The exact frame size at each setting is:
+Controls how many bits-per-second (on average) the audio codec will use to encode audio. Higher bitrates sound better but use more network data and slightly more CPU power.
+The data rate used by each quality setting is:
+Controls if the codec is using Forward Error Correction
which improves audio quality when packets are lost. When network conditions are good this makes no difference to network data used. When network conditions are bad this slightly increases the total data used (by about 10%) and massively improves audio quality (it can almost completely mask ~5% packet loss).
Warning
+It is very highly recommended to keep FEC enabled. It is a huge quality increase for a very small increase in network data usage.
+Controls how much the audio pre-processor removes noise from the signal. Higher values will remove more noise but may also make speech quieter.
+Info
+Sounds such as people talking in the background are not noise and will not be removed by the noise suppressor. This system removes non-voice sounds such as fan hum, keyboard clatter, or fuzz from a poor quality microphone.
+Enables RNNoise, an ML based background sound removal system. When there is a lot of background sound (e.g. people talking, dogs barking, keyboard clatter, fan noise, loud breathing) this system will remove it, but will distort speech much more than the basic Noise Suppression
system. Dissonance can run both noise removal systems at once, which reduces the amount of distortion present even in very noisy environments.
The intensity slider limits the amount of background sound that can be removed and also limits the maximum amount of distortion even in the worst case. Set it higher to cancel more noise.
+It is recommended to enable this system if you are building an application where there is likely to be a lot of environmental noise (e.g. a mobile app, where the user is expected to be on-the-move while talking) or an intense VR game (where the user may be breathing heavily while talking).
+The voice detector detects speech and activates Voice Broadcast Trigger components configured with Activation Mode: Voice Activation
. This settings controls a tradeoff between accuracy (not activating when no one is speaking) and sensitivity (always activating when someone is speaking).
A low sensitivity voice detector will not activate when there is non-speech audio (e.g. keyboard clatter), but it sometimes may not activate when there is speech (e.g. quiet speech).
+A high sensitivity voice detector will activate when there is speech, but it may also activate when there is non-speech audio.
+Info
+Acoustic Echo Cancellation requires some extra setup before it can be used. See this tutorial.
+Controls how much echo (feedback) the acoustic cancellation system attempts to remove from recorded audio. Higher values will remove more echo but may also severely distort recorded speech.
+Dissonance includes two completely different AEC algorithms which are used on Mobile and Desktop platforms. For Mobile Echo Cancellation the configuration value should approximately match the setup of the platform it is used on.
+Controls how much received Dissonance audio will be attenuated by when any VoiceBroadcastTrigger is activated (i.e. speech is being transmitted). This can help prevent feedback of recorded audio into the microphone. The AEC system is not perfect - even if you have AEC setup and working it is still worth using audio ducking.
+The default value configured in Dissonance is a very mild (almost imperceptible) level of audio ducking. Much smaller values can reasonably be used, particularly on mobile platforms or VR headsets where feedback (due to speakers and microphones in close proximity) is a much more common problem.
+ + + + + + + + + + + + + +Video
+See this video about access tokens.
+Access control tokens can be added to both Broadcast Triggers and Receipt Triggers. The trigger will not function unless the local player has one of the required tokens.
+Tokens can be added and removed through the inspector:
+This receipt trigger will not function unless the local player has one of the two access tokens - 'TopSecretPassword' or 'mysocratesnote'. Tokens can also be managed with scripts:
+var receiver = FindObjectOfType<VoiceReceiptTrigger>();
+
+receiver.AddToken("correcthorsebatterystaple"); // Add
+if (receiver.ContainsToken("correcthorsebatterystaple")) // Query
+ receiver.RemoveToken("correcthorsebatterystaple"); // Remove
+
Once triggers have been configured to require tokens you will need to add some tokens to the local player. This can be done in the inspector in the same way as for triggers. Tokens added in the inspector will apply to all players so they can only be used as the default tokens everyone starts with.
+You are more likely to want to manage tokens through scripts. When you create a player and do something which requires restricting their access to channels (e.g. joining a team) you should add the appropriate tokens to the local player:
+var local = FindObjectOfType<DissonanceComms>();
+
+local.AddToken("Green Team");
+
Assuming you have transmitters and receivers set up for every team, each one with a different token, this gives you a simple way to ensure that the player is speaking and listening to the right team channels.
+ + + + + + + + + + + + + +When playing audio from speakers and recording audio from a nearby microphone you will often encounter problems when the microphone picks up the audio from the speakers. In a voice session a single person doing this can cause annoying echoes to be transmitted and multiple people doing this simultaneously can cause painful feedback which persists until everyone stops transmitting. This can be particularly problematic when using Voice Activation Detection (VAD) because the VAD automatically transmits back all speech it detects, causing constant echoes of everything other people say. It can also be very bad on platforms where the mic and the speaker are very close together such as VR headsets and mobile phones. Acoustic Echo Cancellation (AEC) is a system to automatically remove these echoes from the transmitted voice signal.
+Echo is caused by sound which leaves the speaker, bounces off some of the nearby environment and re-enters the mic. The AEC filter is attached to the audio mixer on the output; this filter knows what sounds are leaving the speakers and this knowledge can be used to detect and remove that echo in the microphone preprocessor:
+Audio Output -> **Audio Postprocessor** -> Speakers -> Echo -> Microphone -> Audio Preprocessor
+
The most complex part of this system is working out what the delay is between the Audio Postprocessor
and the Audio Preprocessor
. This is achieved automatically but it is important to understand that the AEC system can take several seconds to work out the correct delay value - until it has done this no echo will be cancelled. The AEC cannot calculate the delay value while there is no sound being played and it will slowly "forget" the delay value during long periods of silence.
In most scenarios this is not a problem - game sound effects and background music will be enough to keep the AEC synchronised with a suitable delay value. However if you are encountering problems with the AEC not working you should consider adding some sound effects for the AEC to process - e.g. a short jingle when a user joins a session, or ringing sound when joining a session.
+Attach the Dissonance Echo Cancellation
audio filter to the very last audio mixer in the mixing system and disable the Auto Mixer Suspend
option for this mixer. If you were not already using audio mixers simply create a new mixer in Window > Audio Mixer
and attach the filter to that.
The filter will only process audio which passes through the mixer it is attached to - how to achieve this depends on what kind of audio mixing system you already had setup before using AEC.
+Dissonance Echo Cancellation
filter attached.AudioSource
components to output to the new mixer you created in the previous step.You can check that you have done this correctly by running the game and watching the audio mixer window. The dB meter on the mixer should move when non-voice audio is playing.
+Voice audio also needs to be re-routed to pass through the mixer with the filter attached. To change where voice audio is sent you need to create a custom playback prefab. Create a prefab with a VoicePlayback
component and an AudioSource
component. Set the output of the AudioSource to the correct mixer. Finally drop the prefab into the Playback Prefab
field of the Dissonance Comms
component.
If you were already using audio mixers then you may want to consider creating a mixer specifically for voice and outputting this mixer to the root mixer. This will allow you to attach sound effects specifically to voices.
+If you were not using audio mixers then you should just send the voice data to the mixer you created in step 1.
+Now that all the audio is routed to pass through the filter AEC can run. Open the Dissonance quality settings menu Window > Dissonance > Quality Settings
to set the amount of echo suppression applied. Desktop platforms and mobile platforms use different AEC systems internally and are configured separately. Dissonance will automatically switch to using the mobile AEC (AECM) when a mobile platform is detected.
These settings can be set in the editor - they will be saved into an asset and used as the default values at runtime. They can be changed at runtime by accessing the VoiceSettings
class:
//Change amount of AEC applied on Desktop
+VoiceSettings.Instance.AecSuppressionAmount = AecSuppressionLevels.Moderate;
+
+//Change amount of AEC applied on Mobile
+VoiceSettings.Instance.AecmRoutingMode = AecmRoutingMode.Speakerphone;
+
Only the two settings shown above can be changed while Dissonance is running, doing so will trigger a reset of the audio input system (causing a small hitch in transmitted audio). Changes to any other AEC related settings will be ignored until the next time the audio input system is reset (e.g. by changing the settings above).
+You should start with low AEC settings and ask the user to increase them if echo becomes a problem - excessive levels of AEC can very badly distort voices.
+Once you have set all of this up you may want to test that AEC is working as intended. To do so simply add an AudioSource
component to your scene playing some loud music - make sure it's routed through the correct mixer! Now run the scene in the editor and select the filter attached to the audio mixer, this will show a status screen for the AEC:
When the filter first starts all of the stats will be labelled as "initialising...", this indicates that the filter has not yet converged and will not yet be removing any echo. Once the AEC is running and has converged remote speakers in the session should not be able to hear the music you are playing. In our own tests we have had music playing loudly enough to drown out voices but even that was still cancelled!
+AEC is not a perfect system and there will usually be some echo which is not cancelled out. Certain conditions such as high background noise or a very large delay (e.g. Bluetooth headphones/microphones) can make this much worse or even stop it working at all. Therefore it's important to have other mitigations to reduce the impact of bad echo.
+Voice Ducking automatically reduces the volume of received voices when voice is being transmitted. This reduces the chance of feedback occurring since incoming voices are quieter and less likely to be picked up by the mic.
+Audio Ducking can be set up in the Unity mixer to reduce the volume of all non-voice sounds when any voice audio is being received. Because the other sound effects are quieter this allows you to reduce the volume of all voices, reducing the chance of the mic recording them.
+Background Sound Removal automatically removes non-voice sounds from the microphone signal. This cannot fix echoed voices, but it can improve other non-voice sounds that are recorded by the microphone.
+A Low Pass Filter can be set up in the Unity mixer on the received voices, with a Cutoff Frequency
of around 6000Hz. This is above the range of normal human speech. If the worst kind of feedback happens (very high pitched squealing) this will reduce the volume and prevent it from getting any worse.
Audio effect Dissonance Echo Cancellation could not be found.
Error (iOS): To fix this problem on iOS you must manually register the audio effect with the Unity audio pipeline.
+AudioPluginInterface.h
from the Unity native audio plugin SDK and add it to your XCode project.#import "AudioPluginInterface.h";
to UnityAppController.mm
in XCode.preStartUnity
method and add the line UnityRegisterAudioPlugin(&UnityGetAudioEffectDefinitions);
If this does not fix the issue, please add a comment to this issue.
+ + + + + + + + + + + + + +Video
+See this video about audio mixing.
+Dissonance does not have any special support built in for audio mixing, because Unity already has a powerful mixing system built in which Dissonance audio is routed through. You can find out more about the Unity audio mixing system here. This tutorial offers advice about the best way to use the Unity audio pipeline for VoIP.
+It can be tempting to mix voice signals in the same way as any other audio signal in your game and to add various sound effects to the voice for realism/immersion. Things such as drowning out teammates with loud gunfire, deafening players when they're hit by a flashbang or adding extreme radio fuzz when the enemy team use jammers might all sound immersive but in reality will just force people not to use the in game VoIP. Generally any audio mixing done to the voice signal should be done to improve the voice quality.
+Games frequently have very loud sound effects such as explosions and gunfire which can drown out other sounds in the game. However, it would interrupt conversations if these noises also drowned out the voice signal. A naïve solution would be to increase the volume of the voice signal far above the game sounds but doing this would cause clipping and sound terrible. An alternative solution would be to reduce the volume of the game audio far below the voice signal, but doing this would cause the game sounds to lack impact even when no one is talking. The best solution is to play game sounds at full volume when no one is talking but then when someone starts talking simply "duck" the volume so the voice can be clearly heard over the game sounds.
+Above is an example audio mixer for a game. Highlighted in red are all the groups of non-voice data, highlighted in blue is a group of NPC voice audio and highlighted in green is a group of human voice data. If you do not have any groups like this then all you need to add is a single group of "non-voice" and make sure all the game sounds play to this group. To make Dissonance play to the "Human Voice" group you need to modify your playback prefab: simply drag the "Human Voice" group into the AudioSource of the prefab and all voice will play to that group.
+The yellow arrows indicate "sends"; a send sends audio from one signal processor to another. At the receiving end of the sends is a "Duck Volume" effect, which reduces the volume of the group relative to the volume of the signal it receives via a send. The setup shown above has two volume ducks and three sends. The human voice sends to "Non-Voice" and "NPC Voice", which means that when a human speaks both NPC voices and other sounds get quieter. The "NPC Voice" has a single send to the "Non-Voice" group, which means that when an NPC speaks other sounds get quieter.
+As mentioned above you should be very cautious about applying any sound effects to the voice signal which are not for the purpose of enhancing the voice quality. However there are some situations where applying sound effects to voices could sound good, for example keeping allied communications clean, but adding a subtle radio distortion effect to enemy communications. Applying an effect is very simple, simply click add on the audio group and select the effect you want.
+Above is an example of an audio group with a distortion effect applied.
+ + + + + + + + + + + + + +Channel priority can be used to automatically mute low priority channels while high priority channels are speaking. For example muting the global voice chat room whilst someone in the team chat room is talking.
+There are 4 priority levels which can be set on a channel:
+1. Low
+2. Default
+3. Medium
+4. High
+
If a player is receiving voice from multiple sources then the sources with the highest priority will play and all others will be muted.
+There is another priority option: None
. If this is used then the priority falls back to the default value for this player, which is set in DissonanceComms.PlayerPriority
. If None
is specified as the default player priority then Default
is used instead.
The priority of a channel can be defined in a number of ways. The inspector for the Voice Broadcast Trigger allows you to set the priority for voice sent with this trigger:
+In scripts you can change the priority for a VoiceBroadcastTrigger with the Priority
property:
var trigger = GetComponent<VoiceBroadcastTrigger>();
+
+trigger.Priority = ChannelPriority.High;
+
Alternatively if you are directly using channels from scripts instead of using the trigger components you can set the priority when the channel is created, and then modify it from the channel object at any time:
+var comms = GetComponent<DissonanceComms>();
+
+//Create the channel with an explicit priority
+var channel = comms.RoomChannels.Open("Room Name", priority: ChannelPriority.High);
+
+//Change the priority
+channel.Priority = ChannelPriority.Medium;
+
+//Close the channel
+channel.Dispose();
+
The playback volume can be set by the speaker per broadcast channel. This can be used to individually reduce the volume of a speaker. For example fading off voice over a small period of time when someone stops speaking.
+The broadcaster trigger component exposes 2 amplitude settings in the inspector; activation fade and trigger fade.
+Activation Fade
applies a fade in/out every time speech is started or stopped. For example every time push-to-talk is pressed/released. This setting should be used with care; applying any fade-in is inadvisable as it will almost certainly cause the start of what is being said to be cut off.
Volume Trigger Fade
applies only to broadcast triggers which are using physics based volume triggers. This fade will be applied every time the player enters or exits the trigger area.
Both faders have the same three controls:
+The Channel Volume
slider controls the amplitude which will be reached after the fade in time has passed. This is a direct multiplier applied to the audio, values between 0 and 1 will reduce playback amplitude, values between 1 and 2 will increase playback amplitude. If both faders are in use the values will be multiplied together.
The Fade In Time
slider controls how long it takes the playback amplitude to increase from zero (silent) to the Channel Volume
slider value.
The Fade Out Time
slider controls how long it takes the playback amplitude to decrease from the Channel Volume
slider value to zero (silent).
If you are controlling channels directly from your own scripts you can control volume on the channel object.
+var comms = GetComponent<DissonanceComms>();
+
+//Create a channel with explicit amplitude
+var channel = comms.RoomChannels.Open("Room Name", amplitudeMultiplier: 0.5f);
+
+//At any time while the channel is open change the amplitude
+channel.AmplitudeMultiplier = 1.0f;
+
+//Close the channel
+channel.Dispose();
+
Video
+See this video about collider chat rooms.
+This tutorial will introduce volume triggers for transmission and receipt triggers, and how they can be used to implement localised chat rooms which allow users standing within the same area in your game world to chat with each other. This tutorial builds upon the setup in the Position tracking guide.
+++Position Tracking must be set up to allow Dissonance to track player positions for collider chat rooms to function.
+
A demo scene for this tutorial can be found in Dissonance/Demos
.
Imagine your game has multiple physical lobby rooms all connected to a central corridor. You decide that you want players to be able to speak to and hear the other players in whatever room they are in, and for this to dynamically update as they move from room to room.
+The first thing you will need to do is define the volume which represents your lobby room using a Unity trigger volume.
+Create a new game object called "LobbyChatRoom". Add a "Box Collider" to the game object, set its size to the size of your lobby room and check "Is Trigger".
+To allow you to hear the users talking in the lobby chat room you will need to add a "Voice Receipt Trigger" to the game object. Unlike the "Global" chat channel in the quick start guide, here you will add this to the same game object as the "Box Collider".
+Enable "Trigger Activation" on the "Voice Receipt Trigger" to tell the script to only listen to the room when the player is within the collider attached to the game object.
+Right now, the receipt trigger is listening to the "Global" chat room, not the chat room for the lobby.
+On the inspector for the "Voice Receipt Trigger" click "Config Rooms" to go to Dissonance's room configuration. By default, Dissonance creates three chat rooms; "Global", "Red Team" and "Blue Team". Click "Add Room", and rename the new room to "Lobby".
+Now, go back to the receipt trigger, and change the selection in the "Chat Room" drop down to the new "Lobby" room.
+++Chat rooms can be named dynamically when configuring the triggers programmatically.
+
You now have a receiver configured to hear other people talking in the lobby room, but no one is saying anything! You need to add a broadcast trigger to the room.
+Add a "Voice Broadcast Trigger" script to the game object. Use a Channel Type of "Room", and choose the "Lobby" room.
+You now have a trigger box set up as a chat room. Players standing within the collider can talk to each other in the "Lobby" chat room, players who walk out of the volume will not be able to speak to or hear from the lobby room.
+By default Dissonance uses the BasicMicrophoneCapture
behaviour to record audio from a microphone using the Unity Microphone API and feed it into Dissonance. However this script is not ideal for all use cases. You can replace the microphone capture system in Dissonance by creating a new behaviour which implements IMicrophoneCapture
and adding the script to the same gameObject as the DissonanceComms
behaviour.
This tutorial will explain how to build a replacement capture system which streams audio from a file. Before following the tutorial make sure you've read the reference docs so you understand what the IMicrophoneCapture
interface means.
First you need to create a new script with the IMicrophoneCapture interface on it and drop it onto the same GameObject as the DissonanceComms component.
+Here is an example script. If you run the scene with this you should see a single exception printed to the console coming from the StartCapture
method.
Now you need to properly start and stop the script without throwing exceptions.
+StartCapture
should return the format of the audio you will be providing. This must be mono (i.e. 1 channel) and any sample rate is acceptable (just use whatever is most convenient for you). If your capture system is not ready you can return null to prevent start-up. If you return a non-null value you must set IsRecording
to true
and you should set Latency
to an appropriate value. The Latency
value indicates an estimate of the time between sound physically hitting the microphone to submitting the audio to Dissonance, if you don't know this value leave it set to zero.
StopCapture
should do whatever you need to stop the underlying capture system. Once this is done you must set IsRecording
to false
.
Subscribe
and Unsubscribe
should simply keep a list of subscribers. You can implement this as a List<IMicrophoneSubscriber>
where Subscribe
just calls Add
and Unsubscribe
just calls Remove
and returns the value.
Here is an example script. If you run the scene with this you should see exceptions printed to the console every frame coming from the UpdateSubscribers
method.
Now you need to stream some audio to Dissonance to stop the script throwing exceptions every frame.
+When IsRecording
is true
(i.e. after StartCapture
has been called and before StopCapture
has been called) your capture script must provide audio at approximately a realtime rate. Dissonance will try to handle slight "bumps" (e.g. audio arriving slightly early or late) but overall you must supply audio at the correct rate. For example the BasicMicrophoneCapture
script assumes that the microphone supplies audio at the correct rate, if you're reading from some kind of recording hardware this is probably a good assumption to make. The basic process for the microphone capture (which you may be able to replicate in your custom system) is:
For this step we won't interact with any hardware, instead we'll just submit silence to Dissonance at the correct rate.
+Here is an example script which implements UpdateSubscribers by simply submitting silence at the correct rate. If you run this everything should work as expected in Dissonance (no exceptions), but of course you will not hear anything. This works by preallocating an array of 960 float
, which represents 20ms of audio at 48kHz sample rate. Every time 20ms have elapsed, the buffer is submitted to Dissonance. Note that time is measured using unscaledDeltaTime
, since audio needs to run at real time rate.
Finally we'll add a basic file streaming system, this will read an audio file and play it into Dissonance. For simplicity this will not handle decoding of the audio from any well-known format, instead you should transcode the audio into raw samples. You can do this with ffmpeg:
+++ffmpeg.exe -re -i AudioFile.wav -f f32le -ar 48000 -ac 1 output.raw
+
Here is an example script which implements this. The basic process is the same as the silence system:
+unscaledDeltaTime
Dissonance is built to be completely decoupled from the underlying networking system, this allows Dissonance to run on top of various different Unity networking assets (e.g. UNet, Forge, Photon etc) just by swapping which Dissonance network component is used. If none of the existing integrations work for your application then you may need to build a custom network integration.
+Dissonance includes a set of base classes which implement most of the networking logic for you:
+BaseCommsNetwork
- This is the main comms network component which you place into your scene. It manages the networking, starting and stopping Dissonance networking in response to network events.BaseServer
- This is a class created by the comms network component on one of the peers in the session. It manages the session as other peers join and leave. You will extend this class to implement your server logic.BaseClient
- This is a class created by the comms network component on all the peers in the session. It manages sending and receiving voice. You will extend this class to implement your client logic.Create the CustomCommsNetwork
class which extends BaseCommsNetwork
:
public class CustomCommsNetwork
+ : BaseCommsNetwork<
+ CustomServer, // A class which implements BaseServer
+ CustomClient, // A class which implements BaseClient
+ CustomPeer, // A struct which represents a network connection
+ Unit, // Nothing
+ Unit // Nothing
+ >
+{
+}
+
As you can see BaseCommsNetwork
requires 5 type parameters, which specify all the parts of your custom network integration:
CustomServer
- This is a class you will create which extends BaseServer
CustomClient
- This is a class you will create which extends BaseClient
CustomPeer
- This is a struct you will create which represents another peer in the network session.CustomClientParam
- This is a struct you will create which contains the data necessary to create a network connection (e.g. an IP address). If your network does not need this (e.g. it is already running before Dissonance is started) then just pass Unit
.CustomServerParam
- This is a struct you will create which contains the data necessary to host a network session (e.g. a port number). If your network does not need this (e.g. it is already running before Dissonance is started) then just pass Unit
.To create all these types define three new classes:
+class CustomCommsNetwork : BaseCommsNetwork {}
class CustomClient : BaseClient {}
class CustomServer : BaseServer {}
And three new structs:
+struct CustomPeer : IEquatable<CustomPeer> {}
struct CustomServerParam {}
struct CustomClientParam {}
Once you have done this you will have a large number of build errors like "abstract member [...] not implemented" - these are the things you must implement before the network integration can work.
+CustomCommsNetwork : BaseCommsNetwork
⚓︎In your custom comms network class you will need to create your custom client and custom server objects. Dissonance will call these methods when a server or client needs to be created. You shouldn't connect to the network in this method, simply create the objects.
+protected override CustomServer CreateServer(CustomServerParam details)
+{
+ return new CustomServer(this, details);
+}
+
+protected override CustomClient CreateClient(CustomClientParam details)
+{
+ return new CustomClient(this, details);
+}
+
If you need to do any other setup work for your network system you can override the Initialize
method.
protected override void Initialize()
+{
+ Network.DoSomethingImportant();
+
+ // Don't forget to call base.Initialize!
+ base.Initialize();
+}
+
Finally you need to start the network and tell it to connect, there are two main techniques for this. Some integrations (e.g. Mirror/Photon) have a network system which is already connected and Dissonance can use that, other integrations (e.g. WebRTC) host a network session specifically for voice chat.
+If you are using the first technique then you need to monitor the external network system and make sure that Dissonance is running in the same way as the network system by calling RunAsHost
, RunAsDedicatedServer
, RunAsClient
or Stop
. Here is how this is implemented in the HLAPI network integration:
// Check every frame
+protected override void Update()
+{
+ // Check if Dissonance is ready
+ if (IsInitialized)
+ {
+ // Check if the HLAPI is ready
+ var networkActive = NetworkManager.singleton.isNetworkActive && (NetworkServer.active || NetworkClient.active);
+ if (networkActive)
+ {
+ // Check what mode the HLAPI is in
+ var server = NetworkServer.active;
+ var client = NetworkClient.active;
+
+ // Check what mode Dissonance is in and if
+ // they're different then call the correct method
+ if (Mode.IsServerEnabled() != server || Mode.IsClientEnabled() != client)
+ {
+ // HLAPI is server and client, so run as a non-dedicated
+ // host (passing in the correct parameters)
+ if (server && client)
+ RunAsHost(Unit.None, Unit.None);
+
+ // HLAPI is just a server, so run as a dedicated host
+ else if (server)
+ RunAsDedicatedServer(Unit.None);
+
+ // HLAPI is just a client, so run as a client
+ else if (client)
+ RunAsClient(Unit.None);
+ }
+ }
+ else if (Mode != NetworkMode.None)
+ {
+ //Network is not active, make sure Dissonance is not active
+ Stop();
+ }
+ }
+
+ base.Update();
+}
+
If you are using the second technique then you will need to decide when to call RunAsHost
, RunAsDedicatedServer
, RunAsClient
or Stop
at the appropriate times.
CustomClient : BaseClient
⚓︎This class handles all of the client side logic of Dissonance, one of these will be created on every single peer in the session (including the host). There will be two build errors to fix in this class.
+Base class [...] doesn't contain a parameterless constructor
. To fix this simply add a constructor which passes a CustomCommsNetwork
to the base class:
public CustomClient(CustomCommsNetwork network, CustomClientParam details)
+ : base(network)
+{
+}
+
abstract member [...] not implemented
. There will be four of these errors to fix:
This will be called when you need to connect to the session. You should start connecting to the network when this is called, once you have finished connecting (which may take a long time) you must call base.Connected()
. In systems where there is already a network connection setup you may just immediately call base.Connected()
.
This will be called periodically to poll messages from the network system. Any packets you receive must be passed to base.NetworkPacketReceived
.
This method sends a packet to the server using a reliable and in order channel (e.g. TCP). Packets sent with this method are not latency sensitive but MUST arrive in order. If you detect that a reliable packet has been lost you should immediately stop the Dissonance network session.
+This method sends a packet to the server using an unreliable and unordered channel (e.g. UDP). Packets sent with this method are extremely latency sensitive and must arrive as soon as possible or not at all. It is expected that some packets sent using this method may be lost or arrive out of order.
+CustomServer : BaseServer
⚓︎This class handles all the server side logic of Dissonance, one of these will be created on a single peer in the session and handles managing the session. In the basic configuration all voice data is relayed via this peer (see P2P section for details on how to avoid this). There will be five "abstract member [...] not implemented" errors to fix:
+public override void Connect()
⚓︎This will be called when you need to host a new network session (e.g. open a socket).
+public override void Disconnect()
⚓︎This will be called when you need to stop hosting a session (e.g. close the socket).
+protected override void ReadMessages()
⚓︎This will be called periodically to poll messages from the network system. Any packets you receive should be passed to base.NetworkPacketReceived
. The base.NetworkPacketReceived
method on the server requires an instance of your CustomPeer
type which indicates who sent the message.
protected override void SendReliable(CustomPeer destination, ArraySegment<byte> packet)
⚓︎This method sends a reliable packet to another peer using a reliable and in order channel (e.g. TCP). Packets sent with this method are not latency sensitive but MUST arrive in order. If you detect that a reliable packet has been lost you should immediately stop the Dissonance network session.
+If you need some extra information about who the packet is being sent to, you should add it to the CustomPeer
struct. Remember to go to the ReadMessages
method and add that information to the CustomPeer
struct you passed in to NetworkPacketReceived
.
protected override void SendUnreliable(CustomPeer destination, ArraySegment<byte> packet)
⚓︎This method sends a packet to the destination peer using an unreliable and unordered channel (e.g. UDP). Packets sent with this method are extremely latency sensitive and must arrive as soon as possible or not at all. It is expected that some packets sent using this method may be lost or arrive out of order.
+ClientDisconnected
⚓︎When a peer disconnects from the server you must call ClientDisconnected
to notify the server.
Finally you should create an inspector for your CustomCommsNetwork. Doing this is very simple, extend the BaseDissonanceCommsNetworkEditor
class and pass the same 5 generic types you defined above. Attach the CustomEditor
attribute to the class.
[CustomEditor(typeof(CustomCommsNetwork))]
+public class CustomCommsNetworkEditor
+ : BaseDissonnanceCommsNetworkEditor<
+ CustomCommsNetwork,
+ CustomServer,
+ CustomClient,
+ CustomPeer,
+ CustomClientParam,
+ CustomServerParam
+ >
+{
+}
+
This will set up a basic inspector for you.
+At this point you should have a basic voice chat system functioning with your custom network. You should set up a test scene to test it. While the test scene is running check these things:
+CustomCommsNetwork
component.Mode
should show "Server & Client", "Client" or "Server" depending on the mode this peer is running in.Connection Status
should show "Connected"DissonanceComms
component. It shows a list of clients in the session, disconnect a client and make sure they disappear.
and a CustomServer
on the host machine (unless running a dedicated server). The server must be able to send and receive messages to this local peer the same as any other peer. This can cause complications with some network systems which do not handle this kind of "loopback" correctly. You must also be careful to make sure you can distinguish messages from other peers to the host - make sure that they don't get processed by the host client object.
To handle this many of the Dissonance integrations have a special check for loopback. For example in the HLAPI integration there is a HlapiCommsNetwork:PreprocessPacketToClient
method which is given all packets sent from the server to the client, it checks if the packet is a loopback packet and if so it passes it directly to the client and HLAPI itself never has to deal with this packet.
internal bool PreprocessPacketToClient(ArraySegment<byte> packet,
+ HlapiConn destination)
+{
+ // No client means this can't be loopback
+ if (Client == null)
+ return false;
+
+ // HLAPI way to check if this is loopback.
+ if (NetworkManager.singleton.client.connection != destination.Connection)
+ return false;
+
+ // This is loopback!
+
+ // check that we have a valid local client,
+ // in cases of startup or in-progress shutdowns
+ if (Client != null)
+ {
+ // Don't immediately deliver the packet, add it to a queue and
+ // deliver it next frame. This prevents the local client from
+ // executing "within" the local server which can cause
+ // confusing stack traces.
+ _loopbackQueue.Add(packet.CopyTo(_loopbackBuffers.Get()));
+ }
+
+ return true;
+}
+
Currently the network integration you have built sends all packets to the server, which then relays them to other clients. If possible you may want to implement peer to peer voice communications. However, you should consider the bandwidth of your game before implementing peer to peer as it is not always beneficial to use it.
+In a non P2P setup voice follows a path like:
+Speaker -> Server -> Listener #1
+ -> Listener #2
+ -> Listener #3
+
In this case the bandwidth used by the speaker is 1 voice stream ~20 kilobits/second
. The bandwidth used by each listener is 1 voice stream ~20 kilobits/second
. The bandwidth used by the server is (Speakers + Listeners) * Bandwidth = (1 + 3) * ~20 = ~80 kilobits/second
. In this setup the bandwidth of each client (speaker or listener) is the minimum possible. If your game uses client devices with tight bandwidth limits this may be the best setup.
In a P2P setup the voice follows a different path:
+Speaker -> Listener #1
+ -> Listener #2
+ -> Listener #3
+
The bandwidth on the server has been reduced (to zero). However, the total bandwidth for the speaker client is now Listeners * Bandwidth = 3 * ~20 = ~60 kilobits/second
.
If you have decided to use peer to peer you need to modify your CustomClient
class. Wherever you call NetworkReceivedPacket
you should modify it to capture the return value of the method call, if the value is not null call ReceiveHandshakeP2P
with it and a CustomPeer
object for the sender of the message. For example in the Photon Unity Networking (PUN) integration the receiving code is implemented like this:
// This event is called by PUN when a packet arrives
+public void PacketDelivered(byte eventcode, ArraySegment<byte> data,
+ int senderid)
+{
+ // Skip events we don't care about
+ if (eventcode != _network.EventCodeToClient)
+ return;
+
+ // Receive the packet, capture return value
+ var id = NetworkReceivedPacket(data);
+
+ // If the value is not null
+ // pass to handshake method with the `senderid` of this packet
+ if (id.HasValue)
+ ReceiveHandshakeP2P(id.Value, senderid);
+}
+
You now need to implement two more methods for sending packets:
+SendReliableP2P(List<ClientInfo<TPeer?>> destinations, ArraySegment<byte> packet)
⚓︎SendUnreliableP2P(List<ClientInfo<TPeer?>> destinations, ArraySegment<byte> packet)
⚓︎These methods send a packet to a list of destinations. You should send the packet to as many of these destinations as possible and remove them from the list. Once you are done call the base method with the remaining items in the list, they will be sent via the server as usual. For example the PUN implementation of this is:
+private void SendUnreliableP2P(IList<ClientInfo<int?>> destinations,
+ ArraySegment<byte> packet)
+{
+ // Build a list of destinations we know how to send to
+ // i.e. have a non-null Connection object
+ var dests = new List<int>();
+ foreach (var item in destinations)
+ if (item.Connection.HasValue)
+ dests.Add(item.Connection.Value);
+
+ // Remove all the ones we can send to from the input list
+ destinations.RemoveAll(dests);
+
+ // Send the packets to the list of destinations through PUN
+ _network.Send(packet, dests, reliable: false);
+
+ // Call base to do server relay for all the peers we don't
+ // know how to contact
+ base.SendUnreliableP2P(destinations, packet);
+}
+
Because there is a fall-back mechanism you can mix P2P and non-P2P packets as necessary. For example you start by sending everything via the server, establish a p2p connection between clients and if it fails (e.g. due to firewall or NAT settings) you can simply keep on sending via relay for that specific pair of clients. Alternatively you could monitor client bandwidth and send via P2P if there is spare bandwidth - falling back to server relay if the client is close to reaching its bandwidth limit.
+Finally you need to start establishing p2p connections. Override the OnServerAssignedSessionId
method, when this is called you should send a "handshake" packet to every peer you know how to contact directly. This will tell those peers that you are available for p2p communication. For example in the PUN integration this is implemented as:
protected override void OnServerAssignedSessionId(uint session, ushort id)
+{
+ base.OnServerAssignedSessionId(session, id);
+
+ // Create the handshake packet to send
+ var packet = new ArraySegment<byte>(WriteHandshakeP2P(session, id));
+
+ // Send this to everyone else in the session through PUN
+ _network.Send(packet, _network.EventCodeToClient, new RaiseEventOptions {
+ Receivers = ReceiverGroup.Others,
+ }, true);
+}
+
This tutorial will explain how to write the scripts necessary to extend the Dissonance position tracking system to more advanced scenarios. The basics of position tracking are explained in this tutorial.
+Dissonance tracks the position of players through a behaviour which implements the IDissonancePlayer interface. This interface exposes the necessary information for Dissonance to play back voices in the correct locations.
+public interface IDissonancePlayer
+{
+ string PlayerId { get; }
+ Vector3 Position { get; }
+ Quaternion Rotation { get; }
+ NetworkPlayerType Type { get; }
+}
+
Once you have implemented these four properties on your tracker you must register it with Dissonance. To do this simply call FindObjectOfType<DissonanceComms>().TrackPlayerPosition(this);
at some point after tracking has started. Once this tracker is no longer in use you must unregister your tracker from Dissonance, to do this simply call FindObjectOfType<DissonanceComms>().StopTracking(this);
.
This is the ID of the player which this object represents. For the local player this is the value in the LocalPlayerName
property on your DissonanceComms
object. This value should be synchronised across the network. How this works will depend upon your networking system. For example here is how the HLAPI integration does it:
private string _playerId;
+
+// This property implements the PlayerId part of the interface
+public string PlayerId { get { return _playerId; } }
+
+// When the network system starts this behaviour, this method runs
+public override void OnStartAuthority()
+{
+ base.OnStartAuthority();
+
+ // Get the local DissonanceComms object
+ var comms = FindObjectOfType<DissonanceComms>();
+
+ // Call set player name, to sync the name across all peers
+ SetPlayerName(FindObjectOfType<DissonanceComms>().LocalPlayerName);
+
+ // Make sure that if the local name is changed the
+ // change is synced across the network
+ comms.LocalPlayerNameChanged += SetPlayerName;
+}
+
+private void SetPlayerName(string playerName)
+{
+ CmdSetPlayerName(playerName);
+}
+
+// This is a "Command", which means it is sent to and executed on the server.
+// This does the actual synchronisation of the name across the network.
+[Command]
+private void CmdSetPlayerName(string playerName)
+{
+ _playerId = playerName;
+}
+
These properties supply the location information which is used by Dissonance to properly play positional audio. If the behaviour is attached to the object which represents the player position then implementing this is trivial:
+public Vector3 Position
+{
+ get { return transform.position; }
+}
+
+public Quaternion Rotation
+{
+ get { return transform.rotation; }
+}
+
If you wanted to represent a slightly different location (e.g. your player is made of multiple objects, one of which represents the head) then you would need to change the implementation of the properties slightly:
+private MonoBehaviour _head;
+
+public Vector3 Position
+{
+ get { return _head.transform.position; }
+}
+
+public Quaternion Rotation
+{
+ get { return _head.transform.rotation; }
+}
+
+// When this behaviour is enabled, find the other object we want to get the position from
+public void OnEnable()
+{
+ _head = GetEntityWhichRepresentsTheHead();
+}
+
This indicates to Dissonance if this object represents the (singular) local player or one of the (multiple) remote players. How you implement this property depends upon your network system. Using the HLAPI integration as an example again:
+public NetworkPlayerType Type
+{
+ get { return isLocalPlayer ? NetworkPlayerType.Local : NetworkPlayerType.Remote; }
+}
+
This assumes that the component is attached to a player object. Therefore if it is not the local player then it must be a remote player.
+Video
+See this video about direct player messaging.
+This tutorial will explain how to broadcast a voice message directly to a specific player, rather than to all players in a room. There are two ways to achieve this.
+To transmit to a specific player, change the Channel Type option on the VoiceBroadcastTrigger
to "Player", then give the player name for Recipient Player Name.
To change the targeted player at run time modify the PlayerId
field of the VoiceBroadcastTrigger
behaviour.
GetComponent<VoiceBroadcastTrigger>().PlayerId = "TheNewRemotePlayerName";
+
If you have set up Dissonance position tracking in your game then the game objects which represent your players will all have a behaviour on them which implements the IDissonancePlayer
interface. For example if you are using the Forge Networking integration this is the ForgePlayer
component.
To transmit to this player change the Channel Type option on a VoiceBroadcastTrigger
attached to the same game object to "Self".
This tutorial will explain how to use the channel API for fine grained control over when and where voice is sent. Channels are the system which are used internally by the transmit triggers which come with Dissonance. Direct use of channels requires writing scripts.
+Using a channel is quite simple - when a channel is open voice will be sent to whoever is appropriate. A single client may have multiple channels open at once, potentially all sending to the same remote player. The remote playback system will correctly handle this situation and will only play the voice back once. There are two kinds of channels, which correspond to two different types of receivers.
+When a player channel is opened the local voice is sent to the player associated with that channel. The receiving player does not need to take any action to receive the voice. This is accessed through the PlayerChannels
property on the DissonanceComms object.
DissonanceComms comms;
+PlayerChannel channel = comms.PlayerChannels.Open(string playerId, bool positional, ChannelPriority priority);
+
When a room channel is opened no voice is sent anywhere by default. Receiving players must take an action to indicate that they wish to receive the voice (i.e. join the room). This is accessed through the RoomChannels
property on the DissonanceComms object (to open a sending channel) and the Rooms
property (to control receipt).
DissonanceComms comms;
+RoomChannel channel = comms.RoomChannels.Open(string roomId, bool positional, ChannelPriority priority);
+
DissonanceComms comms;
+comms.Rooms.Join(string roomId);
+
When you open a channel you receive back an object which represents that channel. This object allows you to control the channel while it is still open.
+bool IsOpen { get; }
This property indicates if the channel is open. A channel will remain open until you explicitly close it.
+bool Positional { get; set; }
This property indicates if this channel should be played back with positional data. You may change this value at any time.
+When a channel is using positional audio the remote playback system will position the playback in space so that it sounds like the player's voice is coming from the correct direction. If a channel is not using positional audio the voice will be non-directional.
+ChannelPriority Priority { get; set; }
This property indicates the priority associated with data sent over this channel. You may change this value at any time.
+When a receiver is receiving multiple channels simultaneously it will only play the highest priority channel(s) it is currently receiving.
+Dispose()
Close the channel.
+A global chat room is just a single room which all users talk to and listen to. This is a very simple system to create using Dissonance.
+Both components will activate when the scene loads (on each different computer in the network session) and all players will be in the room.
+Find out more about the broadcast trigger and the receipt trigger.
+The playback prefab is how Dissonance plays the audio signal from each player. A copy of the prefab is instantiated for each player and then moved into the correct position for positional audio to work. Creating your own playback prefab allows you to customise the AudioSource settings used for voice or attach your own script to the prefab. To use a custom prefab drag the prefab into the Playback Prefab
field on the Dissonance Comms component inspector.
If no prefab is set Dissonance will automatically use a default prefab.
+The playback prefab must include a VoicePlayback
component (part of Dissonance).
You may also attach a Unity AudioSource
component, in which case you can adjust some of the settings to change how voice will be played back. However, the following settings will be overwritten by Dissonance:
When writing your own scripts to attach to the playback prefab it is important to remember that the lifetime is managed entirely by Dissonance. Prefab instances are recycled to reduce the amount of garbage created. This means that your custom script attached to the prefab must be able to handle being re-assigned from one player to another.
+When there are no instances available to use, a new one is created:
+When the player for an instance leaves the prefab is recycled:
+When another player joins an instance is retrieved and re-used:
+To handle this in your script simply use the normal Unity lifecycle events:
+void Awake()
+{
+ // This only runs once. Use this to perform one-time setup.
+
+ // e.g. Find some Dissonance components
+ _playbackComponent = GetComponent<VoicePlayback>();
+ _dissonanceComms = FindObjectOfType<DissonanceComms>();
+}
+
+void OnEnable()
+{
+ // This runs every time the script is activated. Use this to perform per-player setup
+
+ // e.g. find information about this player
+ _playerState = _dissonanceComms.FindPlayer(_playbackComponent.PlayerName);
+}
+
+void Update()
+{
+ // This will run every frame while the script is active
+}
+
+void OnDisable()
+{
+ // This runs every time the script is deactivated. Use this to perform per-player cleanup
+
+ // e.g. Remove the things which were initialised in OnEnable
+ _playerState = null;
+}
+
Dissonance offers an easy to use API for finding out information about other players in the session.
+There are two ways to discover who is in the Dissonance session - events and polling. To get a list of players currently in the session, you can access the Players
property on the DissonanceComms object:
var comms = FindObjectOfType<DissonanceComms>();
+foreach (var player in comms.Players)
+{
+ Debug.Log("Player " + player.Name + " is in the game");
+}
+
This will give you a set of VoicePlayerState objects (including one for the local player). These objects will stay valid forever and will be updated with new information as necessary.
+Dissonance also exposes some events which will get invoked when certain things happen, for example a new player joining the session.
+var comms = FindObjectOfType<DissonanceComms>();
+comms.OnPlayerJoinedSession += player => {
+ Debug.Log("Player " + player.Name + " Joined session");
+};
+
+comms.OnPlayerLeftSession += player => {
+ Debug.Log("Player " + player.Name + " Left session");
+};
+
The player
objects passed to the event handlers here are VoicePlayerState objects which expose a lot of useful data about the players such as if they are currently talking and a live readout of the amplitude.
Video
+See this video about position tracking.
+This tutorial will explain how to configure your project to track the position of players. This is required for 3D positional audio playback of remote player voice chat and collider trigger support for VoiceBroadcastTrigger
and VoiceReceiptTrigger
. There are some additional steps required for this to work with Photon BOLT, if you are not using that network integration instead see the more general position tracking tutorial.
First you need to modify the bolt state which you use for your player; add a new string property called DissonancePlayerId
.
Now you need to create a new script which will use this state. Dissonance includes a base class which does most of the work for you.
+using Dissonance.Integrations.PhotonBolt;
+
+public class DissonancePlayerTracking
+ : BoltPlayer< ??? > // <-- See below
+{
+ public DissonancePlayerTracking()
+ : base("DissonancePlayerId", state => state.DissonancePlayerId, (state, id) => state.DissonancePlayerId = id)
+ {
+ }
+}
+
The ???
in the example needs to be replaced with the state which bolt has generated for your player.
To setup position tracking you simply need to attach the DissonancePlayerTracking component to the game object which represents each player.
+Ensure that this component is attached to all entities in the scene which represent a player (both the local player and all remote players). If you have a prefab which is used to construct your players you can simply attach the behaviour to this prefab.
+When positional audio is enabled the voice from remote players will sound like it is coming from the correct position. To enable this simply tick the "use positional data" checkbox on the voice broadcast trigger.
+Voice broadcaster triggers and voice receipt triggers can be configured to only send/receive audio when the local player is inside a certain volume. See this tutorial for how to achieve this.
+When position tracking is enabled transmitting to a specific player is simplified. If a Voice Broadcast Trigger
is attached to a player entity it can be configured to transmit to the player represented by the game object. See this tutorial for details.
Video
+See this video about position tracking.
+This tutorial will explain how to configure your project to track the position of players. This is required for 3D positional audio playback of remote player voice chat and collider trigger support for VoiceBroadcastTrigger
and VoiceReceiptTrigger
.
To setup position tracking you need to attach a single behaviour to all your player gameObjects. The behaviour can be found in the folder for the network integration you are using, for example for HLAPI it is located at Assets/Dissonance/Integrations/UNet_HLAPI/HlapiPlayer.cs
. Ensure that this component is attached to all gameObjects in the scene which represent a player (the local player and all remote players). If you have a prefab which is used to construct your players you can simply attach the behaviour to this prefab.
Some network integrations do not include player tracking scripts. In this case you will need to implement it yourself. View the documentation for custom position tracking here.
+Dissonance does not send any extra data across the network when position tracking is enabled - instead it relies on your game objects already being in the right place on every client and simply plays the audio from wherever they are in space. Enabling position tracking does not use any extra bandwidth.
+When positional audio is enabled the voice from remote players will sound like it is coming from the correct position. To enable this simply tick the "use positional data" checkbox on the voice broadcast trigger.
+Voice broadcaster triggers and voice receipt triggers can be configured to only send/receive audio when the local player is inside a certain volume. See this tutorial for how to achieve this.
+When position tracking is enabled transmitting to a specific player is simplified. If a Voice Broadcast Trigger
is attached to a player entity it can be configured to transmit to the player represented by the game object. See this tutorial for details.
Dissonance includes two ways to set up proximity chat: Grid Proximity
and Collider Proximity
.
Grid Proximity uses spatial hashing to place nearby players all into the same room. This is simple to setup and scales very well even to a large number of players (no additional bandwidth is consumed no matter how many players are in proximity).
+Collider proximity gives much more exact control over proximity - players are considered to be in proximity based on a Unity collider trigger which can be dynamically changed in shape and sizes by your scripts. Use this if you need very precise control of proximity chat.
+Before any kind of proximity chat can work you must set up position tracking to tell Dissonance where each player is. Each player in the scene should have a player tracker attached to them, the transform of this tracker is the position that Dissonance uses as the position of that player.
+Voice Proximity Broadcast Trigger
to the scene. This controls when voice will be sent.Chat Room
for this trigger.Activation Mode
which decides when voice should be transmitted.Voice Proximity Receipt Trigger
to the scene. This controls when voice will be received.Chat Room
to the same value as the broadcast trigger.
+Collider proximity is still supported, but should be considered deprecated. Prefer to use Grid Proximity where possible.
+
A more precise system of proximity chat can be set up by combining direct player transmission and collider chat rooms. Each player in your game should have a voice broadcast trigger attached to it (set to broadcast directly to that player) and configured as a collider chat room with a suitable collision volume (e.g. a large sphere). When two players stand close to one another they will enter each others transmission trigger volumes and begin talking to one another.
+In this example channel type is set to "Self", this means the broadcast trigger searches for one of the Dissonance position tracking behaviours and transmits directly to the player which that represents.
+Video
+See this video about push to talk activation.
+When a broadcast trigger is in Push-To-Talk (PTT) mode voice will only be transmitted while the "talk" button is pressed.
+To set a broadcast trigger to use PTT simply change the "Activation Mode" to "Push To Talk" and choose which input axis must be pressed for voice to be transmitted. See the Unity documentation for how to define a new input axis.
+Instead of using an input axis you may want to trigger push-to-talk from a UI button.
+Activation Mode
to Open
. This constantly transmits voice.Mute
to true
. This prevents any voice from being transmitted.ToggleMute
method. This inverts the Mute
setting each time it is called. With this setup clicking the UI button once will unmute the trigger and speech will be transmitted, clicking the button again will mute the trigger and stop speech from being transmitted.
+There are several options for controlling speech from scripts, depending on what you want to achieve.
+If you want to completely prevent a player from speaking you can set the IsMuted
property on the DissonanceComms
component to true.
DissonanceComms comms;
+comms.IsMuted = true;
+
+// User cannot speak
+
+comms.IsMuted = false;
+
+// User can speak
+
If you want to completely prevent the local player from hearing any speech you can set the IsDeafened
property on the DissonanceComms
component to true.
DissonanceComms comms;
+comms.IsDeafened = true;
+
+// User cannot hear
+
+comms.IsDeafened = false;
+
+//User can hear
+
If you want to locally mute a remote player (prevent yourself from hearing them talk) you can set the IsLocallyMuted
property on their player object.
DissonanceComms comms;
+var player = comms.FindPlayer(player_id);
+player.IsLocallyMuted = true;
+
+// You will not hear user when they speak
+
+player.IsLocallyMuted = false;
+
+// You will hear user when they speak
+
The VoiceBroadcastTrigger
is the normal way to trigger voice transmission. Simply disabling this component will prevent it from triggering any voice transmissions until it is enabled again.
VoiceBroadcastTrigger trigger;
+
+trigger.enabled = false;
+
+// This trigger cannot send voice
+
+trigger.enabled = true;
+
+// This trigger can send voice
+
The most general way to control player voice transmission from scripts is to open and close channels, for more information about channels see this tutorial. To start talking open a channel, to stop talking dispose the channel:
+DissonanceComms comms;
+
+var channel = comms.RoomChannels.Open("Room ID", true, ChannelPriority.Default);
+
+//Player speech will be transmitted to the room named "Room ID"
+
+channel.Dispose();
+
+//Player speech will no longer be transmitted by this channel
+
Since Dissonance 7.0.2 Dissonance no longer supports external spatialization plugins. A change in how Unity applies spatialization has made support for this feature impossible. Unity have acknowledged this bug and we will add support back into Dissonance as soon as possible.
+A team chat room is a set of rooms where all users on the same team talk to and listen to the same room. To create a setup like this requires a small amount of scripting as it depends on how your game defines what a "team" actually is!
+To create a team chat setup first create multiple pairs of broadcasters and receivers, one for each team.
+With the setup as shown here every player will speak and listen to every team channel. To fix this add a unique token to each pair of triggers (e.g. the team name), once you have done this none of the triggers will activate and no one will speak or listen to any of the team rooms.
+Finally, when you create a player and assign them to a team run a script which adds the appropriate token to the local player. Exactly how this code works depends a lot on exactly how your game defines what a team is, feel free to ask for help. Here is some example code:
+void OnAssignPlayerToTeam(string teamName)
+{
+ //Find local comms object
+ var comms = FindObjectOfType<DissonanceComms>();
+
+ //Sanity check that we found what we're looking for
+ if (comms == null)
+ {
+ Debug.LogFormat("Cannot find voice components for team '{0}'", teamName);
+ return;
+ }
+
+ //Add the token for the team
+ comms.AddToken(teamName);
+}
+
If you want to still have a global voice chat room and have per team chat rooms this can be achieved by simply having the normal global chat room configuration with a different activation mode (e.g. a different push-to-talk input axis, such as 'v' to team chat and 'b' to global chat).
+Dissonance allows text chat messages to be routed through the network to the same players and chat rooms used by voice. This tutorial will demonstrate the APIs provided to send and receive text chat messages with Dissonance.
+// get the DissonanceComms script from the Dissonance game object
+var dissonance = GetComponent<DissonanceComms>();
+
+// send a text message to the Party chat channel
+dissonance.Text.Send("Party", "Who just pulled the boss?");
+
// get the DissonanceComms script from the Dissonance game object
+var dissonance = GetComponent<DissonanceComms>();
+
+// send a text message to a specific player
+dissonance.Text.Whisper("hunter", "Did you just pull the boss?");
+
Dissonance will only send you text messages if they are directly addressed to you or to a room which you are listening to. To listen to a room you can use a voice receipt trigger, or directly use the Dissonance API from scripts to enter the room.
+// get the DissonanceComms script from the Dissonance game object
+var dissonance = GetComponent<DissonanceComms>();
+
+//If necessary, enter a room using the scripting API
+dissonance.Rooms.Join("Room Name");
+
+dissonance.Text.MessageRecieved += message => {
+
+ //This code will run every time you receive a text message
+
+ var format = "[{0}] {1}: {2}";
+ if (message.RecipientType == ChannelType.Player)
+ format = "{1} whispers: {2}";
+
+ chatLog.Write(string.Format(format, message.Recipient, message.Sender, message.Message));
+};
+
Dissonance includes a mechanism for directly accessing the stream of recorded audio, this can be used to drive features such as recording the mic input to file or passing it through a Speech-To-Text system.
+There are two ways to implement this, direct low-level access through IMicrophoneSubscriber and easier access through BaseMicrophoneSubscriber.
+Once you have created a script which uses either of these systems you must register it to receive data from Dissonance by calling FindObjectOfType<DissonanceComms>().SubscribeToRecordedAudio(your_script)
and passing in your script as your_script
.
This provides easy access to the microphone audio stream. This script handles capturing and buffering the data, it is delivered in batches on the main thread.
+This provides direct access to the microphone audio stream as directly as possible. Audio is delivered to the ReceiveMicrophoneData
method on the background audio processing thread not the main thread. Only use this if BaseMicrophoneSubscriber
does not meet your use case!