The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). Additional documentation and release notes are available at [Multiplayer Documentation](https://docs-multiplayer.unity3d.com). ## [1.3.1] - 2023-03-27 ### Added - Added detection and graceful handling of corrupt packets for additional safety. (#2419) ### Changed - The UTP component UI has been updated to be more user-friendly for new users by adding a simple toggle to switch between local-only (127.0.0.1) and remote (0.0.0.0) binding modes, using the toggle "Allow Remote Connections" (#2408) - Updated `UnityTransport` dependency on `com.unity.transport` to 1.3.3. (#2450) - `NetworkShow()` of `NetworkObject`s are delayed until the end of the frame to ensure consistency of delta-driven variables like `NetworkList`. - Dirty `NetworkObject` are reset at end-of-frame and not at serialization time. - `NetworkHide()` of an object that was just `NetworkShow()`n produces a warning, as remote clients will _not_ get a spawn/despawn pair. - Renamed the NetworkTransform.SetState parameter `shouldGhostsInterpolate` to `teleportDisabled` for better clarity of what that parameter does. (#2228) - Network prefabs are now stored in a ScriptableObject that can be shared between NetworkManagers, and have been exposed for public access. By default, a Default Prefabs List is created that contains all NetworkObject prefabs in the project, and new NetworkManagers will default to using that unless that option is turned off in the Netcode for GameObjects settings. Existing NetworkManagers will maintain their existing lists, which can be migrated to the new format via a button in their inspector. 
(#2322) ### Fixed - Fixed issue where changes to a layer's weight would not synchronize unless a state transition was occurring. (#2399) - Fixed issue where `NetworkManager.LocalClientId` was returning the `NetworkTransport.ServerClientId` as opposed to the `NetworkManager.m_LocalClientId`. (#2398) - Fixed issue where a dynamically spawned `NetworkObject` parented under an in-scene placed `NetworkObject` would have its `InScenePlaced` value changed to `true`. This would result in a soft synchronization error for late joining clients. (#2396) - Fixed a UTP test that was failing when you install Unity Transport package 2.0.0 or newer. (#2347) - Fixed issue where `NetcodeSettingsProvider` would throw an exception in Unity 2020.3.x versions. (#2345) - Fixed server side issue where, depending upon component ordering, some NetworkBehaviour components might not have their OnNetworkDespawn method invoked if the client side disconnected. (#2323) - Fixed a case where data corruption could occur when using UnityTransport when reaching a certain level of send throughput. (#2332) - Fixed an issue in `UnityTransport` where an exception would be thrown if starting a Relay host/server on WebGL. This exception should only be thrown if using direct connections (where WebGL can't act as a host/server). (#2321) - Fixed `NetworkAnimator` issue where it was not checking for `AnimatorStateTransition.destinationStateMachine` and any possible sub-states defined within it. (#2309) - Fixed `NetworkAnimator` issue where the host client was receiving the ClientRpc animation updates when the host was the owner. (#2309) - Fixed `NetworkAnimator` issue with using pooled objects and when specific properties are cleaned during despawn and destroy. (#2309) - Fixed issue where `NetworkAnimator` was checking for animation changes when the associated `NetworkObject` was not spawned. (#2309) - Corrected an issue with the documentation for BufferSerializer (#2401)
297 lines
12 KiB
C#
297 lines
12 KiB
C#
using System;
|
|
using Unity.Collections;
|
|
using Unity.Collections.LowLevel.Unsafe;
|
|
using Unity.Networking.Transport;
|
|
|
|
namespace Unity.Netcode.Transports.UTP
|
|
{
|
|
/// <summary>Queue for batched messages meant to be sent through UTP.</summary>
/// <remarks>
/// Messages should be pushed on the queue with <see cref="PushMessage"/>. To send batched
/// messages, call <see cref="FillWriterWithMessages"/> or <see cref="FillWriterWithBytes"/>
/// with the <see cref="DataStreamWriter"/> obtained from <see cref="NetworkDriver.BeginSend"/>.
/// This will fill the writer with as many messages/bytes as possible. If the send is
/// successful, call <see cref="Consume"/> to remove the data from the queue.
///
/// This is meant as a companion to <see cref="BatchedReceiveQueue"/>, which should be used to
/// read messages sent with this queue.
/// </remarks>
internal struct BatchedSendQueue : IDisposable
{
    // Note that we're using NativeList basically like a growable NativeArray, where the length
    // of the list is the capacity of our array. (We can't use the capacity of the list as our
    // queue capacity because NativeList may elect to set it higher than what we'd set it to
    // with SetCapacity, which breaks the logic of our code.)
    private NativeList<byte> m_Data;

    // Backing storage for HeadIndex/TailIndex (see k_HeadInternalIndex/k_TailInternalIndex).
    private NativeArray<int> m_HeadTailIndices;

    // Hard upper bound on the queue's capacity; m_Data is never grown beyond this.
    private int m_MaximumCapacity;

    // Initial size of m_Data, and the size it is trimmed back to when largely unused.
    private int m_MinimumCapacity;

    /// <summary>Overhead that is added to each message in the queue.</summary>
    /// <remarks>Each message is prefixed with its length, stored as a 4-byte integer.</remarks>
    public const int PerMessageOverhead = sizeof(int);

    // Absolute lower bound (4 KiB) for m_MinimumCapacity, whatever the maximum capacity is.
    internal const int MinimumMinimumCapacity = 4096;

    // Indices into m_HeadTailIndices.
    private const int k_HeadInternalIndex = 0;
    private const int k_TailInternalIndex = 1;

    /// <summary>Index of the first byte of the oldest data in the queue.</summary>
    private int HeadIndex
    {
        get { return m_HeadTailIndices[k_HeadInternalIndex]; }
        set { m_HeadTailIndices[k_HeadInternalIndex] = value; }
    }

    /// <summary>Index one past the last byte of the most recent data in the queue.</summary>
    private int TailIndex
    {
        get { return m_HeadTailIndices[k_TailInternalIndex]; }
        set { m_HeadTailIndices[k_TailInternalIndex] = value; }
    }

    /// <summary>Number of queued bytes (length prefixes included).</summary>
    public int Length => TailIndex - HeadIndex;

    /// <summary>Current size of the underlying storage (not the maximum capacity).</summary>
    public int Capacity => m_Data.Length;

    /// <summary>Whether the queue currently holds no data.</summary>
    public bool IsEmpty => HeadIndex == TailIndex;

    /// <summary>Whether the queue's native containers are allocated (and not disposed).</summary>
    public bool IsCreated => m_Data.IsCreated;

    /// <summary>Construct a new empty send queue.</summary>
    /// <param name="capacity">Maximum capacity of the send queue.</param>
    public BatchedSendQueue(int capacity)
    {
        // Make sure the maximum capacity will be even.
        m_MaximumCapacity = capacity + (capacity & 1);

        // We pick the minimum capacity such that if we keep doubling it, we'll eventually hit
        // the maximum capacity exactly. The alternative would be to use capacities that are
        // powers of 2, but this can lead to over-allocating quite a bit of memory (especially
        // since we expect maximum capacities to be in the megabytes range). The approach taken
        // here avoids this issue, at the cost of not having allocations of nice round sizes.
        m_MinimumCapacity = m_MaximumCapacity;
        while (m_MinimumCapacity / 2 >= MinimumMinimumCapacity)
        {
            m_MinimumCapacity /= 2;
        }

        m_Data = new NativeList<byte>(m_MinimumCapacity, Allocator.Persistent);
        m_HeadTailIndices = new NativeArray<int>(2, Allocator.Persistent);

        // The list's length acts as the queue's capacity, so size it up front. Contents are
        // left uninitialized; the head/tail indices delimit the valid bytes.
        m_Data.ResizeUninitialized(m_MinimumCapacity);

        HeadIndex = 0;
        TailIndex = 0;
    }

    /// <summary>Dispose the queue's native containers, if they were allocated.</summary>
    public void Dispose()
    {
        if (IsCreated)
        {
            m_Data.Dispose();
            m_HeadTailIndices.Dispose();
        }
    }

    /// <summary>Write a raw buffer to a DataStreamWriter.</summary>
    /// <remarks>On UTP 2.0+ the raw-pointer overload is exposed as WriteBytesUnsafe.</remarks>
    private unsafe void WriteBytes(ref DataStreamWriter writer, byte* data, int length)
    {
#if UTP_TRANSPORT_2_0_ABOVE
        writer.WriteBytesUnsafe(data, length);
#else
        writer.WriteBytes(data, length);
#endif
    }

    /// <summary>Append data at the tail of the queue. No safety checks.</summary>
    /// <remarks>
    /// Callers must already have ensured there is room for the data plus its 4-byte length
    /// prefix between <see cref="TailIndex"/> and <see cref="Capacity"/>.
    /// </remarks>
    private void AppendDataAtTail(ArraySegment<byte> data)
    {
        unsafe
        {
            // Write directly into m_Data's memory, starting at the tail.
            var writer = new DataStreamWriter((byte*)m_Data.GetUnsafePtr() + TailIndex, Capacity - TailIndex);

            // Length prefix first, then the message bytes themselves.
            writer.WriteInt(data.Count);

            fixed (byte* dataPtr = data.Array)
            {
                WriteBytes(ref writer, dataPtr + data.Offset, data.Count);
            }
        }

        TailIndex += sizeof(int) + data.Count;
    }

    /// <summary>Append a new message to the queue.</summary>
    /// <param name="message">Message to append to the queue.</param>
    /// <returns>
    /// Whether the message was appended successfully. The only way it can fail is if there's
    /// no more room in the queue. On failure, nothing is written to the queue.
    /// </returns>
    public bool PushMessage(ArraySegment<byte> message)
    {
        if (!IsCreated)
        {
            return false;
        }

        // Check if there's enough room after the current tail index.
        if (Capacity - TailIndex >= sizeof(int) + message.Count)
        {
            AppendDataAtTail(message);
            return true;
        }

        // Move the data at the beginning of m_Data. Either it will leave enough space for
        // the message, or we'll grow m_Data and will want the data at the beginning anyway.
        if (HeadIndex > 0 && Length > 0)
        {
            unsafe
            {
                // MemMove (not MemCpy) since source and destination ranges may overlap.
                UnsafeUtility.MemMove(m_Data.GetUnsafePtr(), (byte*)m_Data.GetUnsafePtr() + HeadIndex, Length);
            }

            TailIndex = Length;
            HeadIndex = 0;
        }

        // If there's enough space left at the end for the message, now is a good time to trim
        // the capacity of m_Data if it got very large. We define "very large" here as having
        // more than 75% of m_Data unused after adding the new message.
        if (Capacity - TailIndex >= sizeof(int) + message.Count)
        {
            AppendDataAtTail(message);

            // Halving stays on the doubling chain anchored at m_MinimumCapacity, so this can
            // never shrink m_Data below the minimum capacity.
            while (TailIndex < Capacity / 4 && Capacity > m_MinimumCapacity)
            {
                m_Data.ResizeUninitialized(Capacity / 2);
            }

            return true;
        }

        // If we get here we need to grow m_Data until the data fits (or it's too large).
        while (Capacity - TailIndex < sizeof(int) + message.Count)
        {
            // Can't grow m_Data anymore. Message simply won't fit.
            if (Capacity * 2 > m_MaximumCapacity)
            {
                return false;
            }

            m_Data.ResizeUninitialized(Capacity * 2);
        }

        // If we get here we know there's now enough room for the message.
        AppendDataAtTail(message);
        return true;
    }

    /// <summary>
    /// Fill as much of a <see cref="DataStreamWriter"/> as possible with data from the head of
    /// the queue. Only full messages (and their length) are written to the writer.
    /// </summary>
    /// <remarks>
    /// This does NOT actually consume anything from the queue. That is, calling this method
    /// does not reduce the length of the queue. Callers are expected to call
    /// <see cref="Consume"/> with the value returned by this method afterwards if the data can
    /// be safely removed from the queue (e.g. if it was sent successfully).
    ///
    /// This method should not be used together with <see cref="FillWriterWithBytes"/> since
    /// this could lead to a corrupted queue.
    /// </remarks>
    /// <param name="writer">The <see cref="DataStreamWriter"/> to write to.</param>
    /// <returns>How many bytes were written to the writer.</returns>
    public int FillWriterWithMessages(ref DataStreamWriter writer)
    {
        if (!IsCreated || Length == 0)
        {
            return 0;
        }

        unsafe
        {
            // The reader is only used to decode each message's length prefix; the message
            // bytes themselves are copied straight out of m_Data's memory.
            var reader = new DataStreamReader(m_Data.AsArray());

            var writerAvailable = writer.Capacity;
            var readerOffset = HeadIndex;

            while (readerOffset < TailIndex)
            {
                reader.SeekSet(readerOffset);
                var messageLength = reader.ReadInt();

                if (writerAvailable < sizeof(int) + messageLength)
                {
                    // Not enough room left in the writer for the next full message.
                    break;
                }
                else
                {
                    writer.WriteInt(messageLength);

                    var messageOffset = reader.GetBytesRead();
                    WriteBytes(ref writer, (byte*)m_Data.GetUnsafePtr() + messageOffset, messageLength);

                    writerAvailable -= sizeof(int) + messageLength;
                    readerOffset += sizeof(int) + messageLength;
                }
            }

            return writer.Capacity - writerAvailable;
        }
    }

    /// <summary>
    /// Fill the given <see cref="DataStreamWriter"/> with as many bytes from the queue as
    /// possible, disregarding message boundaries.
    /// </summary>
    /// <remarks>
    /// This does NOT actually consume anything from the queue. That is, calling this method
    /// does not reduce the length of the queue. Callers are expected to call
    /// <see cref="Consume"/> with the value returned by this method afterwards if the data can
    /// be safely removed from the queue (e.g. if it was sent successfully).
    ///
    /// This method should not be used together with <see cref="FillWriterWithMessages"/> since
    /// this could lead to reading messages from a corrupted queue.
    /// </remarks>
    /// <param name="writer">The <see cref="DataStreamWriter"/> to write to.</param>
    /// <returns>How many bytes were written to the writer.</returns>
    public int FillWriterWithBytes(ref DataStreamWriter writer)
    {
        if (!IsCreated || Length == 0)
        {
            return 0;
        }

        var copyLength = Math.Min(writer.Capacity, Length);

        unsafe
        {
            WriteBytes(ref writer, (byte*)m_Data.GetUnsafePtr() + HeadIndex, copyLength);
        }

        return copyLength;
    }

    /// <summary>Consume a number of bytes from the head of the queue.</summary>
    /// <remarks>
    /// This should only be called with a size that matches the last value returned by
    /// <see cref="FillWriterWithMessages"/> or <see cref="FillWriterWithBytes"/>. Anything
    /// else will result in a corrupted queue.
    /// </remarks>
    /// <param name="size">Number of bytes to consume from the queue.</param>
    public void Consume(int size)
    {
        // Adjust the head/tail indices such that we consume the given size.
        if (size >= Length)
        {
            // Everything was consumed; reset to an empty queue and give back any memory that
            // was allocated beyond the minimum capacity.
            HeadIndex = 0;
            TailIndex = 0;

            // This is a no-op if m_Data is already at minimum capacity.
            m_Data.ResizeUninitialized(m_MinimumCapacity);
        }
        else
        {
            HeadIndex += size;
        }
    }
}
|
|
}
|