diff --git a/.clang-format b/.clang-format
index d6b6f78..f7f1224 100644
--- a/.clang-format
+++ b/.clang-format
@@ -1,96 +1,17 @@
----
-Language: Cpp
-# BasedOnStyle: LLVM
-AccessModifierOffset: -4
-AlignAfterOpenBracket: Align
+Language: Cpp
+BasedOnStyle: Mozilla
+IndentWidth: 4
+ColumnLimit: 120
+Standard: c++20
+IndentExternBlock: NoIndent
+AlwaysBreakAfterDefinitionReturnType: None
+BreakAfterReturnType: None
+AllowShortFunctionsOnASingleLine: All
AlignConsecutiveAssignments: true
+AlignConsecutiveBitFields: true
AlignConsecutiveDeclarations: true
-AlignEscapedNewlines: Left
-AlignOperands: true
+AlignConsecutiveMacros: true
+AlignConsecutiveShortCaseStatements: { Enabled: true }
+AlignEscapedNewlines: LeftWithLastLine
AlignTrailingComments: true
-AllowAllParametersOfDeclarationOnNextLine: false
-AllowShortBlocksOnASingleLine: false
-AllowShortCaseLabelsOnASingleLine: false
-AllowShortFunctionsOnASingleLine: Inline
-AllowShortIfStatementsOnASingleLine: Never
-AllowShortLoopsOnASingleLine: false
-AlwaysBreakAfterDefinitionReturnType: None
-AlwaysBreakAfterReturnType: None
-AlwaysBreakBeforeMultilineStrings: false
-AlwaysBreakTemplateDeclarations: Yes
-BinPackArguments: false
-BinPackParameters: false
-BraceWrapping:
- AfterCaseLabel: true
- AfterClass: true
- AfterControlStatement: true
- AfterEnum: true
- AfterFunction: true
- AfterNamespace: true
- AfterStruct: true
- AfterUnion: true
- BeforeCatch: true
- BeforeElse: true
- IndentBraces: false
- SplitEmptyFunction: false
- SplitEmptyRecord: false
- SplitEmptyNamespace: false
- AfterExternBlock: false # Keeps the contents un-indented.
-BreakBeforeBinaryOperators: None
-BreakBeforeBraces: Custom
-BreakBeforeTernaryOperators: true
-BreakConstructorInitializers: AfterColon
-# BreakInheritanceList: AfterColon
-BreakStringLiterals: true
-ColumnLimit: 120
-CommentPragmas: '^ (coverity|pragma:)'
-CompactNamespaces: false
-ConstructorInitializerAllOnOneLineOrOnePerLine: true
-ConstructorInitializerIndentWidth: 4
-ContinuationIndentWidth: 4
-Cpp11BracedListStyle: true
-DerivePointerAlignment: false
-DisableFormat: false
-ExperimentalAutoDetectBinPacking: false
-FixNamespaceComments: true
-ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ]
-IncludeBlocks: Preserve
-IndentCaseLabels: false
-IndentPPDirectives: AfterHash
-IndentWidth: 4
-IndentWrappedFunctionNames: false
-KeepEmptyLinesAtTheStartOfBlocks: false
-MacroBlockBegin: ''
-MacroBlockEnd: ''
-MaxEmptyLinesToKeep: 1
-NamespaceIndentation: None
-PenaltyBreakAssignment: 2
-PenaltyBreakBeforeFirstCallParameter: 10000 # Raised intentionally; prefer breaking all
-PenaltyBreakComment: 300
-PenaltyBreakFirstLessLess: 120
-PenaltyBreakString: 1000
-PenaltyExcessCharacter: 1000000
-PenaltyReturnTypeOnItsOwnLine: 10000 # Raised intentionally because it hurts readability
-PointerAlignment: Left
-ReflowComments: true
-SortIncludes: false
-SortUsingDeclarations: false
-SpaceAfterCStyleCast: true
-SpaceAfterTemplateKeyword: true
-SpaceBeforeAssignmentOperators: true
-SpaceBeforeCpp11BracedList: false
-SpaceBeforeInheritanceColon: true
-SpaceBeforeParens: ControlStatements
-SpaceBeforeCtorInitializerColon: true
-SpaceBeforeRangeBasedForLoopColon: true
-SpaceInEmptyParentheses: false
-SpacesBeforeTrailingComments: 2
-SpacesInAngles: false
-SpacesInCStyleCastParentheses: false
-SpacesInContainerLiterals: false
-SpacesInParentheses: false
-SpacesInSquareBrackets: false
-Standard: Cpp11
-TabWidth: 8
-UseTab: Never
-...
+SortIncludes: false
diff --git a/.clang-tidy b/.clang-tidy
index eae1f9f..503d9af 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -22,11 +22,18 @@ Checks: >-
-boost-use-ranges,
-hicpp-static-assert,
-misc-static-assert,
- -modernize-macro-to-enum,
- -cppcoreguidelines-macro-to-enum,
- -bugprone-casting-through-void,
+ -*-macro-to-enum,
+ -*-macro-usage,
+ -*-enum-size,
+ -*-use-using,
+ -*-casting-through-void,
-misc-include-cleaner,
-cppcoreguidelines-avoid-do-while,
+ -*-magic-numbers,
+ -*-use-enum-class,
+ -*-use-trailing-return-type,
+ -*-deprecated-headers,
+ -*-avoid-c-arrays,
CheckOptions:
- key: readability-function-cognitive-complexity.Threshold
value: '99'
diff --git a/.gdbinit b/.gdbinit
new file mode 100644
index 0000000..b27c408
--- /dev/null
+++ b/.gdbinit
@@ -0,0 +1,3 @@
+skip make_frame_base
+skip make_frame
+skip cavl2_min
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index ab97081..c7efd7c 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -9,7 +9,7 @@ jobs:
container: ghcr.io/opencyphal/toolshed:ts24.4.3
strategy:
matrix:
- toolchain: [ 'clang', 'gcc' ]
+ toolchain: [ "clang", "gcc" ]
include:
- toolchain: gcc
c-compiler: gcc
@@ -24,23 +24,28 @@ jobs:
# language=bash
- run: >
cmake
- -B ${{ github.workspace }}/build
+ -B $GITHUB_WORKSPACE/build
-DCMAKE_BUILD_TYPE=Debug
-DCMAKE_C_COMPILER=${{ matrix.c-compiler }}
-DCMAKE_CXX_COMPILER=${{ matrix.cxx-compiler }}
.
# language=bash
- run: |
- cd ${{ github.workspace }}/build
+ cd $GITHUB_WORKSPACE/build
make VERBOSE=1 -j$(nproc)
make test ARGS="--verbose"
+ - name: Archive workspace
+ if: always()
+ run: |
+ cd $GITHUB_WORKSPACE
+ tar --use-compress-program="gzip -9" -cf $RUNNER_TEMP/workspace.tar.gz .
- uses: actions/upload-artifact@v4
if: always()
with:
# The matrix is shown for convenience but this is fragile because the values may not be string-convertible.
# Should it break one day, feel free to remove the matrix from here.
name: ${{github.job}}-#${{strategy.job-index}}-${{job.status}}-${{join(matrix.*, ',')}}
- path: ${{github.workspace}}/**/*
+ path: ${{runner.temp}}/workspace.tar.gz
retention-days: 2
optimizations:
@@ -49,13 +54,13 @@ jobs:
container: ghcr.io/opencyphal/toolshed:ts24.4.3
strategy:
matrix:
- toolchain: [ 'clang', 'gcc' ]
+ toolchain: [ "clang", "gcc" ]
build_type: [ Release, MinSizeRel ]
include:
- toolchain: gcc
c-compiler: gcc
cxx-compiler: g++
- cxx-flags: -fno-strict-aliasing # GCC in MinSizeRel C++20 mode misoptimizes the Cavl test.
+ cxx-flags: ""
- toolchain: clang
c-compiler: clang
cxx-compiler: clang++
@@ -66,7 +71,7 @@ jobs:
# language=bash
- run: >
cmake
- -B ${{ github.workspace }}/build
+ -B $GITHUB_WORKSPACE/build
-DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
-DCMAKE_C_COMPILER=${{ matrix.c-compiler }}
-DCMAKE_CXX_COMPILER=${{ matrix.cxx-compiler }}
@@ -75,35 +80,68 @@ jobs:
.
# language=bash
- run: |
- cd ${{ github.workspace }}/build
+ cd $GITHUB_WORKSPACE/build
make VERBOSE=1 -j$(nproc)
make test ARGS="--verbose"
+ - name: Archive workspace
+ if: always()
+ run: |
+ cd $GITHUB_WORKSPACE
+ tar --use-compress-program="gzip -9" -cf $RUNNER_TEMP/workspace.tar.gz .
- uses: actions/upload-artifact@v4
if: always()
with:
# The matrix is shown for convenience but this is fragile because the values may not be string-convertible.
# Should it break one day, feel free to remove the matrix from here.
name: ${{github.job}}-#${{strategy.job-index}}-${{job.status}}-${{join(matrix.*, ',')}}
- path: ${{github.workspace}}/**/*
+ path: ${{runner.temp}}/workspace.tar.gz
retention-days: 2
- avr:
+ coverage:
if: github.event_name == 'push'
runs-on: ubuntu-latest
- env:
- mcu: at90can64
- flags: -Wall -Wextra -Werror -pedantic -Wconversion -Wtype-limits
- strategy:
- matrix:
- std: [ 'c99', 'c11', 'gnu99', 'gnu11' ]
+ container: ghcr.io/opencyphal/toolshed:ts24.4.3
steps:
- uses: actions/checkout@v4
+ with:
+ submodules: true
+ # language=bash
+ - run: >
+ cmake -B $GITHUB_WORKSPACE/build -DCMAKE_BUILD_TYPE=Debug -DNO_STATIC_ANALYSIS=ON -DENABLE_COVERAGE=ON .
# language=bash
- run: |
- sudo apt update -y && sudo apt upgrade -y
- sudo apt install gcc-avr avr-libc
- avr-gcc --version
- - run: avr-gcc libudpard/*.c -c -std=${{matrix.std}} -mmcu=${{env.mcu}} ${{env.flags}}
+ cd $GITHUB_WORKSPACE/build
+ make -j$(nproc) && make test && make coverage
+ - name: Archive workspace
+ if: always()
+ run: |
+ cd $GITHUB_WORKSPACE
+ tar --use-compress-program="gzip -9" -cf $RUNNER_TEMP/workspace.tar.gz .
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{github.job}}-#${{strategy.job-index}}-${{job.status}}
+ path: ${{runner.temp}}/workspace.tar.gz
+ retention-days: 30
+
+ # TODO: re-enable this
+ # avr:
+ # if: github.event_name == 'push'
+ # runs-on: ubuntu-latest
+ # env:
+ # mcu: at90can64
+ # flags: -Wall -Wextra -Werror -pedantic -Wconversion -Wtype-limits
+ # strategy:
+ # matrix:
+ # std: [ 'c99', 'c11', 'gnu99', 'gnu11' ]
+ # steps:
+ # - uses: actions/checkout@v4
+ # # language=bash
+ # - run: |
+ # sudo apt update -y && sudo apt upgrade -y
+ # sudo apt install gcc-avr avr-libc
+ # avr-gcc --version
+ # - run: avr-gcc -Ilib/cavl/ libudpard/*.c -c -std=${{matrix.std}} -mmcu=${{env.mcu}} ${{env.flags}}
arm:
if: github.event_name == 'push'
@@ -112,14 +150,14 @@ jobs:
flags: -Wall -Wextra -Werror -pedantic -Wconversion -Wtype-limits -Wcast-align -Wfatal-errors
strategy:
matrix:
- std: [ 'c99', 'c11', 'gnu99', 'gnu11' ]
+ std: [ "c99", "c11", "gnu99", "gnu11" ]
steps:
- uses: actions/checkout@v4
# language=bash
- run: |
sudo apt update -y && sudo apt upgrade -y
sudo apt-get install -y gcc-arm-none-eabi
- - run: arm-none-eabi-gcc libudpard/*.c -c -std=${{matrix.std}} ${{ env.flags }}
+ - run: arm-none-eabi-gcc -Ilib/cavl/ libudpard/*.c -c -std=${{matrix.std}} ${{ env.flags }}
sonar:
runs-on: ubuntu-latest
@@ -138,12 +176,12 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
- fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis
+ fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis
submodules: true
- uses: actions/setup-java@v4
with:
java-version: 17
- distribution: 'zulu'
+ distribution: "zulu"
# language=bash
- run: |
clang --version
@@ -178,6 +216,6 @@ jobs:
- uses: actions/checkout@v4
- uses: DoozyX/clang-format-lint-action@v0.20
with:
- source: './libudpard ./tests'
- extensions: 'c,h,cpp,hpp'
+ source: "./libudpard ./tests"
+ extensions: "c,h,cpp,hpp"
clangFormatVersion: ${{ env.LLVM_VERSION }}
diff --git a/.gitignore b/.gitignore
index 30e85f4..d42729f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,16 +20,17 @@
*.hex
*.dSYM/
*build/
+build*/
cmake-build-*/
-build-avr/
+.cache/
.metadata
.settings
.project
.cproject
.pydevproject
-.gdbinit
.scannerwork/
.vscode/
+.sisyphus/
**/.idea/*
!**/.idea/dictionaries
!**/.idea/dictionaries/*
diff --git a/.idea/dictionaries/project.xml b/.idea/dictionaries/project.xml
new file mode 100644
index 0000000..8d9a0c0
--- /dev/null
+++ b/.idea/dictionaries/project.xml
@@ -0,0 +1,24 @@
+<component name="ProjectDictionaryState">
+  <dictionary name="project">
+    <words>
+      <w>abcdefghij</w>
+      <w>abcdefghijk</w>
+      <w>abcdefghijklmnopqrst</w>
+      <w>acks</w>
+      <w>dups</w>
+      <w>efgh</w>
+      <w>fghij</w>
+      <w>fstate</w>
+      <w>klmno</w>
+      <w>klmnopqrst</w>
+      <w>lmnopqrst</w>
+      <w>mnop</w>
+      <w>noinit</w>
+      <w>objcount</w>
+      <w>optin</w>
+      <w>pqrst</w>
+      <w>stdatomic</w>
+      <w>tidwin</w>
+    </words>
+  </dictionary>
+</component>
\ No newline at end of file
diff --git a/.zed/tasks.json b/.zed/tasks.json
new file mode 100644
index 0000000..d7e48a6
--- /dev/null
+++ b/.zed/tasks.json
@@ -0,0 +1,37 @@
+[
+ {
+ "label": "Configure (CMake)",
+ "command": "cmake -B build",
+ "tags": ["build"]
+ },
+ {
+ "label": "Build",
+ "command": "cmake --build build",
+ "tags": ["build"]
+ },
+ {
+ "label": "Configure and Build",
+ "command": "cmake -B build && cmake --build build",
+ "tags": ["build"]
+ },
+ {
+ "label": "Run Tests",
+ "command": "ctest --test-dir build --output-on-failure",
+ "tags": ["test"]
+ },
+ {
+ "label": "Clean",
+ "command": "rm -rf build",
+ "tags": ["build"]
+ },
+ {
+ "label": "Format Code",
+ "command": "cmake --build build --target format",
+ "tags": ["format"]
+ },
+ {
+ "label": "Full Build and Test",
+ "command": "cmake -B build && cmake --build build && ctest --test-dir build --output-on-failure",
+ "tags": ["build", "test"]
+ }
+]
diff --git a/AGENTS.md b/AGENTS.md
new file mode 100644
index 0000000..8e77306
--- /dev/null
+++ b/AGENTS.md
@@ -0,0 +1,14 @@
+# LibUDPard instructions for AI agents
+
+Please read `README.md` for general information about LibUDPard, and `CONTRIBUTING.md` for development-related notes.
+
+Keep the code and comments very brief. Be sure every significant code block is preceded with a brief comment.
+
+If you need a build directory, create one in the project root named with a `build` prefix;
+you can also use an existing build directory if you prefer,
+but avoid using `cmake-build-*` because these are used by CLion.
+When building the code, don't hesitate to run multiple jobs to use all CPU cores.
+
+Run all tests in a debug build to ensure that all assertion checks are enabled.
+
+It is best to use Clang-Format to format the code when done editing.
diff --git a/CLAUDE.md b/CLAUDE.md
new file mode 120000
index 0000000..47dc3e3
--- /dev/null
+++ b/CLAUDE.md
@@ -0,0 +1 @@
+AGENTS.md
\ No newline at end of file
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 68d7b54..624b4bb 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -12,13 +12,14 @@ cmake_minimum_required(VERSION 3.20)
project(udpard)
enable_testing()
+set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
+
# Shared Clang-Format target for all subprojects.
find_program(clang_format NAMES clang-format)
if (NOT clang_format)
message(STATUS "Could not locate clang-format")
else ()
file(GLOB_RECURSE format_files
- ${CMAKE_CURRENT_SOURCE_DIR}/demo/*.[ch]
${CMAKE_CURRENT_SOURCE_DIR}/libudpard/*.[ch]
${CMAKE_CURRENT_SOURCE_DIR}/tests/*.[ch]
${CMAKE_CURRENT_SOURCE_DIR}/tests/*.[ch]pp)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index d226f55..2875211 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -18,7 +18,7 @@ are impossible to track at the source code level.
An exception applies for the case of false-positive (invalid) warnings -- those should not be mentioned in the codebase.
Unfortunately, some rules are hard or impractical to enforce automatically,
-so code reviewers shall be aware of MISRA and general high-reliability coding practices
+so code reviewers should be aware of MISRA and general high-reliability coding practices
to prevent non-compliant code from being accepted into upstream.
## Build & test
@@ -27,6 +27,8 @@ Consult with the CI workflow files for the required tools and build & test instr
You may want to use the [toolshed](https://github.com/OpenCyphal/docker_toolchains/pkgs/container/toolshed)
container for this.
+To run tests with coverage reports, refer to the instructions in `tests/CMakeLists.txt`.
+
## Releasing
Simply create a new release & tag on GitHub.
diff --git a/MIGRATION_v1.x_to_v2.0.md b/MIGRATION_v1.x_to_v2.0.md
deleted file mode 100644
index 4771b45..0000000
--- a/MIGRATION_v1.x_to_v2.0.md
+++ /dev/null
@@ -1,197 +0,0 @@
-# Migration Guide: Upgrading from LibUDPard v1.x to v2.0
-
-This migration guide provides step-by-step instructions to help you update your application code from LibUDPard version 1.x to version 2.0. The guide highlights the key changes in the API and offers recommendations on how to adapt your code accordingly.
-
-## Introduction
-
-LibUDPard version 2.0 introduces several significant changes to improve memory management and payload handling. This guide will help you understand these changes and update your application code to be compatible with the new version.
-
-These changes do not affect wire compatibility.
-
-## Version Changes
-
-- **LibUDPard Version**:
- - **Old**: `UDPARD_VERSION_MAJOR 1`, `UDPARD_VERSION_MINOR 2`
- - **New**: `UDPARD_VERSION_MAJOR 2`, `UDPARD_VERSION_MINOR 0`
-- **Cyphal Specification Version**: Remains the same (`1.0`).
-
-## Key API Changes
-
-### UdpardTx Structure Changes
-
-- **Memory Resource Field**: The `UdpardTx` structure's `memory` field type has changed from `UdpardMemoryResource` to `UdpardTxMemoryResources`.
-
- ```c
- // In v1.x
- struct UdpardTx {
- // ...
- struct UdpardMemoryResource memory;
- // ...
- };
-
- // In v2.0
- struct UdpardTx {
- // ...
- struct UdpardTxMemoryResources memory;
- // ...
- };
- ```
-
-### Memory Management Adjustments
-
-- **Separate Memory Resources**: `UdpardTxMemoryResources` now allows separate memory resources for fragment handles and payload storage.
-
- ```c
- struct UdpardTxMemoryResources {
- struct UdpardMemoryResource fragment; // For UdpardTxItem allocations
- struct UdpardMemoryResource payload; // For datagram payload allocations
- };
- ```
-
-- **Memory Allocation Changes**: The number of memory allocations per datagram has increased from one to two:
- - **v1.x**: One allocation per datagram (including `UdpardTxItem` and payload).
- - **v2.0**: Two allocations per datagram—one for `UdpardTxItem` and one for the payload.
-
-### UdpardTxItem Structure Updates
-
-- **Mutable datagram_payload Field**: The `datagram_payload` field in `UdpardTxItem` is now mutable, allowing ownership transfer of the payload.
-
-- **New priority Field**: A new `priority` field has been added to `UdpardTxItem` to retain the original transfer priority level.
-
- ```c
- struct UdpardTxItem {
- // ...
- enum UdpardPriority priority; // New field in v2.0
- struct UdpardMutablePayload datagram_payload; // Now mutable
- // ...
- };
- ```
-
-### Function Signature Modifications
-
-- **udpardTxInit**: The `memory` parameter type has changed to `UdpardTxMemoryResources`.
-
- ```c
- // In v1.x
- int_fast8_t udpardTxInit(
- struct UdpardTx* self,
- const UdpardNodeID* local_node_id,
- size_t queue_capacity,
- struct UdpardMemoryResource memory
- );
-
- // In v2.0
- int_fast8_t udpardTxInit(
- struct UdpardTx* self,
- const UdpardNodeID* local_node_id,
- size_t queue_capacity,
- struct UdpardTxMemoryResources memory
- );
- ```
-
-- **udpardTxFree**: The `memory` parameter type has changed to `UdpardTxMemoryResources`.
-
- ```c
- // In v1.x
- void udpardTxFree(
- const struct UdpardMemoryResource memory,
- struct UdpardTxItem* item
- );
-
- // In v2.0
- void udpardTxFree(
- const struct UdpardTxMemoryResources memory,
- struct UdpardTxItem* item
- );
- ```
-
-- **udpardTxPeek**: The return type has changed from `const struct UdpardTxItem*` to `struct UdpardTxItem*` to allow modification of the `datagram_payload` field.
-
- ```c
- // In v1.x
- const struct UdpardTxItem* udpardTxPeek(const struct UdpardTx* self);
-
- // In v2.0
- struct UdpardTxItem* udpardTxPeek(const struct UdpardTx* self);
- ```
-
-## Migration Steps
-
-Follow these steps to update your application code to be compatible with LibUDPard v2.0.
-
-### 1. Update UdpardTx Initialization
-
-- **Adjust the `udpardTxInit` Call**: Update the `memory` parameter to use `UdpardTxMemoryResources`.
-
- ```c
- // Before (v1.x)
- struct UdpardMemoryResource tx_memory = { /*...*/ };
- udpardTxInit(&tx_instance, &local_node_id, queue_capacity, tx_memory);
-
- // After (v2.0)
- struct UdpardTxMemoryResources tx_memory = {
- .fragment = { /*...*/ },
- .payload = { /*...*/ }
- };
- udpardTxInit(&tx_instance, &local_node_id, queue_capacity, tx_memory);
- ```
-
-- **Define Separate Memory Resources**: Initialize separate memory resources for fragments and payloads.
-
-### 2. Adjust Memory Resources
-
-- **Update Memory Allocation Logic**: Ensure that your memory allocator handles two separate allocations per datagram—one for `UdpardTxItem` and one for the payload.
-
- ```c
- // Example allocator adjustments
- void* allocate_fragment(void* user_reference, size_t size) { /*...*/ }
- void* allocate_payload(void* user_reference, size_t size) { /*...*/ }
- ```
-
-### 3. Modify UdpardTxItem Usage
-
-- **Handle Mutable Payloads**: Since `datagram_payload` is now mutable, you can transfer ownership of the payload to another component (e.g., transmission media) by nullifying the `size` and `data` fields after copying.
-
- ```c
- struct UdpardTxItem* tx_item = udpardTxPeek(&tx_instance);
- if (tx_item) {
- // Transfer ownership of the payload
- transmit_payload(tx_item->datagram_payload.data, tx_item->datagram_payload.size);
- tx_item->datagram_payload.data = NULL;
- tx_item->datagram_payload.size = 0;
-
- // Pop and free the item after transmission
- udpardTxPop(&tx_instance, tx_item);
- udpardTxFree(tx_instance.memory, tx_item);
- }
- ```
-
-- **Utilize the New priority Field**: Access the `priority` field in `UdpardTxItem` if needed for your application logic.
-
- ```c
- enum UdpardPriority tx_priority = tx_item->priority;
- ```
-
-### 4. Revise Function Calls
-
-- **Update `udpardTxFree` Calls**: Pass the updated `memory` parameter type.
-
- ```c
- // Before (v1.x)
- udpardTxFree(tx_memory, tx_item);
-
- // After (v2.0)
- udpardTxFree(tx_instance.memory, tx_item);
- ```
-
-- **Modify `udpardTxPeek` Usage**: Since `udpardTxPeek` now returns a mutable pointer, update your code to handle the mutable `UdpardTxItem`.
-
- ```c
- // Before (v1.x)
- const struct UdpardTxItem* tx_item = udpardTxPeek(&tx_instance);
-
- // After (v2.0)
- struct UdpardTxItem* tx_item = udpardTxPeek(&tx_instance);
- ```
-
-- **Ensure Correct Deallocation**: When freeing payloads, use the appropriate memory resource from `UdpardTxMemoryResources`.
diff --git a/README.md b/README.md
index 5a5197b..55bc1eb 100644
--- a/README.md
+++ b/README.md
@@ -1,55 +1,48 @@
-# Compact Cyphal/UDP in C
+
+
+# Cyphal/UDP transport in C
[](https://github.com/OpenCyphal-Garage/libudpard/actions/workflows/main.yml)
[](https://sonarcloud.io/summary?id=libudpard)
[](https://sonarcloud.io/summary?id=libudpard)
[](https://forum.opencyphal.org)
-LibUDPard is a compact implementation of the Cyphal/UDP protocol in C99/C11 for high-integrity real-time
-embedded systems.
-
-[Cyphal](https://opencyphal.org) is an open lightweight data bus standard designed for reliable intravehicular
-communication in aerospace and robotic applications via CAN bus, UDP, and other robust transports.
+
-We pronounce LibUDPard as *lib-you-dee-pee-ard*.
+-----
-## Features
+LibUDPard is a robust implementation of the Cyphal/UDP transport layer in C99/C11 for high-integrity real-time systems.
-Some of the features listed here are intrinsic properties of Cyphal.
+[Cyphal](https://opencyphal.org) is an open technology for real-time intravehicular distributed computing and
+communication based on modern networking standards (Ethernet, CAN FD, etc.).
+It was created to address the challenge of on-board deterministic computing and data distribution in
+next-generation intelligent vehicles: manned and unmanned aircraft, spacecraft, robots, and cars.
-- Full branch coverage and extensive static analysis.
-
-- Compliance with automatically enforceable MISRA C rules (reach out to https://forum.opencyphal.org for details).
+## Features
+- Zero-copy RX pipeline -- payload is moved from the NIC driver all the way to the application without copying.
+- ≤1-copy TX pipeline with deduplication across multiple interfaces and scattered input buffer support.
+- Support for redundant network interfaces with seamless interface aggregation and zero fail-over delay.
+- Robust message reassembler supporting highly distorted datagram streams:
+ out-of-order fragments, message ordering recovery, fragment/message deduplication, interleaving, variable MTU, ...
+- Robust message ordering recovery for ordering-sensitive applications (e.g., state estimators, control loops)
+ with well-defined deterministic recovery in the event of lost messages.
+- Packet loss mitigation via:
+  - reliable topics (retransmit until acknowledged; callback notifications for successful/failed deliveries);
+  - redundant interfaces (a packet lost on one interface may be received on another, transparently to the application).
+- Heap not required (but supported); the library can be used with fixed-size block pool allocators.
- Detailed time complexity and memory requirement models for the benefit of real-time high-integrity applications.
-
-- Purely reactive time-deterministic API without the need for background servicing.
-
-- Zero-copy data pipeline on reception --
- payload is moved from the underlying NIC driver all the way to the application without copying.
-
-- Support for redundant network interfaces with seamless interface aggregation and no fail-over delay.
-
-- Out-of-order multi-frame transfer reassembly, including cross-transfer interleaved frames.
-
-- Support for repetition-coding forward error correction (FEC) for lossy links (e.g., wireless)
- transparent to the application.
-
-- No dependency on heap memory; the library can be used with fixed-size block pool allocators.
-
-- Compatibility with all conventional 8/16/32/64-bit platforms.
-
-- Compatibility with extremely resource-constrained baremetal environments starting from 64K ROM and 64K RAM.
-
-- Implemented in ≈2000 lines of code.
+- Highly scalable: designed to handle thousands of topics and hundreds of concurrent transfers with minimal resources.
+- Runs anywhere out of the box, including extremely resource-constrained baremetal environments with ~100K ROM/RAM.
+ No porting required.
+- Partial MISRA C compliance (reach out to <https://forum.opencyphal.org> for details).
+- Full implementation in a single C file with only 2k lines of straightforward C99!
+- Extensive verification suite.
## Usage
-The library implements the Cyphal/UDP protocol, which is a transport-layer entity.
-An application using this library will need to implement the presentation layer above the library,
-perhaps with the help of the [Nunavut transpiler](https://github.com/OpenCyphal/nunavut),
-and the network layer below the library using a third-party UDP/IP stack implementation with multicast/IGMP support
-(TCP and ARP are not needed).
+An application using this library will need to provide a third-party UDP/IP stack with multicast/IGMP support
+(TCP not needed).
In the most straightforward case, the network layer can be based on the standard Berkeley socket API
or a lightweight embedded stack such as LwIP.
@@ -57,31 +50,37 @@ or a lightweight embedded stack such as LwIP.
%%{init: {"fontFamily": "Ubuntu Mono, monospace", "flowchart": {"curve": "basis"}}}%%
flowchart TD
classDef OpenCyphal color:#00DAC6,fill:#1700b3,stroke:#00DAC6,stroke-width:2px,font-weight:600
- Application <-->|messages,\nrequests,\nresponses| LibUDPard[fa:fa-code LibUDPard]
+ Application <-->|messages| LibUDPard[fa:fa-code LibUDPard]
class LibUDPard OpenCyphal
- LibUDPard <-->|multicast datagrams| UDP
+ LibUDPard <-->|datagrams| UDP
subgraph domain_udpip["3rd-party UDP/IP+IGMP stack"]
UDP <--> IP["IPv4, IGMPv1+"] <--> MAC
end
MAC <--> PHY
```
-To integrate the library into your application, simply copy the files under `libudpard/` into your project tree,
-or add this entire repository as a submodule.
-The library contains only one translation unit named `udpard.c`;
-no special compiler options are needed to build it.
-The library should be compatible with all conventional computer architectures where a standards-compliant C99 compiler
-is available.
+To integrate the library into your application, simply copy `udpard.c` and `udpard.h` from `libudpard/`
+into your project tree, or add this entire repository as a submodule;
+also ensure you have [`cavl2.h`](https://github.com/pavel-kirienko/cavl) somewhere in your include paths.
+
+The library contains only one translation unit named `udpard.c`; no special compiler options are needed to build it.
+The library should be compatible out of the box with all conventional computer architectures where a
+standards-compliant C99 compiler is available.
**Read the API docs in [`libudpard/udpard.h`](libudpard/udpard.h).**
-For complete usage examples, please refer to <https://github.com/OpenCyphal/demos>.
## Revisions
+### v3.0 -- WORK IN PROGRESS
+
+The library has been redesigned from scratch to support Cyphal v1.1, named topics, and reliable transfers.
+No porting guide is provided since the changes are too significant;
+please refer to the new API docs in `libudpard/udpard.h`.
+
### v2.0
- Updating from LibUDPard v1 to v2 involves several significant changes to improve memory management and payload handling.
-- Please follow [MIGRATION_v1.x_to_v2.0](MIGRATION_v1.x_to_v2.0.md) guide and carefully update your code.
+- Please follow the `MIGRATION_v1.x_to_v2.0.md` guide (available in the v2 tree).
### v1.0
diff --git a/lib/cavl/cavl2.h b/lib/cavl/cavl2.h
new file mode 100644
index 0000000..a102077
--- /dev/null
+++ b/lib/cavl/cavl2.h
@@ -0,0 +1,572 @@
+/// Source: https://github.com/pavel-kirienko/cavl
+///
+/// Cavl is a single-header C library providing an implementation of AVL tree suitable for deeply embedded systems.
+/// To integrate it into your project, simply copy this file into your source tree.
+/// You can define build option macros before including the header to customize the behavior.
+/// All definitions are prefixed with cavl2 to avoid collisions with other major versions of the library.
+/// Read the API docs below.
+///
+/// See also O1Heap <https://github.com/pavel-kirienko/o1heap> -- a deterministic memory manager for hard-real-time
+/// high-integrity embedded systems.
+///
+/// Version history:
+///
+/// - v1.0: initial release.
+/// - v2.0:
+/// - Simplify the API and improve naming.
+/// - The header file now bears the major version number, which simplifies vendoring: a project can now safely
+/// depend on cavl without the risk of version compatibility issues.
+/// - For the same reason as above, all definitions are now prefixed with cavl2 instead of cavl.
+/// - Add optional CAVL2_T macro to allow overriding the cavl2_t type. This is needed for libudpard/libcanard/etc
+/// and is generally useful because it allows library vendors to avoid exposing cavl via the library API.
+/// Also add CAVL2_RELATION to simplify comparator implementations.
+/// - Add the trivial factory definition because it is needed in nearly every application using cavl.
+/// - New traversal function cavl2_next_greater() offering the same time complexity but without recursion/callbacks.
+///
+/// -------------------------------------------------------------------------------------------------------------------
+///
+/// Copyright (c) Pavel Kirienko
+///
+/// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+/// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
+/// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+/// and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+///
+/// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+/// the Software.
+///
+/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+/// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
+/// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+/// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+// ReSharper disable CppCStyleCast CppZeroConstantCanBeReplacedWithNullptr CppTooWideScopeInitStatement
+// ReSharper disable CppRedundantElaboratedTypeSpecifier CppRedundantInlineSpecifier
+#pragma once
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+/// If Cavl is used in throughput-critical code, then it is recommended to disable assertion checks as they may
+/// be costly in terms of execution time.
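+/// A minimal sketch of how the checks may be disabled (assumed typical usage; define the macro before inclusion):
+///     #define CAVL2_NO_ASSERT 1
+///     #include "cavl2.h"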
+#ifndef CAVL2_ASSERT
+#if defined(CAVL2_NO_ASSERT) && CAVL2_NO_ASSERT
+#define CAVL2_ASSERT(x) (void)0
+#else
+#include <assert.h>
+#define CAVL2_ASSERT(x) assert(x)
+#endif
+#endif
+
+#ifdef __cplusplus
+// This is, strictly speaking, useless because we do not define any functions with external linkage here,
+// but it tells static analyzers that what follows should be interpreted as C code rather than C++.
+extern "C"
+{
+#endif
+
+// ---------------------------------------- PUBLIC API SECTION ----------------------------------------
+
+/// CAVL2_T can be defined before including this header to provide a custom struct type for the node element.
+/// The custom type must have the same fields as the default struct cavl2_t.
+/// This option is useful if Cavl is integrated into a library without exposing it through the library API.
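+/// For example, a sketch of supplying a custom node type (hypothetical my_node_t with identical fields):
+///     #define CAVL2_T struct my_node_t
+///     #include "cavl2.h"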
+#ifndef CAVL2_T
+/// The tree node/root. The user data is to be added through composition/inheritance.
+/// The memory layout of this type is compatible with void*[4], which is useful if this type cannot be exposed in API.
+/// Per standard convention, nodes that compare smaller are put on the left.
+/// Usage example:
+/// struct my_user_type_t {
+/// struct cavl2_t base; ///< Tree node. Should be the first element, otherwise, offsetof() will be needed.
+/// ... user data ...
+/// };
+struct cavl2_t
+{
+ struct cavl2_t* up; ///< Parent node, NULL in the root.
+ struct cavl2_t* lr[2]; ///< Left child (lesser), right child (greater).
+ int_fast8_t bf; ///< Balance factor is positive when right-heavy. Allowed values are {-1, 0, +1}.
+};
+#define CAVL2_T struct cavl2_t
+#endif
+
+#if defined(static_assert) || defined(__cplusplus)
+static_assert(sizeof(CAVL2_T) <= sizeof(void* [4]), "Bad size");
+#endif
+
+/// The comparator result can be overridden to simplify comparator functions.
+/// The type shall be a signed integer type.
+/// Only three possible states of the result are considered: negative, zero, and positive; the magnitude is ignored.
+#ifndef CAVL2_RELATION
+#define CAVL2_RELATION ptrdiff_t
+#endif
+/// Returns POSITIVE if the search target is GREATER than the provided node, negative if smaller, zero on match (found).
+typedef CAVL2_RELATION (*cavl2_comparator_t)(const void* user, const CAVL2_T* node);
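+/// A sketch of a conforming comparator (hypothetical owner type keyed by an int32_t; the magnitude is ignored):
+///     CAVL2_RELATION my_cmp(const void* const user, const CAVL2_T* const node)
+///     {
+///         const int32_t target = *(const int32_t*)user;
+///         const int32_t key    = ((const struct my_user_type_t*)node)->key;  // Node is the first member.
+///         return (CAVL2_RELATION)((target > key) - (target < key));
+///     }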
+
+/// If provided, the factory will be invoked when the sought node does not exist in the tree.
+/// It is expected to return a new node that will be inserted immediately (without the need to traverse the tree again).
+/// If the factory returns NULL or is not provided, the tree is not modified.
+typedef CAVL2_T* (*cavl2_factory_t)(void* user);
+
+/// Look for a node in the tree using the specified comparator. The worst-case complexity is O(log n).
+/// - If the node is found (i.e., zero comparison result), return it.
+/// - If the node is not found and the factory is NULL, return NULL.
+/// - Otherwise, construct a new node using the factory; if the result is not NULL, insert it; return the result.
+/// The user_comparator is passed into the comparator unmodified.
+/// The user_factory is passed into the factory unmodified.
+/// The root node may be replaced in the process iff the factory is not NULL and it returns a new node;
+/// otherwise, the root node will not be modified.
+/// If comparator is NULL, returns NULL.
+static inline CAVL2_T* cavl2_find_or_insert(CAVL2_T** const root,
+ const void* const user_comparator,
+ const cavl2_comparator_t comparator,
+ void* const user_factory,
+ const cavl2_factory_t factory);
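+/// A brief usage sketch (hypothetical names; my_obj is inserted via the trivial factory if the key is not found):
+///     const int32_t key = 42;
+///     CAVL2_T* const res = cavl2_find_or_insert(&root, &key, &my_cmp, &my_obj.base, &cavl2_trivial_factory);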
+
+/// A convenience wrapper over cavl2_find_or_insert() that passes NULL factory, so the tree is never modified.
+/// Since the tree is not modified, the root pointer is passed by value, unlike in the mutating version.
+static inline CAVL2_T* cavl2_find(CAVL2_T* root, const void* const user_comparator, const cavl2_comparator_t comparator)
+{
+ return cavl2_find_or_insert(&root, user_comparator, comparator, NULL, NULL);
+}
+
+/// Remove the specified node from its tree. The root node may be replaced in the process.
+/// The worst-case complexity is O(log n).
+/// The function has no effect if either of the pointers is NULL.
+/// If the node is not in the tree, the behavior is undefined; it may create cycles in the tree which is deadly.
+/// It is safe to pass the result of cavl2_find/cavl2_find_or_insert directly as the second argument:
+/// cavl2_remove(&root, cavl2_find(root, user, search_comparator));
+/// The removed node will have all of its pointers set to NULL.
+static inline void cavl2_remove(CAVL2_T** const root, CAVL2_T* const node);
+
+/// Replace the specified node with another node without rebalancing.
+/// This is useful when you want to replace a node with an equivalent one (same key ordering).
+/// The new node takes over the position (parent, children, balance factor) of the old node.
+/// The old node will have all of its pointers set to NULL.
+/// The new node must not already be in the tree; if it is, the behavior is undefined.
+/// The new node's fields (up, lr, bf) will be overwritten to match the old node's position in the tree.
+/// The complexity is O(1).
+/// The function has no effect if any of the pointers are NULL.
+/// If the old node is not in the tree, the behavior is undefined.
+static inline void cavl2_replace(CAVL2_T** const root, CAVL2_T* const old_node, CAVL2_T* const new_node);
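+/// A brief usage sketch (hypothetical names; new_obj must not already be in the tree):
+///     cavl2_replace(&root, &old_obj.base, &new_obj.base);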
+
+/// True iff the node is in the tree. The complexity is O(1).
+/// Returns false if the node is NULL.
+/// Assumes that the node pointers are NULL when it is not inserted (this is ensured by the removal function).
+static inline bool cavl2_is_inserted(const CAVL2_T* const root, const CAVL2_T* const node)
+{
+ bool out = false;
+ if (node != NULL) {
+ out = (node->up != NULL) || (node->lr[0] != NULL) || (node->lr[1] != NULL) || (node == root);
+ }
+ return out;
+}
+
+/// Remove the specified node if it is inserted in the tree; otherwise, do nothing.
+/// This is a convenience wrapper that combines cavl2_is_inserted() and cavl2_remove().
+/// Returns true if the node was inserted and has been removed, false otherwise.
+static inline bool cavl2_remove_if(CAVL2_T** const root, CAVL2_T* const node)
+{
+ bool removed = false;
+ if ((root != NULL) && cavl2_is_inserted(*root, node)) {
+ cavl2_remove(root, node);
+ removed = true;
+ }
+ return removed;
+}
+
+/// Return the min-/max-valued node stored in the tree, depending on the flag. This is an extremely fast query.
+/// Returns NULL iff the argument is NULL (i.e., the tree is empty). The worst-case complexity is O(log n).
+static inline CAVL2_T* cavl2_extremum(CAVL2_T* const root, const bool maximum)
+{
+ CAVL2_T* result = NULL;
+ CAVL2_T* c = root;
+ while (c != NULL) {
+ result = c;
+ c = c->lr[maximum];
+ }
+ return result;
+}
+
+// clang-format off
+/// Convenience wrappers for cavl2_extremum().
+static inline CAVL2_T* cavl2_min(CAVL2_T* const root) { return cavl2_extremum(root, false); }
+static inline CAVL2_T* cavl2_max(CAVL2_T* const root) { return cavl2_extremum(root, true); }
+// clang-format on
+
+/// Returns the next greater node in the in-order traversal of the tree.
+/// Does nothing and returns NULL if the argument is NULL. Behavior undefined if the node is not in the tree.
+/// To use it, first invoke cavl2_min() to get the first node, then call this function repeatedly until it returns NULL:
+/// for (CAVL2_T* p = cavl2_min(root); p != NULL; p = cavl2_next_greater(p)) {
+/// ...
+/// }
+/// The asymptotic complexity for traversing the entire tree is O(n), identical to the traditional recursive traversal.
+static inline CAVL2_T* cavl2_next_greater(CAVL2_T* const node)
+{
+ CAVL2_T* c = NULL;
+ if (node != NULL) {
+ if (node->lr[1] != NULL) {
+ c = cavl2_min(node->lr[1]);
+ } else {
+ const CAVL2_T* n = node;
+ CAVL2_T* p = node->up;
+ while ((p != NULL) && (p->lr[1] == n)) {
+ n = p;
+ p = p->up;
+ }
+ c = p;
+ }
+ }
+ return c;
+}
+
+/// Find the smallest node whose value is greater than or equal to the search target, in O(log n).
+/// Returns the first node for which the comparator returns a non-positive result.
+/// If no such node exists (all nodes compare less than target), returns NULL.
+/// The comparator returns: positive if target>candidate, zero if target==candidate, negative if target<candidate.
+/// Example: tree={1,3,5,7}, target=4 => 5; target=5 => 5; target=8 => NULL.
+static inline CAVL2_T* cavl2_lower_bound(CAVL2_T* const root,
+ const void* const user,
+ const cavl2_comparator_t comparator)
+{
+ CAVL2_T* result = NULL;
+ if ((root != NULL) && (comparator != NULL)) {
+ CAVL2_T* n = root;
+ while (n != NULL) {
+ const CAVL2_RELATION cmp = comparator(user, n);
+ if (cmp <= 0) {
+ result = n;
+ n = n->lr[0];
+ } else {
+ n = n->lr[1];
+ }
+ }
+ }
+ return result;
+}
+
+/// Find the smallest node whose value is strictly greater than the search target (upper bound).
+/// Returns the first node for which the comparator returns a negative result.
+/// See cavl2_lower_bound() for details.
+/// Example: tree={1,3,5,7}, target=4 => 5; target=5 => 7; target=7 => NULL.
+static inline CAVL2_T* cavl2_upper_bound(CAVL2_T* const root,
+ const void* const user,
+ const cavl2_comparator_t comparator)
+{
+ CAVL2_T* result = NULL;
+ if ((root != NULL) && (comparator != NULL)) {
+ CAVL2_T* n = root;
+ while (n != NULL) {
+ const CAVL2_RELATION cmp = comparator(user, n);
+ if (cmp < 0) {
+ result = n;
+ n = n->lr[0];
+ } else {
+ n = n->lr[1];
+ }
+ }
+ }
+ return result;
+}
+
+/// Find the largest node whose value is less than or equal to the search target, in O(log n).
+/// Returns the last node for which the comparator returns a non-negative result.
+/// See cavl2_lower_bound() for details.
+/// Example: tree={1,3,5,7}, target=4 => 3; target=5 => 5; target=0 => NULL.
+static inline CAVL2_T* cavl2_predecessor(CAVL2_T* const root,
+ const void* const user,
+ const cavl2_comparator_t comparator)
+{
+ CAVL2_T* result = NULL;
+ if ((root != NULL) && (comparator != NULL)) {
+ CAVL2_T* n = root;
+ while (n != NULL) {
+ const CAVL2_RELATION cmp = comparator(user, n);
+ if (cmp >= 0) {
+ result = n;
+ n = n->lr[1];
+ } else {
+ n = n->lr[0];
+ }
+ }
+ }
+ return result;
+}
+
+/// The successor counterpart of cavl2_predecessor() is an alias of cavl2_lower_bound(), provided for completeness only.
+/// Example: tree={1,3,5,7}, target=4 => 5; target=5 => 5; target=8 => NULL.
+static inline CAVL2_T* cavl2_successor(CAVL2_T* const root, const void* const user, const cavl2_comparator_t comparator)
+{
+ return cavl2_lower_bound(root, user, comparator);
+}
+
+/// The trivial factory is useful in most applications. It simply returns the user pointer converted to CAVL2_T*.
+/// It is meant for use with cavl2_find_or_insert().
+static inline CAVL2_T* cavl2_trivial_factory(void* const user)
+{
+ return (CAVL2_T*)user;
+}
+
+/// A convenience macro for use when a struct is a member of multiple AVL trees. For example:
+///
+/// struct my_type_t {
+/// struct cavl2_t tree_a;
+/// struct cavl2_t tree_b;
+/// ...
+/// };
+///
+/// If we only have tree_a, we don't need this helper because the C standard guarantees that the address of a struct
+/// equals the address of its first member, always, so simply casting a tree node to (struct my_type_t*) yields
+/// a valid pointer to the struct. However, if we have more than one tree node in a struct, for the other ones
+/// we will need to subtract the offset of the tree node field from the address of the tree node to get to the owner.
+/// This macro does exactly that. Example:
+///
+/// struct cavl2_t* tree_node_b = cavl2_find(...); // whatever
+/// if (tree_node_b == NULL) { ... } // do something else
+/// struct my_type_t* my_struct = CAVL2_TO_OWNER(tree_node_b, struct my_type_t, tree_b);
+#define CAVL2_TO_OWNER(tree_node_ptr, owner_type, owner_tree_node_field) \
+ ((owner_type*)cavl2_impl_to_owner_helper((tree_node_ptr), offsetof(owner_type, owner_tree_node_field))) // NOLINT
+
+// ---------------------------------------- END OF PUBLIC API SECTION ----------------------------------------
+// ---------------------------------------- POLICE LINE DO NOT CROSS ----------------------------------------
+
+/// INTERNAL USE ONLY.
+static inline void* cavl2_impl_to_owner_helper(const void* const tree_node_ptr, const size_t offset)
+{
+ return (tree_node_ptr == NULL) ? NULL : (void*)((char*)tree_node_ptr - offset);
+}
+
+/// INTERNAL USE ONLY. Makes the '!r' child of node 'x' its parent; i.e., rotates 'x' toward 'r'.
+static inline void cavl2_impl_rotate(CAVL2_T* const x, const bool r)
+{
+ CAVL2_ASSERT((x != NULL) && (x->lr[!r] != NULL) && ((x->bf >= -1) && (x->bf <= +1)));
+ CAVL2_T* const z = x->lr[!r];
+ if (x->up != NULL) {
+ x->up->lr[x->up->lr[1] == x] = z;
+ }
+ z->up = x->up;
+ x->up = z;
+ x->lr[!r] = z->lr[r];
+ if (x->lr[!r] != NULL) {
+ x->lr[!r]->up = x;
+ }
+ z->lr[r] = x;
+}
+
+/// INTERNAL USE ONLY.
+/// Accepts a node and how its balance factor needs to be changed -- either +1 or -1.
+/// Returns the new node to replace the old one if tree rotation took place, same node otherwise.
+static inline CAVL2_T* cavl2_impl_adjust_balance(CAVL2_T* const x, const bool increment)
+{
+ CAVL2_ASSERT((x != NULL) && ((x->bf >= -1) && (x->bf <= +1)));
+ CAVL2_T* out = x;
+ const int_fast8_t new_bf = (int_fast8_t)(x->bf + (increment ? +1 : -1));
+ if ((new_bf < -1) || (new_bf > 1)) {
+ const bool r = new_bf < 0; // bf<0 if left-heavy --> right rotation is needed.
+ const int_fast8_t sign = r ? +1 : -1; // Positive if we are rotating right.
+ CAVL2_T* const z = x->lr[!r];
+ CAVL2_ASSERT(z != NULL); // Heavy side cannot be empty. NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
+ if ((z->bf * sign) <= 0) { // Parent and child are heavy on the same side or the child is balanced.
+ out = z;
+ cavl2_impl_rotate(x, r);
+ if (0 == z->bf) {
+ x->bf = (int_fast8_t)(-sign);
+ z->bf = (int_fast8_t)(+sign);
+ } else {
+ x->bf = 0;
+ z->bf = 0;
+ }
+ } else { // Otherwise, the child needs to be rotated in the opposite direction first.
+ CAVL2_T* const y = z->lr[r];
+ CAVL2_ASSERT(y != NULL); // Heavy side cannot be empty.
+ out = y;
+ cavl2_impl_rotate(z, !r);
+ cavl2_impl_rotate(x, r);
+ if ((y->bf * sign) < 0) {
+ x->bf = (int_fast8_t)(+sign);
+ y->bf = 0;
+ z->bf = 0;
+ } else if ((y->bf * sign) > 0) {
+ x->bf = 0;
+ y->bf = 0;
+ z->bf = (int_fast8_t)(-sign);
+ } else {
+ x->bf = 0;
+ z->bf = 0;
+ }
+ }
+ } else {
+ x->bf = new_bf; // Balancing not needed, just update the balance factor and call it a day.
+ }
+ return out;
+}
+
+/// INTERNAL USE ONLY.
+/// Takes the culprit node (the one that is added); returns NULL or the root of the tree (possibly new one).
+/// When adding a new node, set its balance factor to zero and call this function to propagate the changes upward.
+static inline CAVL2_T* cavl2_impl_retrace_on_growth(CAVL2_T* const added)
+{
+ CAVL2_ASSERT((added != NULL) && (0 == added->bf));
+ CAVL2_T* c = added; // Child
+ CAVL2_T* p = added->up; // Parent
+ while (p != NULL) {
+ const bool r = p->lr[1] == c; // c is the right child of parent
+ CAVL2_ASSERT(p->lr[r] == c);
+ c = cavl2_impl_adjust_balance(p, r);
+ p = c->up;
+ if (0 == c->bf) { // The height change of the subtree made this parent balanced (as all things should be),
+ break; // hence, the height of the outer subtree is unchanged, so upper balance factors are unchanged.
+ }
+ }
+ CAVL2_ASSERT(c != NULL);
+ return (NULL == p) ? c : NULL; // New root or nothing.
+}
+
+static inline CAVL2_T* cavl2_find_or_insert(CAVL2_T** const root,
+ const void* const user_comparator,
+ const cavl2_comparator_t comparator,
+ void* const user_factory,
+ const cavl2_factory_t factory)
+{
+ CAVL2_T* out = NULL;
+ if ((root != NULL) && (comparator != NULL)) {
+ CAVL2_T* up = *root;
+ CAVL2_T** n = root;
+ while (*n != NULL) {
+ const CAVL2_RELATION cmp = comparator(user_comparator, *n);
+ if (0 == cmp) {
+ out = *n;
+ break;
+ }
+ up = *n;
+ n = &(*n)->lr[cmp > 0];
+ CAVL2_ASSERT((NULL == *n) || ((*n)->up == up));
+ }
+ if (NULL == out) {
+ out = (NULL == factory) ? NULL : factory(user_factory);
+ if (out != NULL) {
+ *n = out; // Overwrite the pointer to the new node in the parent node.
+ out->lr[0] = NULL;
+ out->lr[1] = NULL;
+ out->up = up;
+ out->bf = 0;
+ CAVL2_T* const rt = cavl2_impl_retrace_on_growth(out);
+ if (rt != NULL) {
+ *root = rt;
+ }
+ }
+ }
+ }
+ return out;
+}
+
+static inline void cavl2_remove(CAVL2_T** const root, CAVL2_T* const node)
+{
+ if ((root != NULL) && (node != NULL)) {
+ CAVL2_ASSERT(*root != NULL); // Otherwise, the node would have to be NULL.
+ CAVL2_ASSERT((node->up != NULL) || (node == *root));
+ CAVL2_T* p = NULL; // The lowest parent node that suffered a shortening of its subtree.
+ bool r = false; // Which side of the above was shortened.
+ // The first step is to update the topology and remember the node where to start the retracing from later.
+ // Balancing is not performed yet so we may end up with an unbalanced tree.
+ if ((node->lr[0] != NULL) && (node->lr[1] != NULL)) {
+ CAVL2_T* const re = cavl2_extremum(node->lr[1], false);
+ CAVL2_ASSERT((re != NULL) && (NULL == re->lr[0]) && (re->up != NULL));
+ re->bf = node->bf;
+ re->lr[0] = node->lr[0];
+ re->lr[0]->up = re;
+ if (re->up != node) {
+ p = re->up; // Retracing starts with the ex-parent of our replacement node.
+ CAVL2_ASSERT(p->lr[0] == re);
+ p->lr[0] = re->lr[1]; // Reducing the height of the left subtree here.
+ if (p->lr[0] != NULL) {
+ p->lr[0]->up = p;
+ }
+ re->lr[1] = node->lr[1];
+ re->lr[1]->up = re;
+ r = false;
+ } else { // In this case, we are reducing the height of the right subtree, so r=1.
+ p = re; // Retracing starts with the replacement node itself as we are deleting its parent.
+ r = true; // The right child of the replacement node remains the same so we don't bother relinking it.
+ }
+ re->up = node->up;
+ if (re->up != NULL) {
+ re->up->lr[re->up->lr[1] == node] = re; // Replace link in the parent of node.
+ } else {
+ *root = re;
+ }
+ } else { // Either or both of the children are NULL.
+ p = node->up;
+ const bool rr = node->lr[1] != NULL;
+ if (node->lr[rr] != NULL) {
+ node->lr[rr]->up = p;
+ }
+ if (p != NULL) {
+ r = p->lr[1] == node;
+ p->lr[r] = node->lr[rr];
+ if (p->lr[r] != NULL) {
+ p->lr[r]->up = p;
+ }
+ } else {
+ *root = node->lr[rr];
+ }
+ }
+ // Now that the topology is updated, perform the retracing to restore balance. We climb up adjusting the
+ // balance factors until we reach the root or a parent whose balance factor becomes plus/minus one, which
+ // means that that parent was able to absorb the balance delta; in other words, the height of the outer
+ // subtree is unchanged, so upper balance factors shall be kept unchanged.
+ if (p != NULL) {
+ CAVL2_T* c = NULL;
+ for (;;) {
+ c = cavl2_impl_adjust_balance(p, !r);
+ p = c->up;
+ if ((c->bf != 0) || (NULL == p)) { // Reached the root or the height difference is absorbed by c.
+ break;
+ }
+ r = p->lr[1] == c;
+ }
+ if (NULL == p) {
+ CAVL2_ASSERT(c != NULL);
+ *root = c;
+ }
+ }
+ // Invalidate the node's pointers to indicate it is no longer in the tree.
+ node->up = NULL;
+ node->lr[0] = NULL;
+ node->lr[1] = NULL;
+ }
+}
+
+static inline void cavl2_replace(CAVL2_T** const root, CAVL2_T* const old_node, CAVL2_T* const new_node)
+{
+ if ((root != NULL) && (old_node != NULL) && (new_node != NULL)) {
+ CAVL2_ASSERT(*root != NULL); // Otherwise, old_node would have to be NULL.
+ CAVL2_ASSERT((old_node->up != NULL) || (old_node == *root)); // old_node must be in the tree.
+ CAVL2_ASSERT((new_node->up == NULL) && (new_node->lr[0] == NULL) && (new_node->lr[1] == NULL));
+ // Copy the structural data from the old node to the new node.
+ new_node->up = old_node->up;
+ new_node->lr[0] = old_node->lr[0];
+ new_node->lr[1] = old_node->lr[1];
+ new_node->bf = old_node->bf;
+ // Update the parent to point to the new node.
+ if (old_node->up != NULL) {
+ old_node->up->lr[old_node->up->lr[1] == old_node] = new_node;
+ } else {
+ *root = new_node;
+ }
+ // Update the children to point to the new parent.
+ if (old_node->lr[0] != NULL) {
+ old_node->lr[0]->up = new_node;
+ }
+ if (old_node->lr[1] != NULL) {
+ old_node->lr[1]->up = new_node;
+ }
+ // Invalidate the old node's pointers to indicate it is no longer in the tree.
+ old_node->up = NULL;
+ old_node->lr[0] = NULL;
+ old_node->lr[1] = NULL;
+ }
+}
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/libudpard/_udpard_cavl.h b/libudpard/_udpard_cavl.h
deleted file mode 100644
index e8fe2e2..0000000
--- a/libudpard/_udpard_cavl.h
+++ /dev/null
@@ -1,338 +0,0 @@
-/// Source: https://github.com/pavel-kirienko/cavl
-///
-/// Cavl is a single-header C library providing an implementation of AVL tree suitable for deeply embedded systems.
-/// To integrate it into your project, simply copy this file into your source tree. Read the API docs below.
-///
-/// See also O1Heap <https://github.com/pavel-kirienko/o1heap> -- a deterministic memory manager for hard-real-time
-/// high-integrity embedded systems.
-///
-/// Copyright (c) 2021 Pavel Kirienko
-///
-/// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
-/// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
-/// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
-/// and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-///
-/// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
-/// the Software.
-///
-/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
-/// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
-/// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-/// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-#pragma once
-
-#include "udpard.h"
-
-/// Modified for use with Libudpard: use the same assertion check macro if provided.
-#ifdef UDPARD_ASSERT
-# define CAVL_ASSERT UDPARD_ASSERT
-#else
-// Intentional violation of MISRA: inclusion not at the top of the file to eliminate unnecessary dependency on assert.h.
-# include <assert.h> // NOSONAR
-# define CAVL_ASSERT assert
-#endif
-
-#ifdef __cplusplus
-// This is, strictly speaking, useless because we do not define any functions with external linkage here,
-// but it tells static analyzers that what follows should be interpreted as C code rather than C++.
-extern "C" {
-#endif
-
-// ---------------------------------------- PUBLIC API SECTION ----------------------------------------
-
-/// Modified for use with LibUDPard: expose the Cavl structure via public API as UdpardTreeNode.
-typedef struct UdpardTreeNode Cavl;
-
-/// Returns POSITIVE if the search target is GREATER than the provided node, negative if smaller, zero on match (found).
-/// Values other than {-1, 0, +1} are not recommended to avoid overflow during the narrowing conversion of the result.
-typedef int_fast8_t (*CavlPredicate)(void* user_reference, const Cavl* node);
-
-/// If provided, the factory will be invoked when the sought node does not exist in the tree.
-/// It is expected to return a new node that will be inserted immediately (without the need to traverse the tree again).
-/// If the factory returns NULL or is not provided, the tree is not modified.
-typedef Cavl* (*CavlFactory)(void* user_reference);
-
-/// Look for a node in the tree using the specified search predicate. The worst-case complexity is O(log n).
-/// - If the node is found, return it.
-/// - If the node is not found and the factory is NULL, return NULL.
-/// - Otherwise, construct a new node using the factory; if the result is not NULL, insert it; return the result.
-/// The user_reference is passed into the predicate & factory unmodified.
-/// The root node may be replaced in the process.
-/// If predicate is NULL, returns NULL.
-static inline Cavl* cavlSearch(Cavl** const root,
- void* const user_reference,
- const CavlPredicate predicate,
- const CavlFactory factory);
-
-/// Remove the specified node from its tree. The root node may be replaced in the process.
-/// The worst-case complexity is O(log n).
-/// The function has no effect if either of the pointers are NULL.
-/// If the node is not in the tree, the behavior is undefined; it may create cycles in the tree which is deadly.
-/// It is safe to pass the result of cavlSearch() directly as the second argument:
-/// cavlRemove(&root, cavlSearch(&root, user_reference, search_predicate, NULL));
-/// It is recommended to invalidate the pointers stored in the node after its removal.
-static inline void cavlRemove(Cavl** const root, const Cavl* const node);
-
-/// Return the min-/max-valued node stored in the tree, depending on the flag. This is an extremely fast query.
-/// Returns NULL iff the argument is NULL (i.e., the tree is empty). The worst-case complexity is O(log n).
-static inline Cavl* cavlFindExtremum(Cavl* const root, const bool maximum)
-{
- Cavl* result = NULL;
- Cavl* c = root;
- while (c != NULL)
- {
- result = c;
- c = c->lr[maximum];
- }
- return result;
-}
-
-// ---------------------------------------- END OF PUBLIC API SECTION ----------------------------------------
-// ---------------------------------------- POLICE LINE DO NOT CROSS ----------------------------------------
-
-/// INTERNAL USE ONLY. Makes the '!r' child of node 'x' its parent; i.e., rotates 'x' toward 'r'.
-static inline void cavlPrivateRotate(Cavl* const x, const bool r)
-{
- CAVL_ASSERT((x != NULL) && (x->lr[!r] != NULL) && ((x->bf >= -1) && (x->bf <= +1)));
- Cavl* const z = x->lr[!r];
- if (x->up != NULL)
- {
- x->up->lr[x->up->lr[1] == x] = z;
- }
- z->up = x->up;
- x->up = z;
- x->lr[!r] = z->lr[r];
- if (x->lr[!r] != NULL)
- {
- x->lr[!r]->up = x;
- }
- z->lr[r] = x;
-}
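
A note for readers following the removed implementation: the two-element lr[] child array collapses the usual mirrored left/right rotation bodies into one. Expanding the statements above for r == true (a right rotation), with the up-pointer bookkeeping omitted, as a sketch:

    Cavl* const z = x->lr[0];  // Promote the left (heavy-side) child.
    x->lr[0]      = z->lr[1];  // z's right subtree is re-parented as x's left subtree.
    z->lr[1]      = x;         // x descends to become z's right child.
    // With r == false, the same statements mirror into a left rotation.
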
-
-/// INTERNAL USE ONLY.
-/// Accepts a node and how its balance factor needs to be changed -- either +1 or -1.
-/// Returns the new node to replace the old one if tree rotation took place, same node otherwise.
-static inline Cavl* cavlPrivateAdjustBalance(Cavl* const x, const bool increment)
-{
- CAVL_ASSERT((x != NULL) && ((x->bf >= -1) && (x->bf <= +1)));
- Cavl* out = x;
- const int_fast8_t new_bf = (int_fast8_t) (x->bf + (increment ? +1 : -1));
- if ((new_bf < -1) || (new_bf > 1))
- {
- const bool r = new_bf < 0; // bf<0 if left-heavy --> right rotation is needed.
- const int_fast8_t sign = r ? +1 : -1; // Positive if we are rotating right.
- Cavl* const z = x->lr[!r];
- CAVL_ASSERT(z != NULL); // Heavy side cannot be empty.
- // NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
- if ((z->bf * sign) <= 0) // Parent and child are heavy on the same side or the child is balanced.
- {
- out = z;
- cavlPrivateRotate(x, r);
- if (0 == z->bf)
- {
- x->bf = (int_fast8_t) (-sign);
- z->bf = (int_fast8_t) (+sign);
- }
- else
- {
- x->bf = 0;
- z->bf = 0;
- }
- }
- else // Otherwise, the child needs to be rotated in the opposite direction first.
- {
- Cavl* const y = z->lr[r];
- CAVL_ASSERT(y != NULL); // Heavy side cannot be empty.
- out = y;
- cavlPrivateRotate(z, !r);
- cavlPrivateRotate(x, r);
- if ((y->bf * sign) < 0)
- {
- x->bf = (int_fast8_t) (+sign);
- y->bf = 0;
- z->bf = 0;
- }
- else if ((y->bf * sign) > 0)
- {
- x->bf = 0;
- y->bf = 0;
- z->bf = (int_fast8_t) (-sign);
- }
- else
- {
- x->bf = 0;
- z->bf = 0;
- }
- }
- }
- else
- {
- x->bf = new_bf; // Balancing not needed, just update the balance factor and call it a day.
- }
- return out;
-}
-
-/// INTERNAL USE ONLY.
-/// Takes the culprit node (the one that is added); returns NULL or the root of the tree (possibly new one).
-/// When adding a new node, set its balance factor to zero and call this function to propagate the changes upward.
-static inline Cavl* cavlPrivateRetraceOnGrowth(Cavl* const added)
-{
- CAVL_ASSERT((added != NULL) && (0 == added->bf));
- Cavl* c = added; // Child
- Cavl* p = added->up; // Parent
- while (p != NULL)
- {
- const bool r = p->lr[1] == c; // c is the right child of parent
- CAVL_ASSERT(p->lr[r] == c);
- c = cavlPrivateAdjustBalance(p, r);
- p = c->up;
- if (0 == c->bf)
- { // The height change of the subtree made this parent perfectly balanced (as all things should be),
- break; // hence, the height of the outer subtree is unchanged, so upper balance factors are unchanged.
- }
- }
- CAVL_ASSERT(c != NULL);
- return (NULL == p) ? c : NULL; // New root or nothing.
-}
-
-static inline Cavl* cavlSearch(Cavl** const root,
- void* const user_reference,
- const CavlPredicate predicate,
- const CavlFactory factory)
-{
- Cavl* out = NULL;
- if ((root != NULL) && (predicate != NULL))
- {
- Cavl* up = *root;
- Cavl** n = root;
- while (*n != NULL)
- {
- const int_fast8_t cmp = predicate(user_reference, *n);
- if (0 == cmp)
- {
- out = *n;
- break;
- }
- up = *n;
- n = &(*n)->lr[cmp > 0];
- CAVL_ASSERT((NULL == *n) || ((*n)->up == up));
- }
- if (NULL == out)
- {
- out = (NULL == factory) ? NULL : factory(user_reference);
- if (out != NULL)
- {
- *n = out; // Overwrite the pointer to the new node in the parent node.
- out->lr[0] = NULL;
- out->lr[1] = NULL;
- out->up = up;
- out->bf = 0;
- Cavl* const rt = cavlPrivateRetraceOnGrowth(out);
- if (rt != NULL)
- {
- *root = rt;
- }
- }
- }
- }
- return out;
-}
-
-static inline void cavlRemove(Cavl** const root, const Cavl* const node)
-{
- if ((root != NULL) && (node != NULL))
- {
- CAVL_ASSERT(*root != NULL); // Otherwise, the node would have to be NULL.
- CAVL_ASSERT((node->up != NULL) || (node == *root));
- Cavl* p = NULL; // The lowest parent node that suffered a shortening of its subtree.
- bool r = false; // Which side of the above was shortened.
- // The first step is to update the topology and remember the node where to start the retracing from later.
- // Balancing is not performed yet so we may end up with an unbalanced tree.
- if ((node->lr[0] != NULL) && (node->lr[1] != NULL))
- {
- Cavl* const re = cavlFindExtremum(node->lr[1], false);
- CAVL_ASSERT((re != NULL) && (NULL == re->lr[0]) && (re->up != NULL));
- re->bf = node->bf;
- re->lr[0] = node->lr[0];
- re->lr[0]->up = re;
- if (re->up != node)
- {
- p = re->up; // Retracing starts with the ex-parent of our replacement node.
- CAVL_ASSERT(p->lr[0] == re);
- p->lr[0] = re->lr[1]; // Reducing the height of the left subtree here.
- if (p->lr[0] != NULL)
- {
- p->lr[0]->up = p;
- }
- re->lr[1] = node->lr[1];
- re->lr[1]->up = re;
- r = false;
- }
- else // In this case, we are reducing the height of the right subtree, so r=1.
- {
- p = re; // Retracing starts with the replacement node itself as we are deleting its parent.
- r = true; // The right child of the replacement node remains the same so we don't bother relinking it.
- }
- re->up = node->up;
- if (re->up != NULL)
- {
- re->up->lr[re->up->lr[1] == node] = re; // Replace link in the parent of node.
- }
- else
- {
- *root = re;
- }
- }
- else // Either or both of the children are NULL.
- {
- p = node->up;
- const bool rr = node->lr[1] != NULL;
- if (node->lr[rr] != NULL)
- {
- node->lr[rr]->up = p;
- }
- if (p != NULL)
- {
- r = p->lr[1] == node;
- p->lr[r] = node->lr[rr];
- if (p->lr[r] != NULL)
- {
- p->lr[r]->up = p;
- }
- }
- else
- {
- *root = node->lr[rr];
- }
- }
- // Now that the topology is updated, perform the retracing to restore balance. We climb up adjusting the
- // balance factors until we reach the root or a parent whose balance factor becomes plus/minus one, which
- // means that that parent was able to absorb the balance delta; in other words, the height of the outer
- // subtree is unchanged, so upper balance factors shall be kept unchanged.
- if (p != NULL)
- {
- Cavl* c = NULL;
- for (;;)
- {
- c = cavlPrivateAdjustBalance(p, !r);
- p = c->up;
- if ((c->bf != 0) || (NULL == p)) // Reached the root or the height difference is absorbed by c.
- {
- break;
- }
- r = p->lr[1] == c;
- }
- if (NULL == p)
- {
- CAVL_ASSERT(c != NULL);
- *root = c;
- }
- }
- }
-}
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/libudpard/udpard.c b/libudpard/udpard.c
index c7a7241..b6a685b 100644
--- a/libudpard/udpard.c
+++ b/libudpard/udpard.c
@@ -3,1880 +3,2492 @@
/// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
/// Author: Pavel Kirienko
-#include "udpard.h"
-#include "_udpard_cavl.h"
+// ReSharper disable CppDFATimeOver
+#include "udpard.h"
+#include <stddef.h>
 #include <string.h>
-// --------------------------------------------- BUILD CONFIGURATION ---------------------------------------------
-
/// Define this macro to include build configuration header.
/// Usage example with CMake: "-DUDPARD_CONFIG_HEADER=\"${CMAKE_CURRENT_SOURCE_DIR}/my_udpard_config.h\""
#ifdef UDPARD_CONFIG_HEADER
-# include UDPARD_CONFIG_HEADER
+#include UDPARD_CONFIG_HEADER
#endif
/// By default, this macro resolves to the standard assert(). The user can redefine this if necessary.
/// To disable assertion checks completely, make it expand into `(void)(0)`.
#ifndef UDPARD_ASSERT
-// Intentional violation of MISRA: inclusion not at the top of the file to eliminate unnecessary dependency on assert.h.
-# include <assert.h> // NOSONAR
// Intentional violation of MISRA: assertion macro cannot be replaced with a function definition.
-# define UDPARD_ASSERT(x) assert(x) // NOSONAR
+#define UDPARD_ASSERT(x) assert(x) // NOSONAR
#endif
-#if !defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L)
-# error "Unsupported language: ISO C99 or a newer version is required."
+#if __STDC_VERSION__ < 201112L
+// Intentional violation of MISRA: static assertion macro cannot be replaced with a function definition.
+#define static_assert(x, ...) typedef char _static_assert_gl(_static_assertion_, __LINE__)[(x) ? 1 : -1] // NOSONAR
+#define _static_assert_gl(a, b) _static_assert_gl_impl(a, b) // NOSONAR
+// Intentional violation of MISRA: the paste operator ## cannot be avoided in this context.
+#define _static_assert_gl_impl(a, b) a##b // NOSONAR
#endif
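
Concretely, under C99 the fallback above turns a failed compile-time check into an illegal negative-size array type. A sketch of the expansion:

    /* static_assert(sizeof(int) >= 4, "message");  on line 100 expands roughly to: */
    typedef char _static_assertion_100[(sizeof(int) >= 4) ? 1 : -1];
    /* A false condition produces char[-1], which every conforming compiler rejects. */
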
-// --------------------------------------------- COMMON DEFINITIONS ---------------------------------------------
-
-typedef uint_least8_t byte_t; ///< For compatibility with platforms where byte size is not 8 bits.
-
-static const uint_fast8_t ByteWidth = 8U;
-static const byte_t ByteMask = 0xFFU;
+#if !defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L)
+#error "Unsupported language: ISO C99 or a newer version is required."
+#endif
-#define RX_SLOT_COUNT 2
-#define TIMESTAMP_UNSET UINT64_MAX
-#define FRAME_INDEX_UNSET UINT32_MAX
-#define TRANSFER_ID_UNSET UINT64_MAX
+#define CAVL2_T udpard_tree_t
+#define CAVL2_RELATION int32_t
+#define CAVL2_ASSERT(x) UDPARD_ASSERT(x) // NOSONAR
+#include "cavl2.h" // NOSONAR
-typedef struct
-{
- enum UdpardPriority priority;
- UdpardNodeID src_node_id;
- UdpardNodeID dst_node_id;
- uint16_t data_specifier;
- UdpardTransferID transfer_id;
-} TransferMetadata;
+typedef unsigned char byte_t; ///< For compatibility with platforms where byte size is not 8 bits.
-#define DATA_SPECIFIER_SERVICE_NOT_MESSAGE_MASK 0x8000U
-#define DATA_SPECIFIER_SERVICE_REQUEST_NOT_RESPONSE_MASK 0x4000U
-#define DATA_SPECIFIER_SERVICE_ID_MASK 0x3FFFU
+/// Sessions will be garbage-collected after being idle for this long, along with unfinished transfers, if any.
+/// Pending slots within a live session will also be reset after this timeout to avoid storing stale data indefinitely.
+#define SESSION_LIFETIME (60 * MEGA)
-#define HEADER_SIZE_BYTES 24U
-#define HEADER_VERSION 1U
-/// The frame index is a 31-bit unsigned integer. The most significant bit is used to indicate the end of transfer.
-#define HEADER_FRAME_INDEX_EOT_MASK 0x80000000UL
-#define HEADER_FRAME_INDEX_MAX 0x7FFFFFFFUL
-#define HEADER_FRAME_INDEX_MASK HEADER_FRAME_INDEX_MAX
+/// The maximum number of incoming transfers that can be in the state of incomplete reassembly simultaneously.
+/// Additional transfers will replace the oldest ones.
+/// This number should normally be at least as large as there are priority levels. More is fine but rarely useful.
+#define RX_SLOT_COUNT UDPARD_PRIORITY_COUNT
-/// The port number is defined in the Cyphal/UDP Specification.
-#define UDP_PORT 9382U
+/// The number of most recent transfers to keep in the history for ACK retransmission and duplicate detection.
+/// Should be a power of two to allow replacement of modulo operation with a bitwise AND.
+///
+/// Implementation note: we used to store bitmap windows instead of a full list of recent transfer-IDs, but they
+/// were found to offer no advantage except in the perfect scenario of non-restarting senders, while adding
+/// implementation complexity (more branches, more lines of code), so they were replaced with a simple list.
+/// Unlike the bitmap, the list works equally well with a non-contiguous transfer-ID stream, making it more robust.
+#define RX_TRANSFER_HISTORY_COUNT 32U
+
+/// In the ORDERED reassembly mode, with the most recently received transfer-ID N, the library will reject
+/// transfers with transfer-ID less than or equal to N-ORDERING_WINDOW (modulo 2^64) as late.
+/// This limit is chosen rather arbitrarily; its value does not affect the resource utilization in any way.
+/// One trade-off to keep in mind is that a very large window may somewhat increase the likelihood of choosing a new
+/// random transfer-ID that falls within the window, thus being rejected as late by receivers; however, given the
+/// 64-bit ID space, this value will have to be extremely large to have any measurable effect on that probability.
+#define RX_TRANSFER_ORDERING_WINDOW 8192U
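
To make the window semantics concrete, here is a hedged sketch of the late-transfer test implied by the comment above; it illustrates the concept and is not necessarily the exact expression used later in this file:

    static bool is_late(const uint64_t most_recent, const uint64_t candidate)
    {
        // The subtraction wraps modulo 2^64; reinterpreting it as signed yields a directed distance.
        const int64_t distance = (int64_t) (candidate - most_recent);
        return distance <= -(int64_t) RX_TRANSFER_ORDERING_WINDOW;
    }
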
+
+#define UDP_PORT 9382U
+#define IPv4_MCAST_PREFIX 0xEF000000UL
+static_assert((UDPARD_IPv4_SUBJECT_ID_MAX & (UDPARD_IPv4_SUBJECT_ID_MAX + 1)) == 0,
+ "UDPARD_IPv4_SUBJECT_ID_MAX must be one less than a power of 2");
+
+#define BIG_BANG INT64_MIN
+#define HEAT_DEATH INT64_MAX
+
+#define KILO 1000LL
+#define MEGA 1000000LL
+
+/// Pending ack transfers expire after this long if not transmitted.
+#define ACK_TX_DEADLINE MEGA
+
+/// The ACK message payload is structured as follows, in DSDL notation:
+///
+/// uint64 topic_hash # Topic hash of the original message being acknowledged.
+/// uint64 transfer_id # Transfer-ID of the original message being acknowledged.
+/// # If there is any additional data not defined by the format, it must be ignored.
+#define ACK_SIZE_BYTES 16U
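
Given the fixed layout above, building an ACK payload reduces to two little-endian u64 writes. A minimal sketch (make_ack_payload is a hypothetical helper; serialize_u64() is defined later in this file):

    static void make_ack_payload(byte_t out[ACK_SIZE_BYTES], const uint64_t topic_hash, const uint64_t transfer_id)
    {
        byte_t* ptr = serialize_u64(out, topic_hash);
        ptr         = serialize_u64(ptr, transfer_id);
        UDPARD_ASSERT((size_t) (ptr - out) == ACK_SIZE_BYTES);
    }
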
-// See Cyphal/UDP Specification, section 4.3.2.1 Endpoints.
-#define SUBJECT_MULTICAST_GROUP_ADDRESS_MASK 0xEF000000UL
-#define SERVICE_MULTICAST_GROUP_ADDRESS_MASK 0xEF010000UL
+static size_t smaller(const size_t a, const size_t b) { return (a < b) ? a : b; }
+static size_t larger(const size_t a, const size_t b) { return (a > b) ? a : b; }
+static int64_t min_i64(const int64_t a, const int64_t b) { return (a < b) ? a : b; }
+static int64_t max_i64(const int64_t a, const int64_t b) { return (a > b) ? a : b; }
+static udpard_us_t earlier(const udpard_us_t a, const udpard_us_t b) { return min_i64(a, b); }
+static udpard_us_t later(const udpard_us_t a, const udpard_us_t b) { return max_i64(a, b); }
-static uint32_t makeSubjectIPGroupAddress(const UdpardPortID subject_id)
+/// Two memory resources are considered identical if they share the same user pointer and the same allocation function.
+/// The deallocation function is intentionally excluded from the comparison.
+static bool mem_same(const udpard_mem_t a, const udpard_mem_t b)
{
- return SUBJECT_MULTICAST_GROUP_ADDRESS_MASK | ((uint32_t) subject_id);
+ return (a.context == b.context) && (a.vtable == b.vtable);
}
-static uint32_t makeServiceIPGroupAddress(const UdpardNodeID destination_node_id)
+static void* mem_alloc(const udpard_mem_t memory, const size_t size)
{
- return SERVICE_MULTICAST_GROUP_ADDRESS_MASK | ((uint32_t) destination_node_id);
+ return memory.vtable->alloc(memory.context, size);
}
-static struct UdpardUDPIPEndpoint makeSubjectUDPIPEndpoint(const UdpardPortID subject_id)
+static void mem_free(const udpard_mem_t memory, const size_t size, void* const data)
{
- return (struct UdpardUDPIPEndpoint) {.ip_address = makeSubjectIPGroupAddress(subject_id), //
- .udp_port = UDP_PORT};
+ memory.vtable->base.free(memory.context, size, data);
}
-static struct UdpardUDPIPEndpoint makeServiceUDPIPEndpoint(const UdpardNodeID destination_node_id)
+static void mem_free_payload(const udpard_deleter_t memory, const udpard_bytes_mut_t payload)
{
- return (struct UdpardUDPIPEndpoint) {.ip_address = makeServiceIPGroupAddress(destination_node_id),
- .udp_port = UDP_PORT};
+ if (payload.data != NULL) {
+ memory.vtable->free(memory.context, payload.size, payload.data);
+ }
}
-/// Used for inserting new items into AVL trees. Refer to the documentation for cavlSearch() for details.
-static struct UdpardTreeNode* avlTrivialFactory(void* const user_reference)
+static byte_t* serialize_u32(byte_t* ptr, const uint32_t value)
{
- return (struct UdpardTreeNode*) user_reference;
+ for (size_t i = 0; i < sizeof(value); i++) {
+ *ptr++ = (byte_t)((byte_t)(value >> (i * 8U)) & 0xFFU);
+ }
+ return ptr;
}
-static size_t smaller(const size_t a, const size_t b)
+static byte_t* serialize_u64(byte_t* ptr, const uint64_t value)
{
- return (a < b) ? a : b;
+ for (size_t i = 0; i < sizeof(value); i++) {
+ *ptr++ = (byte_t)((byte_t)(value >> (i * 8U)) & 0xFFU);
+ }
+ return ptr;
}
-static size_t larger(const size_t a, const size_t b)
+static const byte_t* deserialize_u32(const byte_t* ptr, uint32_t* const out_value)
{
- return (a > b) ? a : b;
+ UDPARD_ASSERT((ptr != NULL) && (out_value != NULL));
+ *out_value = 0;
+ for (size_t i = 0; i < sizeof(*out_value); i++) {
+ *out_value |= (uint32_t)((uint32_t)*ptr << (i * 8U)); // NOLINT(google-readability-casting) NOSONAR
+ ptr++;
+ }
+ return ptr;
}
-static uint32_t max32(const uint32_t a, const uint32_t b)
+static const byte_t* deserialize_u64(const byte_t* ptr, uint64_t* const out_value)
{
- return (a > b) ? a : b;
+ UDPARD_ASSERT((ptr != NULL) && (out_value != NULL));
+ *out_value = 0;
+ for (size_t i = 0; i < sizeof(*out_value); i++) {
+ *out_value |= ((uint64_t)*ptr << (i * 8U));
+ ptr++;
+ }
+ return ptr;
}
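
These helpers are byte-order-independent because they (de)compose values one octet at a time, always least-significant first. A quick round-trip sketch:

    byte_t   buf[8];
    uint64_t value = 0;
    (void) serialize_u64(buf, 0x0123456789ABCDEFULL);
    (void) deserialize_u64(buf, &value);
    UDPARD_ASSERT(value == 0x0123456789ABCDEFULL);
    UDPARD_ASSERT(buf[0] == 0xEFU);  // Least significant byte first: the wire format is little-endian.
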
-/// Returns the sign of the subtraction of the operands; zero if equal. This is useful for AVL search.
-static int_fast8_t compare32(const uint32_t a, const uint32_t b)
+// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling)
+static void mem_zero(const size_t size, void* const data) { (void)memset(data, 0, size); }
+
+udpard_deleter_t udpard_make_deleter(const udpard_mem_t memory)
{
- int_fast8_t result = 0;
- if (a > b)
- {
- result = +1;
- }
- if (a < b)
- {
- result = -1;
- }
- return result;
+ return (udpard_deleter_t){ .vtable = &memory.vtable->base, .context = memory.context };
}
-static void* memAlloc(const struct UdpardMemoryResource memory, const size_t size)
+bool udpard_is_valid_endpoint(const udpard_udpip_ep_t ep)
{
- UDPARD_ASSERT(memory.allocate != NULL);
- return memory.allocate(memory.user_reference, size);
+ return (ep.port != 0) && (ep.ip != 0) && (ep.ip != UINT32_MAX);
}
-static void memFree(const struct UdpardMemoryResource memory, const size_t size, void* const data)
+static uint16_t valid_ep_bitmap(const udpard_udpip_ep_t remote_ep[UDPARD_IFACE_COUNT_MAX])
{
- UDPARD_ASSERT(memory.deallocate != NULL);
- memory.deallocate(memory.user_reference, size, data);
+ uint16_t bitmap = 0U;
+ if (remote_ep != NULL) {
+ for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+ if (udpard_is_valid_endpoint(remote_ep[i])) {
+ bitmap |= (1U << i);
+ }
+ }
+ }
+ return bitmap;
}
-static void memFreePayload(const struct UdpardMemoryDeleter memory, const struct UdpardMutablePayload payload)
+udpard_udpip_ep_t udpard_make_subject_endpoint(const uint32_t subject_id)
{
- UDPARD_ASSERT(memory.deallocate != NULL);
- if (payload.data != NULL)
- {
- memory.deallocate(memory.user_reference, payload.size, payload.data);
- }
-}
-
-static void memZero(const size_t size, void* const data)
-{
- // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling)
- (void) memset(data, 0, size);
-}
-
-// --------------------------------------------- HEADER CRC ---------------------------------------------
-
-#define HEADER_CRC_INITIAL 0xFFFFU
-#define HEADER_CRC_RESIDUE 0x0000U
-#define HEADER_CRC_SIZE_BYTES 2U
-
-static uint16_t headerCRCAddByte(const uint16_t crc, const byte_t byte)
-{
- static const uint16_t Table[256] = {
- 0x0000U, 0x1021U, 0x2042U, 0x3063U, 0x4084U, 0x50A5U, 0x60C6U, 0x70E7U, 0x8108U, 0x9129U, 0xA14AU, 0xB16BU,
- 0xC18CU, 0xD1ADU, 0xE1CEU, 0xF1EFU, 0x1231U, 0x0210U, 0x3273U, 0x2252U, 0x52B5U, 0x4294U, 0x72F7U, 0x62D6U,
- 0x9339U, 0x8318U, 0xB37BU, 0xA35AU, 0xD3BDU, 0xC39CU, 0xF3FFU, 0xE3DEU, 0x2462U, 0x3443U, 0x0420U, 0x1401U,
- 0x64E6U, 0x74C7U, 0x44A4U, 0x5485U, 0xA56AU, 0xB54BU, 0x8528U, 0x9509U, 0xE5EEU, 0xF5CFU, 0xC5ACU, 0xD58DU,
- 0x3653U, 0x2672U, 0x1611U, 0x0630U, 0x76D7U, 0x66F6U, 0x5695U, 0x46B4U, 0xB75BU, 0xA77AU, 0x9719U, 0x8738U,
- 0xF7DFU, 0xE7FEU, 0xD79DU, 0xC7BCU, 0x48C4U, 0x58E5U, 0x6886U, 0x78A7U, 0x0840U, 0x1861U, 0x2802U, 0x3823U,
- 0xC9CCU, 0xD9EDU, 0xE98EU, 0xF9AFU, 0x8948U, 0x9969U, 0xA90AU, 0xB92BU, 0x5AF5U, 0x4AD4U, 0x7AB7U, 0x6A96U,
- 0x1A71U, 0x0A50U, 0x3A33U, 0x2A12U, 0xDBFDU, 0xCBDCU, 0xFBBFU, 0xEB9EU, 0x9B79U, 0x8B58U, 0xBB3BU, 0xAB1AU,
- 0x6CA6U, 0x7C87U, 0x4CE4U, 0x5CC5U, 0x2C22U, 0x3C03U, 0x0C60U, 0x1C41U, 0xEDAEU, 0xFD8FU, 0xCDECU, 0xDDCDU,
- 0xAD2AU, 0xBD0BU, 0x8D68U, 0x9D49U, 0x7E97U, 0x6EB6U, 0x5ED5U, 0x4EF4U, 0x3E13U, 0x2E32U, 0x1E51U, 0x0E70U,
- 0xFF9FU, 0xEFBEU, 0xDFDDU, 0xCFFCU, 0xBF1BU, 0xAF3AU, 0x9F59U, 0x8F78U, 0x9188U, 0x81A9U, 0xB1CAU, 0xA1EBU,
- 0xD10CU, 0xC12DU, 0xF14EU, 0xE16FU, 0x1080U, 0x00A1U, 0x30C2U, 0x20E3U, 0x5004U, 0x4025U, 0x7046U, 0x6067U,
- 0x83B9U, 0x9398U, 0xA3FBU, 0xB3DAU, 0xC33DU, 0xD31CU, 0xE37FU, 0xF35EU, 0x02B1U, 0x1290U, 0x22F3U, 0x32D2U,
- 0x4235U, 0x5214U, 0x6277U, 0x7256U, 0xB5EAU, 0xA5CBU, 0x95A8U, 0x8589U, 0xF56EU, 0xE54FU, 0xD52CU, 0xC50DU,
- 0x34E2U, 0x24C3U, 0x14A0U, 0x0481U, 0x7466U, 0x6447U, 0x5424U, 0x4405U, 0xA7DBU, 0xB7FAU, 0x8799U, 0x97B8U,
- 0xE75FU, 0xF77EU, 0xC71DU, 0xD73CU, 0x26D3U, 0x36F2U, 0x0691U, 0x16B0U, 0x6657U, 0x7676U, 0x4615U, 0x5634U,
- 0xD94CU, 0xC96DU, 0xF90EU, 0xE92FU, 0x99C8U, 0x89E9U, 0xB98AU, 0xA9ABU, 0x5844U, 0x4865U, 0x7806U, 0x6827U,
- 0x18C0U, 0x08E1U, 0x3882U, 0x28A3U, 0xCB7DU, 0xDB5CU, 0xEB3FU, 0xFB1EU, 0x8BF9U, 0x9BD8U, 0xABBBU, 0xBB9AU,
- 0x4A75U, 0x5A54U, 0x6A37U, 0x7A16U, 0x0AF1U, 0x1AD0U, 0x2AB3U, 0x3A92U, 0xFD2EU, 0xED0FU, 0xDD6CU, 0xCD4DU,
- 0xBDAAU, 0xAD8BU, 0x9DE8U, 0x8DC9U, 0x7C26U, 0x6C07U, 0x5C64U, 0x4C45U, 0x3CA2U, 0x2C83U, 0x1CE0U, 0x0CC1U,
- 0xEF1FU, 0xFF3EU, 0xCF5DU, 0xDF7CU, 0xAF9BU, 0xBFBAU, 0x8FD9U, 0x9FF8U, 0x6E17U, 0x7E36U, 0x4E55U, 0x5E74U,
- 0x2E93U, 0x3EB2U, 0x0ED1U, 0x1EF0U,
- };
- return (uint16_t) ((uint16_t) (crc << ByteWidth) ^
- Table[(uint16_t) ((uint16_t) (crc >> ByteWidth) ^ byte) & ByteMask]);
+ return (udpard_udpip_ep_t){ .ip = IPv4_MCAST_PREFIX | (subject_id & UDPARD_IPv4_SUBJECT_ID_MAX), .port = UDP_PORT };
}
-static uint16_t headerCRCCompute(const size_t size, const void* const data)
+typedef struct
{
- UDPARD_ASSERT((data != NULL) || (size == 0U));
- uint16_t out = HEADER_CRC_INITIAL;
- const byte_t* p = (const byte_t*) data;
- for (size_t i = 0; i < size; i++)
- {
- out = headerCRCAddByte(out, *p);
- ++p;
+ const udpard_bytes_scattered_t* cursor; ///< Initially points at the head.
+ size_t position; ///< Position within the current fragment, initially zero.
+} bytes_scattered_reader_t;
+
+/// Sequentially reads data from a scattered byte array into a contiguous destination buffer.
+/// Requires that the total amount of read data does not exceed the total size of the scattered array.
+static void bytes_scattered_read(bytes_scattered_reader_t* const reader, const size_t size, void* const destination)
+{
+ UDPARD_ASSERT((reader != NULL) && (reader->cursor != NULL) && (destination != NULL));
+ byte_t* ptr = (byte_t*)destination;
+ size_t remaining = size;
+ while (remaining > 0U) {
+ UDPARD_ASSERT(reader->position <= reader->cursor->bytes.size);
+ while (reader->position == reader->cursor->bytes.size) { // Advance while skipping empty fragments.
+ reader->position = 0U;
+ reader->cursor = reader->cursor->next;
+ UDPARD_ASSERT(reader->cursor != NULL);
+ }
+ UDPARD_ASSERT(reader->position < reader->cursor->bytes.size);
+ const size_t progress = smaller(remaining, reader->cursor->bytes.size - reader->position);
+ UDPARD_ASSERT((progress > 0U) && (progress <= remaining));
+ UDPARD_ASSERT((reader->position + progress) <= reader->cursor->bytes.size);
+ // NOLINTNEXTLINE(*DeprecatedOrUnsafeBufferHandling)
+ (void)memcpy(ptr, ((const byte_t*)reader->cursor->bytes.data) + reader->position, progress);
+ ptr += progress;
+ remaining -= progress;
+ reader->position += progress;
}
- return out;
}
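
To illustrate the reader contract (a hedged sketch with made-up fragment contents): two chained fragments are consumed as one contiguous stream, and the reader must never be asked for more bytes than the chain holds.

    const byte_t a[3] = {1, 2, 3};
    const byte_t b[2] = {4, 5};
    const udpard_bytes_scattered_t second = {.bytes = {.size = sizeof(b), .data = b}, .next = NULL};
    const udpard_bytes_scattered_t first  = {.bytes = {.size = sizeof(a), .data = a}, .next = &second};
    bytes_scattered_reader_t reader = {.cursor = &first, .position = 0};
    byte_t out[5];
    bytes_scattered_read(&reader, 2, &out[0]);  // Reads {1, 2}.
    bytes_scattered_read(&reader, 3, &out[2]);  // Continues seamlessly with {3, 4, 5}.
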
-// --------------------------------------------- TRANSFER CRC ---------------------------------------------
-
-#define TRANSFER_CRC_INITIAL 0xFFFFFFFFUL
-#define TRANSFER_CRC_OUTPUT_XOR 0xFFFFFFFFUL
-#define TRANSFER_CRC_RESIDUE_BEFORE_OUTPUT_XOR 0xB798B438UL
-#define TRANSFER_CRC_RESIDUE_AFTER_OUTPUT_XOR (TRANSFER_CRC_RESIDUE_BEFORE_OUTPUT_XOR ^ TRANSFER_CRC_OUTPUT_XOR)
-#define TRANSFER_CRC_SIZE_BYTES 4U
-
-static uint32_t transferCRCAddByte(const uint32_t crc, const byte_t byte)
-{
- static const uint32_t Table[256] = {
- 0x00000000UL, 0xF26B8303UL, 0xE13B70F7UL, 0x1350F3F4UL, 0xC79A971FUL, 0x35F1141CUL, 0x26A1E7E8UL, 0xD4CA64EBUL,
- 0x8AD958CFUL, 0x78B2DBCCUL, 0x6BE22838UL, 0x9989AB3BUL, 0x4D43CFD0UL, 0xBF284CD3UL, 0xAC78BF27UL, 0x5E133C24UL,
- 0x105EC76FUL, 0xE235446CUL, 0xF165B798UL, 0x030E349BUL, 0xD7C45070UL, 0x25AFD373UL, 0x36FF2087UL, 0xC494A384UL,
- 0x9A879FA0UL, 0x68EC1CA3UL, 0x7BBCEF57UL, 0x89D76C54UL, 0x5D1D08BFUL, 0xAF768BBCUL, 0xBC267848UL, 0x4E4DFB4BUL,
- 0x20BD8EDEUL, 0xD2D60DDDUL, 0xC186FE29UL, 0x33ED7D2AUL, 0xE72719C1UL, 0x154C9AC2UL, 0x061C6936UL, 0xF477EA35UL,
- 0xAA64D611UL, 0x580F5512UL, 0x4B5FA6E6UL, 0xB93425E5UL, 0x6DFE410EUL, 0x9F95C20DUL, 0x8CC531F9UL, 0x7EAEB2FAUL,
- 0x30E349B1UL, 0xC288CAB2UL, 0xD1D83946UL, 0x23B3BA45UL, 0xF779DEAEUL, 0x05125DADUL, 0x1642AE59UL, 0xE4292D5AUL,
- 0xBA3A117EUL, 0x4851927DUL, 0x5B016189UL, 0xA96AE28AUL, 0x7DA08661UL, 0x8FCB0562UL, 0x9C9BF696UL, 0x6EF07595UL,
- 0x417B1DBCUL, 0xB3109EBFUL, 0xA0406D4BUL, 0x522BEE48UL, 0x86E18AA3UL, 0x748A09A0UL, 0x67DAFA54UL, 0x95B17957UL,
- 0xCBA24573UL, 0x39C9C670UL, 0x2A993584UL, 0xD8F2B687UL, 0x0C38D26CUL, 0xFE53516FUL, 0xED03A29BUL, 0x1F682198UL,
- 0x5125DAD3UL, 0xA34E59D0UL, 0xB01EAA24UL, 0x42752927UL, 0x96BF4DCCUL, 0x64D4CECFUL, 0x77843D3BUL, 0x85EFBE38UL,
- 0xDBFC821CUL, 0x2997011FUL, 0x3AC7F2EBUL, 0xC8AC71E8UL, 0x1C661503UL, 0xEE0D9600UL, 0xFD5D65F4UL, 0x0F36E6F7UL,
- 0x61C69362UL, 0x93AD1061UL, 0x80FDE395UL, 0x72966096UL, 0xA65C047DUL, 0x5437877EUL, 0x4767748AUL, 0xB50CF789UL,
- 0xEB1FCBADUL, 0x197448AEUL, 0x0A24BB5AUL, 0xF84F3859UL, 0x2C855CB2UL, 0xDEEEDFB1UL, 0xCDBE2C45UL, 0x3FD5AF46UL,
- 0x7198540DUL, 0x83F3D70EUL, 0x90A324FAUL, 0x62C8A7F9UL, 0xB602C312UL, 0x44694011UL, 0x5739B3E5UL, 0xA55230E6UL,
- 0xFB410CC2UL, 0x092A8FC1UL, 0x1A7A7C35UL, 0xE811FF36UL, 0x3CDB9BDDUL, 0xCEB018DEUL, 0xDDE0EB2AUL, 0x2F8B6829UL,
- 0x82F63B78UL, 0x709DB87BUL, 0x63CD4B8FUL, 0x91A6C88CUL, 0x456CAC67UL, 0xB7072F64UL, 0xA457DC90UL, 0x563C5F93UL,
- 0x082F63B7UL, 0xFA44E0B4UL, 0xE9141340UL, 0x1B7F9043UL, 0xCFB5F4A8UL, 0x3DDE77ABUL, 0x2E8E845FUL, 0xDCE5075CUL,
- 0x92A8FC17UL, 0x60C37F14UL, 0x73938CE0UL, 0x81F80FE3UL, 0x55326B08UL, 0xA759E80BUL, 0xB4091BFFUL, 0x466298FCUL,
- 0x1871A4D8UL, 0xEA1A27DBUL, 0xF94AD42FUL, 0x0B21572CUL, 0xDFEB33C7UL, 0x2D80B0C4UL, 0x3ED04330UL, 0xCCBBC033UL,
- 0xA24BB5A6UL, 0x502036A5UL, 0x4370C551UL, 0xB11B4652UL, 0x65D122B9UL, 0x97BAA1BAUL, 0x84EA524EUL, 0x7681D14DUL,
- 0x2892ED69UL, 0xDAF96E6AUL, 0xC9A99D9EUL, 0x3BC21E9DUL, 0xEF087A76UL, 0x1D63F975UL, 0x0E330A81UL, 0xFC588982UL,
- 0xB21572C9UL, 0x407EF1CAUL, 0x532E023EUL, 0xA145813DUL, 0x758FE5D6UL, 0x87E466D5UL, 0x94B49521UL, 0x66DF1622UL,
- 0x38CC2A06UL, 0xCAA7A905UL, 0xD9F75AF1UL, 0x2B9CD9F2UL, 0xFF56BD19UL, 0x0D3D3E1AUL, 0x1E6DCDEEUL, 0xEC064EEDUL,
- 0xC38D26C4UL, 0x31E6A5C7UL, 0x22B65633UL, 0xD0DDD530UL, 0x0417B1DBUL, 0xF67C32D8UL, 0xE52CC12CUL, 0x1747422FUL,
- 0x49547E0BUL, 0xBB3FFD08UL, 0xA86F0EFCUL, 0x5A048DFFUL, 0x8ECEE914UL, 0x7CA56A17UL, 0x6FF599E3UL, 0x9D9E1AE0UL,
- 0xD3D3E1ABUL, 0x21B862A8UL, 0x32E8915CUL, 0xC083125FUL, 0x144976B4UL, 0xE622F5B7UL, 0xF5720643UL, 0x07198540UL,
- 0x590AB964UL, 0xAB613A67UL, 0xB831C993UL, 0x4A5A4A90UL, 0x9E902E7BUL, 0x6CFBAD78UL, 0x7FAB5E8CUL, 0x8DC0DD8FUL,
- 0xE330A81AUL, 0x115B2B19UL, 0x020BD8EDUL, 0xF0605BEEUL, 0x24AA3F05UL, 0xD6C1BC06UL, 0xC5914FF2UL, 0x37FACCF1UL,
- 0x69E9F0D5UL, 0x9B8273D6UL, 0x88D28022UL, 0x7AB90321UL, 0xAE7367CAUL, 0x5C18E4C9UL, 0x4F48173DUL, 0xBD23943EUL,
- 0xF36E6F75UL, 0x0105EC76UL, 0x12551F82UL, 0xE03E9C81UL, 0x34F4F86AUL, 0xC69F7B69UL, 0xD5CF889DUL, 0x27A40B9EUL,
- 0x79B737BAUL, 0x8BDCB4B9UL, 0x988C474DUL, 0x6AE7C44EUL, 0xBE2DA0A5UL, 0x4C4623A6UL, 0x5F16D052UL, 0xAD7D5351UL,
- };
- return (crc >> ByteWidth) ^ Table[byte ^ (crc & ByteMask)];
-}
-
-/// Do not forget to apply the output XOR when done, or use transferCRCCompute().
-static uint32_t transferCRCAdd(const uint32_t crc, const size_t size, const void* const data)
+static size_t bytes_scattered_size(const udpard_bytes_scattered_t head)
{
- UDPARD_ASSERT((data != NULL) || (size == 0U));
- uint32_t out = crc;
- const byte_t* p = (const byte_t*) data;
- for (size_t i = 0; i < size; i++)
- {
- out = transferCRCAddByte(out, *p);
- ++p;
+ size_t size = head.bytes.size;
+ const udpard_bytes_scattered_t* current = head.next;
+ while (current != NULL) {
+ size += current->bytes.size;
+ current = current->next;
}
- return out;
+ return size;
}
-static uint32_t transferCRCCompute(const size_t size, const void* const data)
+/// We require that the fragment tree contains no fragment whose range is fully contained in, or equal to, that of
+/// another. This implies that no two fragments share the same offset, and that ordering fragments by offset also
+/// orders them by their ends.
+static int32_t cavl_compare_fragment_offset(const void* const user, const udpard_tree_t* const node)
{
- return transferCRCAdd(TRANSFER_CRC_INITIAL, size, data) ^ TRANSFER_CRC_OUTPUT_XOR;
+ const size_t u = *(const size_t*)user;
+ const size_t v = ((const udpard_fragment_t*)node)->offset; // clang-format off
+ if (u < v) { return -1; }
+ if (u > v) { return +1; }
+ return 0; // clang-format on
}
-
-// =====================================================================================================================
-// ================================================= TX PIPELINE =================================================
-// =====================================================================================================================
-
-/// Chain of TX frames prepared for insertion into a TX queue.
-typedef struct
+static int32_t cavl_compare_fragment_end(const void* const user, const udpard_tree_t* const node)
{
- struct UdpardTxItem* head;
- struct UdpardTxItem* tail;
- size_t count;
-} TxChain;
+ const size_t u = *(const size_t*)user;
+ const udpard_fragment_t* const f = (const udpard_fragment_t*)node;
+ const size_t v = f->offset + f->view.size; // clang-format off
+ if (u < v) { return -1; }
+ if (u > v) { return +1; }
+ return 0; // clang-format on
+}
-static bool txValidateMemoryResources(const struct UdpardTxMemoryResources memory)
+// NOLINTNEXTLINE(misc-no-recursion)
+void udpard_fragment_free_all(udpard_fragment_t* const frag, const udpard_deleter_t fragment_deleter)
{
- return (memory.fragment.allocate != NULL) && (memory.fragment.deallocate != NULL) &&
- (memory.payload.allocate != NULL) && (memory.payload.deallocate != NULL);
+ if (frag != NULL) {
+ // Descend the tree
+ for (size_t i = 0; i < 2; i++) {
+ if (frag->index_offset.lr[i] != NULL) {
+ frag->index_offset.lr[i]->up = NULL; // Prevent backtrack ascension from this branch
+ udpard_fragment_free_all((udpard_fragment_t*)frag->index_offset.lr[i], fragment_deleter);
+ frag->index_offset.lr[i] = NULL; // Avoid dangling pointers even if we're headed for imminent destruction
+ }
+ }
+ // Delete this fragment
+ udpard_fragment_t* const parent = (udpard_fragment_t*)frag->index_offset.up;
+ mem_free_payload(frag->payload_deleter, frag->origin);
+ fragment_deleter.vtable->free(fragment_deleter.context, sizeof(udpard_fragment_t), frag);
+ // Ascend the tree.
+ if (parent != NULL) {
+ parent->index_offset.lr[parent->index_offset.lr[1] == (udpard_tree_t*)frag] = NULL;
+ udpard_fragment_free_all(parent, fragment_deleter); // tail call
+ }
+ }
}
-static struct UdpardTxItem* txNewItem(const struct UdpardTxMemoryResources memory,
- const uint_least8_t dscp_value_per_priority[UDPARD_PRIORITY_MAX + 1U],
- const UdpardMicrosecond deadline_usec,
- const enum UdpardPriority priority,
- const struct UdpardUDPIPEndpoint endpoint,
- const size_t datagram_payload_size,
- void* const user_transfer_reference)
+udpard_fragment_t* udpard_fragment_seek(const udpard_fragment_t* frag, const size_t offset)
{
- struct UdpardTxItem* out = memAlloc(memory.fragment, sizeof(struct UdpardTxItem));
- if (out != NULL)
- {
- // No tree linkage by default.
- out->base.up = NULL;
- out->base.lr[0] = NULL;
- out->base.lr[1] = NULL;
- out->base.bf = 0;
- // Init metadata.
- out->priority = priority;
- out->next_in_transfer = NULL; // Last by default.
- out->deadline_usec = deadline_usec;
- UDPARD_ASSERT(priority <= UDPARD_PRIORITY_MAX);
- out->dscp = dscp_value_per_priority[priority];
- out->destination = endpoint;
- out->user_transfer_reference = user_transfer_reference;
-
- void* const payload_data = memAlloc(memory.payload, datagram_payload_size);
- if (NULL != payload_data)
- {
- out->datagram_payload.data = payload_data;
- out->datagram_payload.size = datagram_payload_size;
+ if (frag != NULL) {
+ while (frag->index_offset.up != NULL) { // Only if the given node is not already the root.
+ frag = (const udpard_fragment_t*)frag->index_offset.up;
}
- else
- {
- memFree(memory.fragment, sizeof(struct UdpardTxItem), out);
- out = NULL;
+ if (offset == 0) { // Common fast path.
+ return (udpard_fragment_t*)cavl2_min((udpard_tree_t*)frag);
+ }
+ udpard_fragment_t* const f =
+ (udpard_fragment_t*)cavl2_predecessor((udpard_tree_t*)frag, &offset, &cavl_compare_fragment_offset);
+ if ((f != NULL) && ((f->offset + f->view.size) > offset)) {
+ UDPARD_ASSERT(f->offset <= offset);
+ return f;
}
}
- return out;
+ return NULL;
}
-/// Frames with identical weight are processed in the FIFO order.
-/// Frames with higher weight compare smaller (i.e., put on the left side of the tree).
-static int_fast8_t txAVLPredicate(void* const user_reference, // NOSONAR Cavl API requires pointer to non-const.
- const struct UdpardTreeNode* const node)
+udpard_fragment_t* udpard_fragment_next(const udpard_fragment_t* frag)
{
- const struct UdpardTxItem* const target = (const struct UdpardTxItem*) user_reference;
- const struct UdpardTxItem* const other = (const struct UdpardTxItem*) (const void*) node;
- UDPARD_ASSERT((target != NULL) && (other != NULL));
- return (target->priority >= other->priority) ? +1 : -1;
+ return (frag != NULL) ? ((udpard_fragment_t*)cavl2_next_greater((udpard_tree_t*)frag)) : NULL;
}
-/// The primitive serialization functions are endian-agnostic.
-static byte_t* txSerializeU16(byte_t* const destination_buffer, const uint16_t value)
+size_t udpard_fragment_gather(const udpard_fragment_t** cursor,
+ const size_t offset,
+ const size_t size,
+ void* const destination)
{
- byte_t* ptr = destination_buffer;
- *ptr++ = (byte_t) (value & ByteMask);
- *ptr++ = (byte_t) ((byte_t) (value >> ByteWidth) & ByteMask);
- return ptr;
-}
+ size_t copied = 0;
+ if ((cursor != NULL) && (*cursor != NULL) && (destination != NULL)) {
+ const size_t end_offset = (*cursor)->offset + (*cursor)->view.size;
+ const udpard_fragment_t* f = NULL;
+ if ((offset < (*cursor)->offset) || (offset > end_offset)) {
+ f = udpard_fragment_seek(*cursor, offset);
+ } else if (offset == end_offset) { // Common case during sequential access.
+ f = udpard_fragment_next(*cursor);
+ } else {
+ f = *cursor;
+ }
+ if ((f != NULL) && (size > 0U)) {
+ const udpard_fragment_t* last = f;
+ size_t pos = offset;
+ byte_t* const out = (byte_t*)destination;
+ while ((f != NULL) && (copied < size)) { // Copy contiguous fragments starting at the requested offset.
+ UDPARD_ASSERT(f->offset <= pos);
+ UDPARD_ASSERT(pos < (f->offset + f->view.size));
+ UDPARD_ASSERT(f->view.data != NULL);
+ const size_t bias = pos - f->offset;
+ const size_t to_copy = smaller(f->view.size - bias, size - copied);
+ // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling)
+ (void)memcpy(out + copied, ((const byte_t*)f->view.data) + bias, to_copy);
+ copied += to_copy;
+ pos += to_copy;
+ last = f;
+ if (copied < size) {
+ f = udpard_fragment_next(f);
+ UDPARD_ASSERT((f == NULL) || (f->offset == pos));
+ }
+ }
+ *cursor = last; // Keep iterator non-NULL.
+ }
+ UDPARD_ASSERT(NULL != *cursor);
+ }
+ return copied;
+}
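
A typical consumption pattern, as a hedged sketch (head and total are hypothetical: the root fragment of a reassembled transfer and its payload size): drain the payload in fixed-size chunks, keeping the cursor across calls so each call resumes where the previous one ended.

    const udpard_fragment_t* cursor = head;
    size_t offset = 0;
    byte_t chunk[64];
    while (offset < total) {
        const size_t n = udpard_fragment_gather(&cursor, offset, sizeof(chunk), chunk);
        if (n == 0) {
            break;  // No data at this offset; cannot happen for a complete, well-formed transfer.
        }
        // ...consume chunk[0..n)...
        offset += n;
    }
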
+
+// --------------------------------------------- CRC ---------------------------------------------
+
+#define CRC_INITIAL 0xFFFFFFFFUL
+#define CRC_OUTPUT_XOR 0xFFFFFFFFUL
+#define CRC_RESIDUE_BEFORE_OUTPUT_XOR 0xB798B438UL
+#define CRC_RESIDUE_AFTER_OUTPUT_XOR (CRC_RESIDUE_BEFORE_OUTPUT_XOR ^ CRC_OUTPUT_XOR)
+#define CRC_SIZE_BYTES 4U
+
+static const uint32_t crc_table[256] = {
+ 0x00000000UL, 0xF26B8303UL, 0xE13B70F7UL, 0x1350F3F4UL, 0xC79A971FUL, 0x35F1141CUL, 0x26A1E7E8UL, 0xD4CA64EBUL,
+ 0x8AD958CFUL, 0x78B2DBCCUL, 0x6BE22838UL, 0x9989AB3BUL, 0x4D43CFD0UL, 0xBF284CD3UL, 0xAC78BF27UL, 0x5E133C24UL,
+ 0x105EC76FUL, 0xE235446CUL, 0xF165B798UL, 0x030E349BUL, 0xD7C45070UL, 0x25AFD373UL, 0x36FF2087UL, 0xC494A384UL,
+ 0x9A879FA0UL, 0x68EC1CA3UL, 0x7BBCEF57UL, 0x89D76C54UL, 0x5D1D08BFUL, 0xAF768BBCUL, 0xBC267848UL, 0x4E4DFB4BUL,
+ 0x20BD8EDEUL, 0xD2D60DDDUL, 0xC186FE29UL, 0x33ED7D2AUL, 0xE72719C1UL, 0x154C9AC2UL, 0x061C6936UL, 0xF477EA35UL,
+ 0xAA64D611UL, 0x580F5512UL, 0x4B5FA6E6UL, 0xB93425E5UL, 0x6DFE410EUL, 0x9F95C20DUL, 0x8CC531F9UL, 0x7EAEB2FAUL,
+ 0x30E349B1UL, 0xC288CAB2UL, 0xD1D83946UL, 0x23B3BA45UL, 0xF779DEAEUL, 0x05125DADUL, 0x1642AE59UL, 0xE4292D5AUL,
+ 0xBA3A117EUL, 0x4851927DUL, 0x5B016189UL, 0xA96AE28AUL, 0x7DA08661UL, 0x8FCB0562UL, 0x9C9BF696UL, 0x6EF07595UL,
+ 0x417B1DBCUL, 0xB3109EBFUL, 0xA0406D4BUL, 0x522BEE48UL, 0x86E18AA3UL, 0x748A09A0UL, 0x67DAFA54UL, 0x95B17957UL,
+ 0xCBA24573UL, 0x39C9C670UL, 0x2A993584UL, 0xD8F2B687UL, 0x0C38D26CUL, 0xFE53516FUL, 0xED03A29BUL, 0x1F682198UL,
+ 0x5125DAD3UL, 0xA34E59D0UL, 0xB01EAA24UL, 0x42752927UL, 0x96BF4DCCUL, 0x64D4CECFUL, 0x77843D3BUL, 0x85EFBE38UL,
+ 0xDBFC821CUL, 0x2997011FUL, 0x3AC7F2EBUL, 0xC8AC71E8UL, 0x1C661503UL, 0xEE0D9600UL, 0xFD5D65F4UL, 0x0F36E6F7UL,
+ 0x61C69362UL, 0x93AD1061UL, 0x80FDE395UL, 0x72966096UL, 0xA65C047DUL, 0x5437877EUL, 0x4767748AUL, 0xB50CF789UL,
+ 0xEB1FCBADUL, 0x197448AEUL, 0x0A24BB5AUL, 0xF84F3859UL, 0x2C855CB2UL, 0xDEEEDFB1UL, 0xCDBE2C45UL, 0x3FD5AF46UL,
+ 0x7198540DUL, 0x83F3D70EUL, 0x90A324FAUL, 0x62C8A7F9UL, 0xB602C312UL, 0x44694011UL, 0x5739B3E5UL, 0xA55230E6UL,
+ 0xFB410CC2UL, 0x092A8FC1UL, 0x1A7A7C35UL, 0xE811FF36UL, 0x3CDB9BDDUL, 0xCEB018DEUL, 0xDDE0EB2AUL, 0x2F8B6829UL,
+ 0x82F63B78UL, 0x709DB87BUL, 0x63CD4B8FUL, 0x91A6C88CUL, 0x456CAC67UL, 0xB7072F64UL, 0xA457DC90UL, 0x563C5F93UL,
+ 0x082F63B7UL, 0xFA44E0B4UL, 0xE9141340UL, 0x1B7F9043UL, 0xCFB5F4A8UL, 0x3DDE77ABUL, 0x2E8E845FUL, 0xDCE5075CUL,
+ 0x92A8FC17UL, 0x60C37F14UL, 0x73938CE0UL, 0x81F80FE3UL, 0x55326B08UL, 0xA759E80BUL, 0xB4091BFFUL, 0x466298FCUL,
+ 0x1871A4D8UL, 0xEA1A27DBUL, 0xF94AD42FUL, 0x0B21572CUL, 0xDFEB33C7UL, 0x2D80B0C4UL, 0x3ED04330UL, 0xCCBBC033UL,
+ 0xA24BB5A6UL, 0x502036A5UL, 0x4370C551UL, 0xB11B4652UL, 0x65D122B9UL, 0x97BAA1BAUL, 0x84EA524EUL, 0x7681D14DUL,
+ 0x2892ED69UL, 0xDAF96E6AUL, 0xC9A99D9EUL, 0x3BC21E9DUL, 0xEF087A76UL, 0x1D63F975UL, 0x0E330A81UL, 0xFC588982UL,
+ 0xB21572C9UL, 0x407EF1CAUL, 0x532E023EUL, 0xA145813DUL, 0x758FE5D6UL, 0x87E466D5UL, 0x94B49521UL, 0x66DF1622UL,
+ 0x38CC2A06UL, 0xCAA7A905UL, 0xD9F75AF1UL, 0x2B9CD9F2UL, 0xFF56BD19UL, 0x0D3D3E1AUL, 0x1E6DCDEEUL, 0xEC064EEDUL,
+ 0xC38D26C4UL, 0x31E6A5C7UL, 0x22B65633UL, 0xD0DDD530UL, 0x0417B1DBUL, 0xF67C32D8UL, 0xE52CC12CUL, 0x1747422FUL,
+ 0x49547E0BUL, 0xBB3FFD08UL, 0xA86F0EFCUL, 0x5A048DFFUL, 0x8ECEE914UL, 0x7CA56A17UL, 0x6FF599E3UL, 0x9D9E1AE0UL,
+ 0xD3D3E1ABUL, 0x21B862A8UL, 0x32E8915CUL, 0xC083125FUL, 0x144976B4UL, 0xE622F5B7UL, 0xF5720643UL, 0x07198540UL,
+ 0x590AB964UL, 0xAB613A67UL, 0xB831C993UL, 0x4A5A4A90UL, 0x9E902E7BUL, 0x6CFBAD78UL, 0x7FAB5E8CUL, 0x8DC0DD8FUL,
+ 0xE330A81AUL, 0x115B2B19UL, 0x020BD8EDUL, 0xF0605BEEUL, 0x24AA3F05UL, 0xD6C1BC06UL, 0xC5914FF2UL, 0x37FACCF1UL,
+ 0x69E9F0D5UL, 0x9B8273D6UL, 0x88D28022UL, 0x7AB90321UL, 0xAE7367CAUL, 0x5C18E4C9UL, 0x4F48173DUL, 0xBD23943EUL,
+ 0xF36E6F75UL, 0x0105EC76UL, 0x12551F82UL, 0xE03E9C81UL, 0x34F4F86AUL, 0xC69F7B69UL, 0xD5CF889DUL, 0x27A40B9EUL,
+ 0x79B737BAUL, 0x8BDCB4B9UL, 0x988C474DUL, 0x6AE7C44EUL, 0xBE2DA0A5UL, 0x4C4623A6UL, 0x5F16D052UL, 0xAD7D5351UL,
+};
-static byte_t* txSerializeU32(byte_t* const destination_buffer, const uint32_t value)
+/// Do not forget to apply the output XOR when done, or use crc_full().
+static uint32_t crc_add(uint32_t crc, const size_t n_bytes, const void* const data)
{
- byte_t* ptr = destination_buffer;
- for (size_t i = 0; i < sizeof(value); i++) // We sincerely hope that the compiler will use memcpy.
- {
- *ptr++ = (byte_t) ((byte_t) (value >> (i * ByteWidth)) & ByteMask);
+ UDPARD_ASSERT((data != NULL) || (n_bytes == 0U));
+ const byte_t* p = (const byte_t*)data;
+ for (size_t i = 0; i < n_bytes; i++) {
+ crc = (crc >> 8U) ^ crc_table[(*p++) ^ (crc & 0xFFU)];
}
- return ptr;
+ return crc;
}
-static byte_t* txSerializeU64(byte_t* const destination_buffer, const uint64_t value)
+static uint32_t crc_full(const size_t n_bytes, const void* const data)
{
- byte_t* ptr = destination_buffer;
- for (size_t i = 0; i < sizeof(value); i++) // We sincerely hope that the compiler will use memcpy.
- {
- *ptr++ = (byte_t) ((byte_t) (value >> (i * ByteWidth)) & ByteMask);
- }
- return ptr;
+ return crc_add(CRC_INITIAL, n_bytes, data) ^ CRC_OUTPUT_XOR;
}
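
The residue constants enable single-pass verification: running the CRC over a block with its own CRC appended (little-endian, as written by serialize_u32()) always lands on a fixed constant. A sketch of the property relied upon by header_deserialize() further below:

    byte_t msg[5U + CRC_SIZE_BYTES] = {'h', 'e', 'l', 'l', 'o'};
    (void) serialize_u32(&msg[5], crc_full(5, msg));
    UDPARD_ASSERT(crc_full(sizeof(msg), msg) == CRC_RESIDUE_AFTER_OUTPUT_XOR);
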
-static byte_t* txSerializeHeader(byte_t* const destination_buffer,
- const TransferMetadata meta,
- const uint32_t frame_index,
- const bool end_of_transfer)
-{
- byte_t* ptr = destination_buffer;
- *ptr++ = HEADER_VERSION;
- *ptr++ = (byte_t) meta.priority;
- ptr = txSerializeU16(ptr, meta.src_node_id);
- ptr = txSerializeU16(ptr, meta.dst_node_id);
- ptr = txSerializeU16(ptr, meta.data_specifier);
- ptr = txSerializeU64(ptr, meta.transfer_id);
- UDPARD_ASSERT((frame_index + 0UL) <= HEADER_FRAME_INDEX_MAX); // +0UL is to avoid a compiler warning.
- ptr = txSerializeU32(ptr, frame_index | (end_of_transfer ? HEADER_FRAME_INDEX_EOT_MASK : 0U));
- ptr = txSerializeU16(ptr, 0); // opaque user data
- // Header CRC in the big endian format. Optimization prospect: the header up to frame_index is constant in
- // multi-frame transfers, so we don't really need to recompute the CRC from scratch per frame.
- const uint16_t crc = headerCRCCompute(HEADER_SIZE_BYTES - HEADER_CRC_SIZE_BYTES, destination_buffer);
- *ptr++ = (byte_t) ((byte_t) (crc >> ByteWidth) & ByteMask);
- *ptr++ = (byte_t) (crc & ByteMask);
- UDPARD_ASSERT(ptr == (destination_buffer + HEADER_SIZE_BYTES));
- return ptr;
-}
+// --------------------------------------------- LIST CONTAINER ---------------------------------------------
-/// Produces a chain of Tx queue items for later insertion into the Tx queue. The tail is NULL if OOM.
-/// The caller is responsible for freeing the memory allocated for the chain.
-static TxChain txMakeChain(const struct UdpardTxMemoryResources memory,
- const uint_least8_t dscp_value_per_priority[UDPARD_PRIORITY_MAX + 1U],
- const size_t mtu,
- const UdpardMicrosecond deadline_usec,
- const TransferMetadata meta,
- const struct UdpardUDPIPEndpoint endpoint,
- const struct UdpardPayload payload,
- void* const user_transfer_reference)
+static bool is_listed(const udpard_list_t* const list, const udpard_listed_t* const member)
{
- UDPARD_ASSERT(mtu > 0);
- UDPARD_ASSERT((payload.data != NULL) || (payload.size == 0U));
- const size_t payload_size_with_crc = payload.size + TRANSFER_CRC_SIZE_BYTES;
- byte_t crc_bytes[TRANSFER_CRC_SIZE_BYTES];
- txSerializeU32(crc_bytes, transferCRCCompute(payload.size, payload.data));
- TxChain out = {NULL, NULL, 0};
- size_t offset = 0U;
- while (offset < payload_size_with_crc)
- {
- struct UdpardTxItem* const item = txNewItem(memory,
- dscp_value_per_priority,
- deadline_usec,
- meta.priority,
- endpoint,
- smaller(payload_size_with_crc - offset, mtu) + HEADER_SIZE_BYTES,
- user_transfer_reference);
- if (NULL == out.head)
- {
- out.head = item;
- }
- else
- {
- // C std, 6.7.2.1.15: A pointer to a structure object <...> points to its initial member, and vice versa.
- // Can't just read tqi->base because tqi may be NULL; https://github.com/OpenCyphal/libcanard/issues/203.
- out.tail->next_in_transfer = item;
- }
- out.tail = item;
- if (NULL == out.tail)
- {
- break;
- }
- const bool last = (payload_size_with_crc - offset) <= mtu;
- byte_t* const dst_buffer = item->datagram_payload.data;
- byte_t* write_ptr = txSerializeHeader(dst_buffer, meta, (uint32_t) out.count, last);
- if (offset < payload.size)
- {
- const size_t progress = smaller(payload.size - offset, mtu);
- // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling)
- (void) memcpy(write_ptr, ((const byte_t*) payload.data) + offset, progress);
- offset += progress;
- write_ptr += progress;
- UDPARD_ASSERT(offset <= payload.size);
- UDPARD_ASSERT((!last) || (offset == payload.size));
- }
- if (offset >= payload.size)
- {
- const size_t crc_offset = offset - payload.size;
- UDPARD_ASSERT(crc_offset < TRANSFER_CRC_SIZE_BYTES);
- const size_t available = item->datagram_payload.size - (size_t) (write_ptr - dst_buffer);
- UDPARD_ASSERT(available <= TRANSFER_CRC_SIZE_BYTES);
- const size_t write_size = smaller(TRANSFER_CRC_SIZE_BYTES - crc_offset, available);
- // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling)
- (void) memcpy(write_ptr, &crc_bytes[crc_offset], write_size);
- offset += write_size;
- }
- UDPARD_ASSERT((out.count + 0ULL) < HEADER_FRAME_INDEX_MAX); // +0 is to suppress warning.
- out.count++;
- }
- UDPARD_ASSERT((offset == payload_size_with_crc) || (out.tail == NULL));
- return out;
+ return (member->next != NULL) || (member->prev != NULL) || (list->head == member);
}
-static int32_t txPush(struct UdpardTx* const tx,
- const UdpardMicrosecond deadline_usec,
- const TransferMetadata meta,
- const struct UdpardUDPIPEndpoint endpoint,
- const struct UdpardPayload payload,
- void* const user_transfer_reference)
+/// No effect if not in the list.
+static void delist(udpard_list_t* const list, udpard_listed_t* const member)
{
- UDPARD_ASSERT(tx != NULL);
- int32_t out = 0; // The number of frames enqueued or negated error.
- const size_t mtu = larger(tx->mtu, 1U);
- const size_t frame_count = ((payload.size + TRANSFER_CRC_SIZE_BYTES + mtu) - 1U) / mtu;
- UDPARD_ASSERT((frame_count > 0U) && ((frame_count + 0ULL) <= INT32_MAX)); // +0 is to suppress warning.
- const bool anonymous = (*tx->local_node_id) > UDPARD_NODE_ID_MAX;
- const bool service = (meta.data_specifier & DATA_SPECIFIER_SERVICE_NOT_MESSAGE_MASK) != 0;
- if (anonymous && ((frame_count > 1) || service))
- {
- out = -UDPARD_ERROR_ANONYMOUS; // Only single-frame message transfers can be anonymous.
+ if (member->next != NULL) {
+ member->next->prev = member->prev;
}
- else if ((tx->queue_size + frame_count) > tx->queue_capacity)
- {
- out = -UDPARD_ERROR_CAPACITY; // Not enough space in the queue.
+ if (member->prev != NULL) {
+ member->prev->next = member->next;
}
- else
- {
- const TxChain chain = txMakeChain(tx->memory,
- tx->dscp_value_per_priority,
- mtu,
- deadline_usec,
- meta,
- endpoint,
- payload,
- user_transfer_reference);
- if (chain.tail != NULL)
- {
- UDPARD_ASSERT(frame_count == chain.count);
- struct UdpardTxItem* next = chain.head;
- do
- {
- const struct UdpardTreeNode* const res =
- cavlSearch(&tx->root, &next->base, &txAVLPredicate, &avlTrivialFactory);
- (void) res;
- UDPARD_ASSERT(res == &next->base);
- UDPARD_ASSERT(tx->root != NULL);
- next = next->next_in_transfer;
- } while (next != NULL);
- tx->queue_size += chain.count;
- UDPARD_ASSERT(tx->queue_size <= tx->queue_capacity);
- UDPARD_ASSERT((chain.count + 0ULL) <= INT32_MAX); // +0 is to suppress warning.
- out = (int32_t) chain.count;
- }
- else // The queue is large enough but we ran out of heap memory, so we have to unwind the chain.
- {
- out = -UDPARD_ERROR_MEMORY;
- struct UdpardTxItem* head = chain.head;
- while (head != NULL)
- {
- struct UdpardTxItem* const next = head->next_in_transfer;
- udpardTxFree(tx->memory, head);
- head = next;
- }
- }
+ if (list->head == member) {
+ list->head = member->next;
}
- UDPARD_ASSERT((out < 0) || (out >= 1));
- return out;
+ if (list->tail == member) {
+ list->tail = member->prev;
+ }
+ member->next = NULL;
+ member->prev = NULL;
+ UDPARD_ASSERT((list->head != NULL) == (list->tail != NULL));
}
-int_fast8_t udpardTxInit(struct UdpardTx* const self,
- const UdpardNodeID* const local_node_id,
- const size_t queue_capacity,
- const struct UdpardTxMemoryResources memory)
+/// If the item is already in the list, it will be delisted first. Can be used for moving to the front.
+static void enlist_head(udpard_list_t* const list, udpard_listed_t* const member)
{
- int_fast8_t ret = -UDPARD_ERROR_ARGUMENT;
- if ((NULL != self) && (NULL != local_node_id) && txValidateMemoryResources(memory))
- {
- ret = 0;
- memZero(sizeof(*self), self);
- self->local_node_id = local_node_id;
- self->queue_capacity = queue_capacity;
- self->mtu = UDPARD_MTU_DEFAULT;
- // The DSCP mapping recommended by the Specification is all zeroes, so we don't need to set it.
- self->memory = memory;
- self->queue_size = 0;
- self->root = NULL;
- }
- return ret;
-}
-
-int32_t udpardTxPublish(struct UdpardTx* const self,
- const UdpardMicrosecond deadline_usec,
- const enum UdpardPriority priority,
- const UdpardPortID subject_id,
- const UdpardTransferID transfer_id,
- const struct UdpardPayload payload,
- void* const user_transfer_reference)
-{
- int32_t out = -UDPARD_ERROR_ARGUMENT;
- const bool args_ok = (self != NULL) && (self->local_node_id != NULL) && (priority <= UDPARD_PRIORITY_MAX) &&
- (subject_id <= UDPARD_SUBJECT_ID_MAX) && ((payload.data != NULL) || (payload.size == 0U));
- if (args_ok)
- {
- out = txPush(self,
- deadline_usec,
- (TransferMetadata) {
- .priority = priority,
- .src_node_id = *self->local_node_id,
- .dst_node_id = UDPARD_NODE_ID_UNSET,
- .transfer_id = transfer_id,
- .data_specifier = subject_id,
- },
- makeSubjectUDPIPEndpoint(subject_id),
- payload,
- user_transfer_reference);
+ delist(list, member);
+ UDPARD_ASSERT((member->next == NULL) && (member->prev == NULL));
+ UDPARD_ASSERT((list->head != NULL) == (list->tail != NULL));
+ member->next = list->head;
+ if (list->head != NULL) {
+ list->head->prev = member;
}
- return out;
-}
-
-int32_t udpardTxRequest(struct UdpardTx* const self,
- const UdpardMicrosecond deadline_usec,
- const enum UdpardPriority priority,
- const UdpardPortID service_id,
- const UdpardNodeID server_node_id,
- const UdpardTransferID transfer_id,
- const struct UdpardPayload payload,
- void* const user_transfer_reference)
-{
- int32_t out = -UDPARD_ERROR_ARGUMENT;
- const bool args_ok = (self != NULL) && (self->local_node_id != NULL) && (priority <= UDPARD_PRIORITY_MAX) &&
- (service_id <= UDPARD_SERVICE_ID_MAX) && (server_node_id <= UDPARD_NODE_ID_MAX) &&
- ((payload.data != NULL) || (payload.size == 0U));
- if (args_ok)
- {
- out = txPush(self,
- deadline_usec,
- (TransferMetadata) {
- .priority = priority,
- .src_node_id = *self->local_node_id,
- .dst_node_id = server_node_id,
- .transfer_id = transfer_id,
- .data_specifier = DATA_SPECIFIER_SERVICE_NOT_MESSAGE_MASK |
- DATA_SPECIFIER_SERVICE_REQUEST_NOT_RESPONSE_MASK | service_id,
- },
- makeServiceUDPIPEndpoint(server_node_id),
- payload,
- user_transfer_reference);
+ list->head = member;
+ if (list->tail == NULL) {
+ list->tail = member;
}
- return out;
+ UDPARD_ASSERT((list->head != NULL) && (list->tail != NULL));
}
-int32_t udpardTxRespond(struct UdpardTx* const self,
- const UdpardMicrosecond deadline_usec,
- const enum UdpardPriority priority,
- const UdpardPortID service_id,
- const UdpardNodeID client_node_id,
- const UdpardTransferID transfer_id,
- const struct UdpardPayload payload,
- void* const user_transfer_reference)
-{
- int32_t out = -UDPARD_ERROR_ARGUMENT;
- const bool args_ok = (self != NULL) && (self->local_node_id != NULL) && (priority <= UDPARD_PRIORITY_MAX) &&
- (service_id <= UDPARD_SERVICE_ID_MAX) && (client_node_id <= UDPARD_NODE_ID_MAX) &&
- ((payload.data != NULL) || (payload.size == 0U));
- if (args_ok)
- {
- out = txPush(self,
- deadline_usec,
- (TransferMetadata) {
- .priority = priority,
- .src_node_id = *self->local_node_id,
- .dst_node_id = client_node_id,
- .transfer_id = transfer_id,
- .data_specifier = DATA_SPECIFIER_SERVICE_NOT_MESSAGE_MASK | service_id,
- },
- makeServiceUDPIPEndpoint(client_node_id),
- payload,
- user_transfer_reference);
- }
- return out;
-}
-
-struct UdpardTxItem* udpardTxPeek(const struct UdpardTx* const self)
+#define LIST_MEMBER(ptr, owner_type, owner_field) ((owner_type*)ptr_unbias((ptr), offsetof(owner_type, owner_field)))
+static void* ptr_unbias(const void* const ptr, const size_t offset)
{
- struct UdpardTxItem* out = NULL;
- if (self != NULL)
- {
- // Paragraph 6.7.2.1.15 of the C standard says:
- // A pointer to a structure object, suitably converted, points to its initial member, and vice versa.
- out = (struct UdpardTxItem*) (void*) cavlFindExtremum(self->root, false);
- }
- return out;
+ return (ptr == NULL) ? NULL : (void*)((char*)ptr - offset);
}
+#define LIST_TAIL(list, owner_type, owner_field) LIST_MEMBER((list).tail, owner_type, owner_field)
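
The list is intrusive: the udpard_listed_t links are embedded in the owning object, and LIST_MEMBER()/LIST_TAIL() recover the owner from a link pointer via offsetof. A hedged sketch with a made-up owner type:

    typedef struct
    {
        int             payload;
        udpard_listed_t listed;  // Intrusive hook; zero-initialized means "not listed".
    } my_item_t;

    udpard_list_t list = {0};
    my_item_t     item = {0};
    enlist_head(&list, &item.listed);                      // O(1) insertion at the head.
    my_item_t* tail = LIST_TAIL(list, my_item_t, listed);  // Recovers &item from list.tail.
    delist(&list, &item.listed);                           // O(1) removal; safe to repeat.
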
+
+// ---------------------------------------------------------------------------------------------------------------------
+// --------------------------------------------- HEADER ---------------------------------------------
+// ---------------------------------------------------------------------------------------------------------------------
+
+#define HEADER_SIZE_BYTES 48U
+#define HEADER_VERSION 2U
+#define HEADER_FLAG_RELIABLE 0x01U
+#define HEADER_FLAG_ACKNOWLEDGEMENT 0x02U
+#define HEADER_FRAME_INDEX_MAX 0xFFFFFFU /// 4 GiB with 256-byte MTU; 21.6 GiB with 1384-byte MTU
-struct UdpardTxItem* udpardTxPop(struct UdpardTx* const self, struct UdpardTxItem* const item)
+typedef struct
{
- if ((self != NULL) && (item != NULL))
- {
- // Paragraph 6.7.2.1.15 of the C standard says:
- // A pointer to a structure object, suitably converted, points to its initial member, and vice versa.
- // Note that the highest-priority frame is always a leaf node in the AVL tree, which means that it is very
- // cheap to remove.
- cavlRemove(&self->root, &item->base);
- UDPARD_ASSERT(self->queue_size > 0U);
- self->queue_size--;
- }
- return item;
+ udpard_prio_t priority;
+
+ bool flag_reliable;
+ bool flag_acknowledgement;
+
+ uint32_t transfer_payload_size;
+ uint64_t transfer_id;
+ uint64_t sender_uid;
+ uint64_t topic_hash;
+} meta_t;
+
+static byte_t* header_serialize(byte_t* const buffer,
+ const meta_t meta,
+ const uint32_t frame_index,
+ const uint32_t frame_payload_offset,
+ const uint32_t prefix_crc)
+{
+ byte_t* ptr = buffer;
+ byte_t flags = 0;
+ if (meta.flag_reliable) {
+ flags |= HEADER_FLAG_RELIABLE;
+ }
+ if (meta.flag_acknowledgement) {
+ flags |= HEADER_FLAG_ACKNOWLEDGEMENT;
+ }
+ *ptr++ = (byte_t)(HEADER_VERSION | (meta.priority << 5U));
+ *ptr++ = flags;
+ *ptr++ = 0;
+ *ptr++ = 0;
+ ptr = serialize_u32(ptr, frame_index & HEADER_FRAME_INDEX_MAX);
+ ptr = serialize_u32(ptr, frame_payload_offset);
+ ptr = serialize_u32(ptr, meta.transfer_payload_size);
+ ptr = serialize_u64(ptr, meta.transfer_id);
+ ptr = serialize_u64(ptr, meta.sender_uid);
+ ptr = serialize_u64(ptr, meta.topic_hash);
+ ptr = serialize_u32(ptr, prefix_crc);
+ ptr = serialize_u32(ptr, crc_full(HEADER_SIZE_BYTES - CRC_SIZE_BYTES, buffer));
+ UDPARD_ASSERT((size_t)(ptr - buffer) == HEADER_SIZE_BYTES);
+ return ptr;
}
-void udpardTxFree(const struct UdpardTxMemoryResources memory, struct UdpardTxItem* const item)
-{
- if (item != NULL)
- {
- if (item->datagram_payload.data != NULL)
- {
- memFree(memory.payload, item->datagram_payload.size, item->datagram_payload.data);
+static bool header_deserialize(const udpard_bytes_mut_t dgram_payload,
+ meta_t* const out_meta,
+ uint32_t* const frame_index,
+ uint32_t* const frame_payload_offset,
+ uint32_t* const prefix_crc,
+ udpard_bytes_t* const out_payload)
+{
+ UDPARD_ASSERT(out_payload != NULL);
+ bool ok = (dgram_payload.size >= HEADER_SIZE_BYTES) && (dgram_payload.data != NULL) && //
+ (crc_full(HEADER_SIZE_BYTES, dgram_payload.data) == CRC_RESIDUE_AFTER_OUTPUT_XOR);
+ if (ok) {
+ const byte_t* ptr = dgram_payload.data;
+ const byte_t head = *ptr++;
+ const byte_t version = head & 0x1FU;
+ if (version == HEADER_VERSION) {
+ out_meta->priority = (udpard_prio_t)((byte_t)(head >> 5U) & 0x07U);
+ const byte_t flags = *ptr++;
+ out_meta->flag_reliable = (flags & HEADER_FLAG_RELIABLE) != 0U;
+ out_meta->flag_acknowledgement = (flags & HEADER_FLAG_ACKNOWLEDGEMENT) != 0U;
+ const byte_t incompatibility = (byte_t)(flags & ~(HEADER_FLAG_RELIABLE | HEADER_FLAG_ACKNOWLEDGEMENT));
+ ptr += 2U;
+ ptr = deserialize_u32(ptr, frame_index);
+ ptr = deserialize_u32(ptr, frame_payload_offset);
+ ptr = deserialize_u32(ptr, &out_meta->transfer_payload_size);
+ ptr = deserialize_u64(ptr, &out_meta->transfer_id);
+ ptr = deserialize_u64(ptr, &out_meta->sender_uid);
+ ptr = deserialize_u64(ptr, &out_meta->topic_hash);
+ ptr = deserialize_u32(ptr, prefix_crc);
+ (void)ptr;
+ // Set up the output payload view.
+ out_payload->size = dgram_payload.size - HEADER_SIZE_BYTES;
+ out_payload->data = (byte_t*)dgram_payload.data + HEADER_SIZE_BYTES;
+ // Finalize the fields.
+ *frame_index = HEADER_FRAME_INDEX_MAX & *frame_index;
+ // Validate the fields.
+ ok = ok && (incompatibility == 0U);
+ ok = ok && (((uint64_t)*frame_payload_offset + (uint64_t)out_payload->size) <=
+ (uint64_t)out_meta->transfer_payload_size);
+ ok = ok && ((0 == *frame_index) == (0 == *frame_payload_offset));
+ // The prefix-CRC of the first frame of a transfer equals the CRC of its payload.
+ ok = ok && ((0 < *frame_payload_offset) || (crc_full(out_payload->size, out_payload->data) == *prefix_crc));
+ // ACK frame requires zero offset.
+ ok = ok && ((!out_meta->flag_acknowledgement) || (*frame_payload_offset == 0U));
+ // Detect impossible flag combinations.
+ ok = ok && (!(out_meta->flag_reliable && out_meta->flag_acknowledgement));
+ } else {
+ ok = false;
}
-
- memFree(memory.fragment, sizeof(struct UdpardTxItem), item);
}
+ return ok;
}
-// =====================================================================================================================
-// ================================================= RX PIPELINE =================================================
-// =====================================================================================================================
+// ---------------------------------------------------------------------------------------------------------------------
+// --------------------------------------------- TX PIPELINE ---------------------------------------------
+// ---------------------------------------------------------------------------------------------------------------------
-/// All but the transfer metadata.
-typedef struct
-{
- uint32_t index;
- bool end_of_transfer;
- struct UdpardPayload payload; ///< Also contains the transfer CRC (but not the header CRC).
- struct UdpardMutablePayload origin; ///< The entirety of the free-able buffer passed from the application.
-} RxFrameBase;
-
-/// Full frame state.
-typedef struct
+typedef struct tx_frame_t
{
- RxFrameBase base;
- TransferMetadata meta;
-} RxFrame;
+ size_t refcount;
+ udpard_deleter_t deleter;
+ size_t* objcount;
+ struct tx_frame_t* next;
+ size_t size;
+ byte_t data[];
+} tx_frame_t;
-/// The primitive deserialization functions are endian-agnostic.
-static const byte_t* txDeserializeU16(const byte_t* const source_buffer, uint16_t* const out_value)
+static udpard_bytes_t tx_frame_view(const tx_frame_t* const frame)
{
- UDPARD_ASSERT((source_buffer != NULL) && (out_value != NULL));
- const byte_t* ptr = source_buffer;
- *out_value = *ptr;
- ptr++;
- *out_value |= (uint16_t) (((uint16_t) *ptr) << ByteWidth);
- ptr++;
- return ptr;
+ return (udpard_bytes_t){ .size = frame->size, .data = frame->data };
}
-static const byte_t* txDeserializeU32(const byte_t* const source_buffer, uint32_t* const out_value)
+static tx_frame_t* tx_frame_from_view(const udpard_bytes_t view)
{
- UDPARD_ASSERT((source_buffer != NULL) && (out_value != NULL));
- const byte_t* ptr = source_buffer;
- *out_value = 0;
- for (size_t i = 0; i < sizeof(*out_value); i++) // We sincerely hope that the compiler will use memcpy.
- {
- *out_value |= (uint32_t) ((uint32_t) *ptr << (i * ByteWidth)); // NOLINT(google-readability-casting) NOSONAR
- ptr++;
- }
- return ptr;
+ return (tx_frame_t*)ptr_unbias(view.data, offsetof(tx_frame_t, data));
}
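+
+// Note (derived from the two functions above): tx_frame_from_view() is the inverse of tx_frame_view();
+// it recovers the owning tx_frame_t from the address of its flexible data[] member, so for any frame f
+// the round trip holds:
+//
+//     UDPARD_ASSERT(tx_frame_from_view(tx_frame_view(f)) == f);
+//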
-static const byte_t* txDeserializeU64(const byte_t* const source_buffer, uint64_t* const out_value)
+static tx_frame_t* tx_frame_new(udpard_tx_t* const tx, const udpard_mem_t mem, const size_t data_size)
{
- UDPARD_ASSERT((source_buffer != NULL) && (out_value != NULL));
- const byte_t* ptr = source_buffer;
- *out_value = 0;
- for (size_t i = 0; i < sizeof(*out_value); i++) // We sincerely hope that the compiler will use memcpy.
- {
- *out_value |= ((uint64_t) *ptr << (i * ByteWidth));
- ptr++;
+ tx_frame_t* const frame = (tx_frame_t*)mem_alloc(mem, sizeof(tx_frame_t) + data_size);
+ if (frame != NULL) {
+ frame->refcount = 1U;
+ frame->deleter = udpard_make_deleter(mem);
+ frame->objcount = &tx->enqueued_frames_count;
+ frame->next = NULL;
+ frame->size = data_size;
+ // Update the count; this is decremented when the frame is freed upon refcount reaching zero.
+ tx->enqueued_frames_count++;
+ UDPARD_ASSERT(tx->enqueued_frames_count <= tx->enqueued_frames_limit);
}
- return ptr;
+ return frame;
}
-/// This is roughly the inverse of the txSerializeHeader function, but it also handles the frame payload.
-static bool rxParseFrame(const struct UdpardMutablePayload datagram_payload, RxFrame* const out)
+/// The ordering is by topic hash first, then by transfer-ID.
+/// This groups all transfers of a topic together, allowing quick lookup by topic with an arbitrary transfer-ID.
+typedef struct
{
- UDPARD_ASSERT((out != NULL) && (datagram_payload.data != NULL));
- out->base.origin = datagram_payload;
- bool ok = false;
- if (datagram_payload.size > 0) // HEADER_SIZE_BYTES may change in the future depending on the header version.
- {
- const byte_t* ptr = (const byte_t*) datagram_payload.data;
- const uint_fast8_t version = *ptr++;
- // The frame payload cannot be empty because every transfer has at least four bytes of CRC.
- if ((datagram_payload.size > HEADER_SIZE_BYTES) && (version == HEADER_VERSION) &&
- (headerCRCCompute(HEADER_SIZE_BYTES, datagram_payload.data) == HEADER_CRC_RESIDUE))
- {
- const uint_fast8_t priority = *ptr++;
- if (priority <= UDPARD_PRIORITY_MAX)
- {
- out->meta.priority = (enum UdpardPriority) priority;
- ptr = txDeserializeU16(ptr, &out->meta.src_node_id);
- ptr = txDeserializeU16(ptr, &out->meta.dst_node_id);
- ptr = txDeserializeU16(ptr, &out->meta.data_specifier);
- ptr = txDeserializeU64(ptr, &out->meta.transfer_id);
- uint32_t index_eot = 0;
- ptr = txDeserializeU32(ptr, &index_eot);
- out->base.index = (uint32_t) (index_eot & HEADER_FRAME_INDEX_MASK);
- out->base.end_of_transfer = (index_eot & HEADER_FRAME_INDEX_EOT_MASK) != 0U;
- ptr += 2; // Opaque user data.
- ptr += HEADER_CRC_SIZE_BYTES;
- out->base.payload.data = ptr;
- out->base.payload.size = datagram_payload.size - HEADER_SIZE_BYTES;
- ok = true;
- UDPARD_ASSERT((ptr == (((const byte_t*) datagram_payload.data) + HEADER_SIZE_BYTES)) &&
- (out->base.payload.size > 0U));
- }
+ uint64_t topic_hash;
+ uint64_t transfer_id;
+} tx_transfer_key_t;
+
+/// The transmission scheduler maintains several indexes for the transfers in the pipeline.
+/// The segregated priority queue only contains transfers that are ready for transmission.
+/// The staged index contains transfers ordered by readiness for retransmission;
+/// transfers that will no longer be transmitted but are retained waiting for the ack are in neither of these.
+/// The deadline index contains ALL transfers, ordered by their deadlines, used for purging expired transfers.
+/// The transfer index contains ALL transfers, used for lookup by (topic_hash, transfer_id).
+typedef struct tx_transfer_t
+{
+ udpard_tree_t index_staged; ///< Soonest to be ready on the left. Key: staged_until + transfer identity
+ udpard_tree_t index_deadline; ///< Soonest to expire on the left. Key: deadline + transfer identity
+ udpard_tree_t index_transfer; ///< Specific transfer lookup for ack management. Key: tx_transfer_key_t
+ udpard_listed_t queue[UDPARD_IFACE_COUNT_MAX]; ///< Listed when ready for transmission.
+ udpard_listed_t agewise; ///< Listed when created; oldest at the tail.
+ udpard_tree_t index_transfer_ack; ///< Only for acks. Key: tx_transfer_key_t but referencing remote_*.
+
+ /// We always keep a pointer to the head, plus a cursor that scans the frames during transmission.
+ /// Both are NULL if the payload is destroyed.
+ /// The head points to the first frame unless it is known that no (further) retransmissions are needed,
+ /// in which case the old head is deleted and the head points to the next frame to transmit.
+ tx_frame_t* head[UDPARD_IFACE_COUNT_MAX];
+
+ /// Mutable transmission state. All other fields, except for the index handles, are immutable.
+ tx_frame_t* cursor[UDPARD_IFACE_COUNT_MAX];
+ uint_fast8_t epoch; ///< Does not overflow due to exponential backoff; e.g. 1us with epoch=48 => 9 years.
+ udpard_us_t staged_until;
+
+ /// Constant transfer properties supplied by the client.
+ /// The remote_* fields are identical to the local ones except in the case of ack transfers, where they contain the
+ /// values encoded in the ack message. This is needed to find pending acks (to minimize duplicates);
+ /// in the future we may even remove them and accept potential ack duplication, since they are idempotent and cheap.
+ /// By default, upon construction, the remote_* fields equal the local ones, which is valid for ordinary messages.
+ uint64_t topic_hash;
+ uint64_t transfer_id;
+ uint64_t remote_topic_hash;
+ uint64_t remote_transfer_id;
+ udpard_us_t deadline;
+ bool reliable;
+ udpard_prio_t priority;
+ uint16_t iface_bitmap; ///< Guaranteed to have at least one bit set within UDPARD_IFACE_COUNT_MAX.
+ udpard_udpip_ep_t p2p_destination[UDPARD_IFACE_COUNT_MAX]; ///< Only for P2P transfers.
+ udpard_user_context_t user;
+
+ void (*feedback)(udpard_tx_t*, udpard_tx_feedback_t);
+} tx_transfer_t;
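+
+// Lifetime summary of a transfer's index membership (derived from the comments above):
+//
+//     state                            queue[]       index_staged   index_deadline   index_transfer
+//     ready for (re)transmission       listed        if reliable    yes              yes
+//     staged awaiting retransmission   not listed    yes            yes              yes
+//     retained only awaiting an ack    not listed    no             yes              yes
+//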
+
+static bool tx_validate_mem_resources(const udpard_tx_mem_resources_t memory)
+{
+ for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+ if ((memory.payload[i].vtable == NULL) || //
+ (memory.payload[i].vtable->base.free == NULL) || //
+ (memory.payload[i].vtable->alloc == NULL)) {
+ return false;
}
- // Parsers for other header versions may be added here later.
- }
- if (ok) // Version-agnostic semantics check.
- {
- UDPARD_ASSERT(out->base.payload.size > 0); // Follows from the prior checks.
- const bool anonymous = out->meta.src_node_id == UDPARD_NODE_ID_UNSET;
- const bool broadcast = out->meta.dst_node_id == UDPARD_NODE_ID_UNSET;
- const bool service = (out->meta.data_specifier & DATA_SPECIFIER_SERVICE_NOT_MESSAGE_MASK) != 0;
- const bool single_frame = (out->base.index == 0) && out->base.end_of_transfer;
- ok = service ? ((!broadcast) && (!anonymous)) : (broadcast && ((!anonymous) || single_frame));
- ok = ok && (out->meta.transfer_id != TRANSFER_ID_UNSET);
}
- return ok;
+ return (memory.transfer.vtable != NULL) && //
+ (memory.transfer.vtable->base.free != NULL) && //
+ (memory.transfer.vtable->alloc != NULL);
}
-static bool rxValidateMemoryResources(const struct UdpardRxMemoryResources memory)
+static void tx_transfer_free_payload(tx_transfer_t* const tr)
{
- return (memory.session.allocate != NULL) && (memory.session.deallocate != NULL) &&
- (memory.fragment.allocate != NULL) && (memory.fragment.deallocate != NULL) &&
- (memory.payload.deallocate != NULL);
+ UDPARD_ASSERT(tr != NULL);
+ for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+ const tx_frame_t* frame = tr->head[i];
+ while (frame != NULL) {
+ const tx_frame_t* const next = frame->next;
+ udpard_tx_refcount_dec(tx_frame_view(frame));
+ frame = next;
+ }
+ tr->head[i] = NULL;
+ tr->cursor[i] = NULL;
+ }
}
-/// This helper is needed to minimize the risk of argument swapping when passing these two resources around,
-/// as they almost always go side by side.
-typedef struct
-{
- struct UdpardMemoryResource fragment;
- struct UdpardMemoryDeleter payload;
-} RxMemory;
-
-typedef struct
-{
- struct UdpardTreeNode base;
- struct RxFragment* this; // This is needed to avoid pointer arithmetic with multiple inheritance.
-} RxFragmentTreeNode;
-
-/// This is designed to be convertible to/from UdpardFragment, so that the application could be
-/// given a linked list of these objects represented as a list of UdpardFragment.
-typedef struct RxFragment
-{
- struct UdpardFragment base;
- RxFragmentTreeNode tree;
- uint32_t frame_index;
-} RxFragment;
-
-/// Internally, the RX pipeline is arranged as follows:
-///
-/// - There is one port per subscription or an RPC-service listener. Within the port, there are N sessions,
-/// one session per remote node emitting transfers on this port (i.e., on this subject, or sending
-/// request/response of this service). Sessions are constructed dynamically in memory provided by
-/// UdpardMemoryResource.
+/// Currently, we use a very simple implementation that ceases delivery attempts after the first acknowledgement
+/// is received, similar to the CAN bus. This mode of reliability is useful in the following scenarios:
///
-/// - Per session, there are UDPARD_NETWORK_INTERFACE_COUNT_MAX interface states to support interface redundancy.
+/// - Topics with a single subscriber, or transfers sent via the P2P transport (e.g., responses to published
+///   messages). With a single recipient, one acknowledgement is sufficient to guarantee delivery.
///
-/// - Per interface, there are RX_SLOT_COUNT slots; a slot keeps the state of a transfer in the process of being
-/// reassembled which includes its payload fragments.
+/// - The application only cares about one acknowledgement (anycast), e.g., with modular redundant nodes.
///
-/// Port -> Session -> Interface -> Slot -> Fragments.
+/// - The application assumes that if one copy was delivered successfully, then other copies have likely
+/// succeeded as well (depends on the required reliability guarantees), similar to the CAN bus.
///
-/// Consider the following examples, where A,B,C denote distinct multi-frame transfers:
-///
-/// A0 A1 A2 B0 B1 B2 -- two transfers without OOO frames; both accepted
-/// A2 A0 A1 B0 B2 B1 -- two transfers with OOO frames; both accepted
-/// A0 A1 B0 A2 B1 B2 -- two transfers with interleaved frames; both accepted (this is why we need 2 slots)
-/// B1 A2 A0 C0 B0 A1 C1 -- B evicted by C; A and C accepted, B dropped (to accept B we would need 3 slots)
-/// B0 A0 A1 C0 B1 A2 C1 -- ditto
-/// A0 A1 C0 B0 A2 C1 B1 -- A evicted by B; B and C accepted, A dropped
-///
-/// In this implementation we postpone the implicit truncation until all fragments of a transfer are received.
-/// Early truncation such that excess payload is not stored in memory at all is difficult to implement if
-/// out-of-order reassembly is a requirement.
-/// To implement early truncation with out-of-order reassembly, we need to deduce the MTU of the sender per transfer
-/// (which is easy as we only need to take note of the payload size of any non-last frame of the transfer),
-/// then, based on the MTU, determine the maximum frame index we should accept (higher indexes will be dropped);
-/// then, for each fragment (i.e., frame) we need to compute the CRC (including those that are discarded).
-/// At the end, when all frames have been observed, combine all CRCs to obtain the final transfer CRC
-/// (this is possible because all common CRC functions are linear).
-typedef struct
-{
- UdpardMicrosecond ts_usec; ///< Timestamp of the earliest frame; TIMESTAMP_UNSET upon restart.
- UdpardTransferID transfer_id; ///< When first constructed, this shall be set to UINT64_MAX (unreachable value).
- uint32_t max_index; ///< Maximum observed frame index in this transfer (so far); zero upon restart.
- uint32_t eot_index; ///< Frame index where the EOT flag was observed; FRAME_INDEX_UNSET upon restart.
- uint32_t accepted_frames; ///< Number of frames accepted so far.
- size_t payload_size;
- RxFragmentTreeNode* fragments;
-} RxSlot;
+/// TODO In the future, there are plans to extend this mechanism to track the number of acknowledgements per topic,
+/// such that we can retain transfers until a specified number of acknowledgements have been received. A remote
+/// node can be considered to have disappeared if it failed to acknowledge a transfer after the maximum number
+/// of attempts has been made. This is somewhat similar in principle to the connection-oriented DDS/RTPS approach,
+/// where pub/sub associations are established and removed automatically, transparently to the application.
+static void tx_transfer_retire(udpard_tx_t* const tx, tx_transfer_t* const tr, const bool success)
+{
+ // Construct the feedback object first before the transfer is destroyed.
+ const udpard_tx_feedback_t fb = { .user = tr->user, .acknowledgements = success ? 1 : 0 };
+ UDPARD_ASSERT(tr->reliable == (tr->feedback != NULL));
+    // Save the feedback pointer, as the transfer object is freed before the callback is invoked.
+ void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t) = tr->feedback;
+
+ // Remove from all indexes and lists.
+ for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+ delist(&tx->queue[i][tr->priority], &tr->queue[i]);
+ }
+ delist(&tx->agewise, &tr->agewise);
+ (void)cavl2_remove_if(&tx->index_staged, &tr->index_staged);
+ cavl2_remove(&tx->index_deadline, &tr->index_deadline);
+ cavl2_remove(&tx->index_transfer, &tr->index_transfer);
+ (void)cavl2_remove_if(&tx->index_transfer_ack, &tr->index_transfer_ack);
+
+ // Free the memory. The payload memory may already be empty depending on where we were invoked from.
+ tx_transfer_free_payload(tr);
+ mem_free(tx->memory.transfer, sizeof(tx_transfer_t), tr);
+
+ // Finally, when the internal state is updated and consistent, invoke the feedback callback if any.
+ if (feedback != NULL) {
+ feedback(tx, fb);
+ }
+}
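+
+// Illustrative sketch (not part of the library; the my_* names are hypothetical): a feedback callback as seen
+// by the application. A transfer retired with success reports one acknowledgement; an expired or sacrificed
+// transfer reports zero.
+//
+//     static void my_on_feedback(udpard_tx_t* const tx, const udpard_tx_feedback_t fb)
+//     {
+//         if (fb.acknowledgements > 0) {
+//             my_mark_delivered(fb.user); // hypothetical application bookkeeping
+//         } else {
+//             my_mark_failed(fb.user);
+//         }
+//         (void)tx;
+//     }
+//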
+
+/// When the queue is exhausted, finds a transfer to sacrifice using simple heuristics and returns it.
+/// Will return NULL if there are no transfers worth sacrificing (no queue space can be reclaimed).
+/// We cannot simply stop accepting new transfers when the queue is full, because it may be caused by a single
+/// stalled interface holding back progress for all transfers.
+/// The heuristics are subject to review and improvement.
+static tx_transfer_t* tx_sacrifice(udpard_tx_t* const tx) { return LIST_TAIL(tx->agewise, tx_transfer_t, agewise); }
+
+/// True on success, false if not possible to reclaim enough space.
+static bool tx_ensure_queue_space(udpard_tx_t* const tx, const size_t total_frames_needed)
+{
+ if (total_frames_needed > tx->enqueued_frames_limit) {
+        return false; // Impossible to satisfy even with an empty queue.
+ }
+ while (total_frames_needed > (tx->enqueued_frames_limit - tx->enqueued_frames_count)) {
+ tx_transfer_t* const tr = tx_sacrifice(tx);
+ if (tr == NULL) {
+ break; // We may have no transfers anymore but the NIC TX driver could still be holding some frames.
+ }
+ tx_transfer_retire(tx, tr, false);
+ tx->errors_sacrifice++;
+ }
+ return total_frames_needed <= (tx->enqueued_frames_limit - tx->enqueued_frames_count);
+}
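+
+// For example (hypothetical numbers): with enqueued_frames_limit = 100, enqueued_frames_count = 97, and
+// total_frames_needed = 5, the oldest transfers are retired one by one until at least 5 frame slots are free,
+// or until no sacrificable transfers remain, in which case false is returned.
+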
+// Key for time-ordered TX indices with stable tiebreaking.
typedef struct
{
- UdpardMicrosecond ts_usec; ///< The timestamp of the last valid transfer to arrive on this interface.
- RxSlot slots[RX_SLOT_COUNT];
-} RxIface;
+ udpard_us_t time;
+ uint64_t topic_hash;
+ uint64_t transfer_id;
+} tx_time_key_t;
-/// This type is forward-declared externally, hence why it has such a long name with the "udpard" prefix.
-/// Keep in mind that we have a dedicated session object per remote node per port; this means that the states
-/// kept here -- the timestamp and the transfer-ID -- are specific per remote node, as it should be.
-struct UdpardInternalRxSession
-{
- struct UdpardTreeNode base;
- /// The remote node-ID is needed here as this is the ordering/search key.
- UdpardNodeID remote_node_id;
- /// This shared state is used for redundant transfer deduplication.
- /// Redundancies occur as a result of the use of multiple network interfaces, spurious frame duplication along
- /// the network path, and trivial forward error correction through duplication (if used by the sender).
- UdpardMicrosecond last_ts_usec;
- UdpardTransferID last_transfer_id;
- /// Each redundant interface maintains its own session state independently.
- /// The first interface to receive a transfer takes precedence, thus the redundant group always operates
- /// at the speed of the fastest interface. Duplicate transfers delivered by the slower interfaces are discarded.
- RxIface ifaces[UDPARD_NETWORK_INTERFACE_COUNT_MAX];
-};
-
-// -------------------------------------------------- RX FRAGMENT --------------------------------------------------
-
-/// Frees all fragments in the tree and their payload buffers. Destroys the passed fragment.
-/// This is meant to be invoked on the root of the tree.
-/// The maximum recursion depth is ceil(1.44*log2(FRAME_INDEX_MAX+1)-0.328) = 45 levels.
-// NOLINTNEXTLINE(misc-no-recursion) MISRA C:2012 rule 17.2
-static void rxFragmentDestroyTree(RxFragment* const self, const RxMemory memory)
+// Compare staged transfers by time then by transfer identity.
+static int32_t tx_cavl_compare_staged(const void* const user, const udpard_tree_t* const node)
{
- UDPARD_ASSERT(self != NULL);
- memFreePayload(memory.payload, self->base.origin);
- for (uint_fast8_t i = 0; i < 2; i++)
- {
- RxFragmentTreeNode* const child = (RxFragmentTreeNode*) self->tree.base.lr[i];
- if (child != NULL)
- {
- UDPARD_ASSERT(child->base.up == &self->tree.base);
- rxFragmentDestroyTree(child->this, memory); // NOSONAR recursion
- }
- }
- memFree(memory.fragment, sizeof(RxFragment), self); // self-destruct
+ const tx_time_key_t* const key = (const tx_time_key_t*)user;
+ const tx_transfer_t* const tr = CAVL2_TO_OWNER(node, tx_transfer_t, index_staged); // clang-format off
+ if (key->time < tr->staged_until) { return -1; }
+ if (key->time > tr->staged_until) { return +1; }
+ if (key->topic_hash < tr->topic_hash) { return -1; }
+ if (key->topic_hash > tr->topic_hash) { return +1; }
+ if (key->transfer_id < tr->transfer_id) { return -1; }
+ if (key->transfer_id > tr->transfer_id) { return +1; }
+ return 0; // clang-format on
}
-/// Frees all fragments in the list and their payload buffers. Destroys the passed fragment.
-/// This is meant to be invoked on the head of the list.
-/// This function is needed because when a fragment tree is transformed into a list, the tree structure itself
-/// is invalidated and cannot be used to free the fragments anymore.
-static void rxFragmentDestroyList(struct UdpardFragment* const head, const RxMemory memory)
+// Compare deadlines by time then by transfer identity.
+static int32_t tx_cavl_compare_deadline(const void* const user, const udpard_tree_t* const node)
{
- struct UdpardFragment* handle = head;
- while (handle != NULL)
- {
- struct UdpardFragment* const next = handle->next;
- memFreePayload(memory.payload, handle->origin); // May be NULL, is okay.
- memFree(memory.fragment, sizeof(RxFragment), handle);
- handle = next;
- }
+ const tx_time_key_t* const key = (const tx_time_key_t*)user;
+ const tx_transfer_t* const tr = CAVL2_TO_OWNER(node, tx_transfer_t, index_deadline); // clang-format off
+ if (key->time < tr->deadline) { return -1; }
+ if (key->time > tr->deadline) { return +1; }
+ if (key->topic_hash < tr->topic_hash) { return -1; }
+ if (key->topic_hash > tr->topic_hash) { return +1; }
+ if (key->transfer_id < tr->transfer_id) { return -1; }
+ if (key->transfer_id > tr->transfer_id) { return +1; }
+ return 0; // clang-format on
}
-
-// -------------------------------------------------- RX SLOT --------------------------------------------------
-
-static void rxSlotFree(RxSlot* const self, const RxMemory memory)
+static int32_t tx_cavl_compare_transfer(const void* const user, const udpard_tree_t* const node)
{
- UDPARD_ASSERT(self != NULL);
- if (self->fragments != NULL)
- {
- rxFragmentDestroyTree(self->fragments->this, memory);
- self->fragments = NULL;
- }
+ const tx_transfer_key_t* const key = (const tx_transfer_key_t*)user;
+ const tx_transfer_t* const tr = CAVL2_TO_OWNER(node, tx_transfer_t, index_transfer); // clang-format off
+ if (key->topic_hash < tr->topic_hash) { return -1; }
+ if (key->topic_hash > tr->topic_hash) { return +1; }
+ if (key->transfer_id < tr->transfer_id) { return -1; }
+ if (key->transfer_id > tr->transfer_id) { return +1; }
+ return 0; // clang-format on
}
-
-static void rxSlotRestart(RxSlot* const self, const UdpardTransferID transfer_id, const RxMemory memory)
+static int32_t tx_cavl_compare_transfer_remote(const void* const user, const udpard_tree_t* const node)
{
- UDPARD_ASSERT(self != NULL);
- rxSlotFree(self, memory);
- self->ts_usec = TIMESTAMP_UNSET; // Will be assigned when the first frame of the transfer has arrived.
- self->transfer_id = transfer_id;
- self->max_index = 0;
- self->eot_index = FRAME_INDEX_UNSET;
- self->accepted_frames = 0;
- self->payload_size = 0;
+ const tx_transfer_key_t* const key = (const tx_transfer_key_t*)user;
+ const tx_transfer_t* const tr = CAVL2_TO_OWNER(node, tx_transfer_t, index_transfer_ack); // clang-format off
+ if (key->topic_hash < tr->remote_topic_hash) { return -1; }
+ if (key->topic_hash > tr->remote_topic_hash) { return +1; }
+ if (key->transfer_id < tr->remote_transfer_id) { return -1; }
+ if (key->transfer_id > tr->remote_transfer_id) { return +1; }
+ return 0; // clang-format on
}
-/// This is a helper for rxSlotRestart that restarts the transfer for the next transfer-ID value.
-/// The transfer-ID increment is necessary to weed out duplicate transfers.
-static void rxSlotRestartAdvance(RxSlot* const self, const RxMemory memory)
+static tx_transfer_t* tx_transfer_find(udpard_tx_t* const tx, const uint64_t topic_hash, const uint64_t transfer_id)
{
- rxSlotRestart(self, self->transfer_id + 1U, memory);
+ const tx_transfer_key_t key = { .topic_hash = topic_hash, .transfer_id = transfer_id };
+ return CAVL2_TO_OWNER(
+ cavl2_find(tx->index_transfer, &key, &tx_cavl_compare_transfer), tx_transfer_t, index_transfer);
}
-typedef struct
-{
- uint32_t frame_index;
- bool accepted;
- struct UdpardMemoryResource memory_fragment;
-} RxSlotUpdateContext;
-
-static int_fast8_t rxSlotFragmentSearch(void* const user_reference, // NOSONAR Cavl API requires non-const.
- const struct UdpardTreeNode* node)
+/// True iff listed in at least one interface queue.
+static bool tx_is_pending(const udpard_tx_t* const tx, const tx_transfer_t* const tr)
{
- UDPARD_ASSERT((user_reference != NULL) && (node != NULL));
- return compare32(((const RxSlotUpdateContext*) user_reference)->frame_index,
- ((const RxFragmentTreeNode*) node)->this->frame_index);
+ for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+ if (is_listed(&tx->queue[i][tr->priority], &tr->queue[i])) {
+ return true;
+ }
+ }
+ return false;
}
-static struct UdpardTreeNode* rxSlotFragmentFactory(void* const user_reference)
+/// Returns the head of the transfer chain; NULL on OOM.
+static tx_frame_t* tx_spool(udpard_tx_t* const tx,
+ const udpard_mem_t memory,
+ const size_t mtu,
+ const meta_t meta,
+ const udpard_bytes_scattered_t payload)
{
- RxSlotUpdateContext* const ctx = (RxSlotUpdateContext*) user_reference;
- UDPARD_ASSERT((ctx != NULL) && (ctx->memory_fragment.allocate != NULL) &&
- (ctx->memory_fragment.deallocate != NULL));
- struct UdpardTreeNode* out = NULL;
- RxFragment* const frag = memAlloc(ctx->memory_fragment, sizeof(RxFragment));
- if (frag != NULL)
- {
- memZero(sizeof(RxFragment), frag);
- out = &frag->tree.base; // this is not an escape bug, we retain the pointer via "this"
- frag->frame_index = ctx->frame_index;
- frag->tree.this = frag; // <-- right here, see?
- ctx->accepted = true;
+ UDPARD_ASSERT(mtu > 0);
+ uint32_t prefix_crc = CRC_INITIAL;
+ tx_frame_t* head = NULL;
+ tx_frame_t* tail = NULL;
+ size_t frame_index = 0U;
+ size_t offset = 0U;
+ bytes_scattered_reader_t reader = { .cursor = &payload, .position = 0U };
+ do {
+ // Compute the size of the next frame, allocate it and link it up in the chain.
+ const size_t progress = smaller(meta.transfer_payload_size - offset, mtu);
+ tx_frame_t* const item = tx_frame_new(tx, memory, progress + HEADER_SIZE_BYTES);
+ if (NULL == head) {
+ head = item;
+ } else {
+ tail->next = item;
+ }
+ tail = item;
+ // On OOM, deallocate the entire chain and quit.
+ if (NULL == tail) {
+ while (head != NULL) {
+ tx_frame_t* const next = head->next;
+ udpard_tx_refcount_dec(tx_frame_view(head));
+ head = next;
+ }
+ break;
+ }
+ // Populate the frame contents.
+ byte_t* const payload_ptr = &tail->data[HEADER_SIZE_BYTES];
+ bytes_scattered_read(&reader, progress, payload_ptr);
+ prefix_crc = crc_add(prefix_crc, progress, payload_ptr);
+ const byte_t* const end_of_header =
+ header_serialize(tail->data, meta, (uint32_t)frame_index, (uint32_t)offset, prefix_crc ^ CRC_OUTPUT_XOR);
+ UDPARD_ASSERT(end_of_header == payload_ptr);
+ (void)end_of_header;
+ // Advance the state.
+ ++frame_index;
+ offset += progress;
+ UDPARD_ASSERT(offset <= meta.transfer_payload_size);
+ } while (offset < meta.transfer_payload_size);
+ UDPARD_ASSERT((offset == meta.transfer_payload_size) || ((head == NULL) && (tail == NULL)));
+ return head;
+}
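+
+// Note on the prefix CRC (derived from the loop above): each frame carries the CRC of the transfer payload up
+// to and including its own payload bytes. This lets a receiver validate the first frame in isolation (see
+// header_deserialize), while the prefix CRC of the last frame covers the entire transfer payload.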
+
+/// Derives the ack timeout for an outgoing transfer.
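+/// For example (hypothetical values): with a 1000 us baseline at priority 2, attempt 0 yields 4000 us,
+/// attempt 1 yields 8000 us, doubling per attempt until the shift saturates at 62.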
+static udpard_us_t tx_ack_timeout(const udpard_us_t baseline, const udpard_prio_t prio, const size_t attempts)
+{
+ UDPARD_ASSERT(baseline > 0);
+ UDPARD_ASSERT(prio < UDPARD_PRIORITY_COUNT);
+ return baseline * (1LL << smaller((size_t)prio + attempts, 62)); // NOLINT(*-signed-bitwise)
+}
+
+/// Updates the next attempt time and inserts the transfer into the staged index, unless the next scheduled
+/// transmission time is too close to the deadline, in which case no further attempts will be made.
+/// When invoked for the first time, staged_until must be set to the time of the first attempt (usually now).
+/// One can deduce whether further attempts are planned by checking if the transfer is in the staged index.
+///
+/// The idea is that retransmitting the transfer too close to the deadline is pointless, because
+/// the ack may arrive just after the deadline and the transfer would be considered failed anyway.
+/// The solution is to add a small margin before the deadline. The margin is derived using a simple heuristic,
+/// which is subject to review and improvement later on (this is not an API-visible trait).
+static void tx_stage_if(udpard_tx_t* const tx, tx_transfer_t* const tr)
+{
+ UDPARD_ASSERT(!cavl2_is_inserted(tx->index_staged, &tr->index_staged));
+ const uint_fast8_t epoch = tr->epoch++;
+ const udpard_us_t timeout = tx_ack_timeout(tx->ack_baseline_timeout, tr->priority, epoch);
+ tr->staged_until += timeout;
+ if ((tr->deadline - timeout) >= tr->staged_until) {
+ // Insert into staged index with deterministic tie-breaking.
+ const tx_time_key_t key = { .time = tr->staged_until,
+ .topic_hash = tr->topic_hash,
+ .transfer_id = tr->transfer_id };
+ // Ensure we didn't collide with another entry that should be unique.
+ const udpard_tree_t* const tree_staged = cavl2_find_or_insert(&tx->index_staged, //
+ &key,
+ tx_cavl_compare_staged,
+ &tr->index_staged,
+ cavl2_trivial_factory);
+ UDPARD_ASSERT(tree_staged == &tr->index_staged);
+ (void)tree_staged;
+ }
+}
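+
+// Worked example (hypothetical values): staged_until = now = 0, deadline = 100000 us, priority 0,
+// ack_baseline_timeout = 1000 us. The first call advances staged_until to 1000 us and stages the transfer
+// because 100000 - 1000 >= 1000; subsequent calls double the timeout (2000, 4000, ...) until
+// deadline - timeout falls below the new staged_until, after which no further attempts are scheduled.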
+
+static void tx_purge_expired_transfers(udpard_tx_t* const self, const udpard_us_t now)
+{
+    while (true) { // Could advance via next_greater instead of repeating the min search every iteration.
+ tx_transfer_t* const tr = CAVL2_TO_OWNER(cavl2_min(self->index_deadline), tx_transfer_t, index_deadline);
+ if ((tr != NULL) && (now > tr->deadline)) {
+ tx_transfer_retire(self, tr, false);
+ self->errors_expiration++;
+ } else {
+ break;
+ }
}
- return out; // OOM handled by the caller
}
-/// States outliving each level of recursion while ejecting the transfer from the fragment tree.
-typedef struct
+static void tx_promote_staged_transfers(udpard_tx_t* const self, const udpard_us_t now)
{
- struct UdpardFragment* head; // Points to the first fragment in the list.
- struct UdpardFragment* predecessor;
- uint32_t crc;
- size_t retain_size;
- size_t offset;
- RxMemory memory;
-} RxSlotEjectContext;
-
-/// See rxSlotEject() for details.
-/// The maximum recursion depth is ceil(1.44*log2(FRAME_INDEX_MAX+1)-0.328) = 45 levels.
-/// NOLINTNEXTLINE(misc-no-recursion) MISRA C:2012 rule 17.2
-static void rxSlotEjectFragment(RxFragment* const frag, RxSlotEjectContext* const ctx)
-{
- UDPARD_ASSERT((frag != NULL) && (ctx != NULL));
- if (frag->tree.base.lr[0] != NULL)
- {
- RxFragment* const child = ((RxFragmentTreeNode*) frag->tree.base.lr[0])->this;
- UDPARD_ASSERT(child->frame_index < frag->frame_index);
- UDPARD_ASSERT(child->tree.base.up == &frag->tree.base);
- rxSlotEjectFragment(child, ctx); // NOSONAR recursion
- }
- const size_t fragment_size = frag->base.view.size;
- frag->base.next = NULL; // Default state; may be overwritten.
- ctx->crc = transferCRCAdd(ctx->crc, fragment_size, frag->base.view.data);
- // Truncate unnecessary payload past the specified limit. This enforces the extent and removes the transfer CRC.
- const bool retain = ctx->offset < ctx->retain_size;
- if (retain)
- {
- UDPARD_ASSERT(ctx->retain_size >= ctx->offset);
- ctx->head = (ctx->head == NULL) ? &frag->base : ctx->head;
- frag->base.view.size = smaller(frag->base.view.size, ctx->retain_size - ctx->offset);
- if (ctx->predecessor != NULL)
- {
- ctx->predecessor->next = &frag->base;
+    while (true) { // Could advance via next_greater instead of repeating the min search every iteration.
+ tx_transfer_t* const tr = CAVL2_TO_OWNER(cavl2_min(self->index_staged), tx_transfer_t, index_staged);
+ if ((tr != NULL) && (now >= tr->staged_until)) {
+ // Reinsert into the staged index at the new position, when the next attempt is due (if any).
+ cavl2_remove(&self->index_staged, &tr->index_staged);
+ tx_stage_if(self, tr);
+ // Enqueue for transmission unless it's been there since the last attempt (stalled interface?)
+ for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+ if (((tr->iface_bitmap & (1U << i)) != 0) && !is_listed(&self->queue[i][tr->priority], &tr->queue[i])) {
+ UDPARD_ASSERT(tr->head[i] != NULL); // cannot stage without payload, doesn't make sense
+ UDPARD_ASSERT(tr->cursor[i] == tr->head[i]); // must have been rewound after last attempt
+ enlist_head(&self->queue[i][tr->priority], &tr->queue[i]);
+ }
+ }
+ } else {
+ break;
}
- ctx->predecessor = &frag->base;
- }
- // Adjust the offset of the next fragment and descend into it. Keep the sub-tree alive for now even if not needed.
- ctx->offset += fragment_size;
- if (frag->tree.base.lr[1] != NULL)
- {
- RxFragment* const child = ((RxFragmentTreeNode*) frag->tree.base.lr[1])->this;
- UDPARD_ASSERT(child->frame_index > frag->frame_index);
- UDPARD_ASSERT(child->tree.base.up == &frag->tree.base);
- rxSlotEjectFragment(child, ctx); // NOSONAR recursion
- }
- // Drop the unneeded fragments and their handles after the sub-tree is fully traversed.
- if (!retain)
- {
- memFreePayload(ctx->memory.payload, frag->base.origin);
- memFree(ctx->memory.fragment, sizeof(RxFragment), frag);
}
}
-/// This function finalizes the fragmented transfer payload by doing multiple things in one pass through the tree:
-///
-/// - Compute the transfer-CRC. The caller should verify the result.
-/// - Build a linked list of fragments ordered by frame index, as the application would expect it.
-/// - Truncate the payload according to the specified size limit.
-/// - Free the tree nodes and their payload buffers past the size limit.
-///
-/// It is guaranteed that the output list is sorted by frame index. It may be empty.
-/// After this function is invoked, the tree will be destroyed and cannot be used anymore;
-/// hence, in the event of invalid transfer being received (bad CRC), the fragments will have to be freed
-/// by traversing the linked list instead of the tree.
-///
-/// The payload shall contain at least the transfer CRC, so the minimum size is TRANSFER_CRC_SIZE_BYTES.
-/// There shall be at least one fragment (because a Cyphal transfer contains at least one frame).
-///
-/// The return value indicates whether the transfer is valid (CRC is correct).
-static bool rxSlotEject(size_t* const out_payload_size,
- struct UdpardFragment* const out_payload_head,
- RxFragmentTreeNode* const fragment_tree,
- const size_t received_total_size, // With CRC.
- const size_t extent,
- const RxMemory memory)
-{
- UDPARD_ASSERT((received_total_size >= TRANSFER_CRC_SIZE_BYTES) && (fragment_tree != NULL) &&
- (out_payload_size != NULL) && (out_payload_head != NULL));
- bool result = false;
- RxSlotEjectContext eject_ctx = {
- .head = NULL,
- .predecessor = NULL,
- .crc = TRANSFER_CRC_INITIAL,
- .retain_size = smaller(received_total_size - TRANSFER_CRC_SIZE_BYTES, extent),
- .offset = 0,
- .memory = memory,
- };
- rxSlotEjectFragment(fragment_tree->this, &eject_ctx);
- UDPARD_ASSERT(eject_ctx.offset == received_total_size); // Ensure we have traversed the entire tree.
- if (TRANSFER_CRC_RESIDUE_BEFORE_OUTPUT_XOR == eject_ctx.crc)
- {
- result = true;
- *out_payload_size = eject_ctx.retain_size;
- if (eject_ctx.head != NULL)
- {
- // This is the single-frame transfer optimization suggested by Scott: we free the first fragment handle
- // early by moving the contents into the rx_transfer structure by value.
- // No need to free the payload buffer because it has been transferred to the transfer.
- *out_payload_head = *eject_ctx.head; // Slice off the derived type fields as they are not needed.
- memFree(memory.fragment, sizeof(RxFragment), eject_ctx.head);
- }
- else
- {
- *out_payload_head = (struct UdpardFragment) {.next = NULL, .view = {0, NULL}, .origin = {0, NULL}};
+/// A transfer can share the same frames between two interfaces if
+/// (both have the same MTU OR the transfer fits into both MTUs) AND both use the same allocator.
+/// Either they will share the same spool, or there is only a single frame so the MTU difference does not matter.
+/// The allocator requirement is important because it is possible that distinct NICs may not be able to reach the
+/// same memory region via DMA.
+static bool tx_spool_shareable(const size_t mtu_a,
+ const udpard_mem_t mem_a,
+ const size_t mtu_b,
+ const udpard_mem_t mem_b,
+ const size_t payload_size)
+{
+ return ((mtu_a == mtu_b) || (payload_size <= smaller(mtu_a, mtu_b))) && mem_same(mem_a, mem_b);
+}
+
+/// The prediction takes into account that some interfaces may share the same frame spool.
+static size_t tx_predict_frame_count(const size_t mtu[UDPARD_IFACE_COUNT_MAX],
+ const udpard_mem_t memory[UDPARD_IFACE_COUNT_MAX],
+ const uint16_t iface_bitmap,
+ const size_t payload_size)
+{
+ UDPARD_ASSERT((iface_bitmap & UDPARD_IFACE_BITMAP_ALL) != 0); // The caller ensures this
+ size_t n_frames_total = 0;
+ for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+ UDPARD_ASSERT(mtu[i] > 0);
+ if ((iface_bitmap & (1U << i)) != 0) {
+ bool shared = false;
+ for (size_t j = 0; j < i; j++) {
+ shared = shared || (((iface_bitmap & (1U << j)) != 0) &&
+ tx_spool_shareable(mtu[i], memory[i], mtu[j], memory[j], payload_size));
+ }
+ if (!shared) {
+ n_frames_total += larger(1, (payload_size + mtu[i] - 1U) / mtu[i]);
+ }
}
}
- else // The transfer turned out to be invalid. We have to free the fragments. Can't use the tree anymore.
- {
- rxFragmentDestroyList(eject_ctx.head, memory);
- }
- return result;
+    UDPARD_ASSERT(n_frames_total > 0); // Guaranteed because at least one interface bit is set in iface_bitmap.
+ return n_frames_total;
}
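+
+// For example (hypothetical values): two interfaces with MTU 1400 and the same allocator carrying a 3000-byte
+// transfer share one spool of ceil(3000/1400) = 3 frames, so the prediction is 3; with distinct allocators,
+// each interface gets its own spool and the prediction is 6.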
-/// Update the frame count discovery state in this transfer.
-/// Returns true on success, false if inconsistencies are detected and the slot should be restarted.
-static bool rxSlotAccept_UpdateFrameCount(RxSlot* const self, const RxFrameBase frame)
+static bool tx_push(udpard_tx_t* const tx,
+ const udpard_us_t now,
+ const udpard_us_t deadline,
+ const meta_t meta,
+ const uint16_t iface_bitmap,
+ const udpard_udpip_ep_t p2p_destination[UDPARD_IFACE_COUNT_MAX],
+ const udpard_bytes_scattered_t payload,
+ void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t),
+ const udpard_user_context_t user,
+ tx_transfer_t** const out_transfer)
{
- UDPARD_ASSERT((self != NULL) && (frame.payload.size > 0));
- bool ok = true;
- self->max_index = max32(self->max_index, frame.index);
- if (frame.end_of_transfer)
- {
- if ((self->eot_index != FRAME_INDEX_UNSET) && (self->eot_index != frame.index))
- {
- ok = false; // Inconsistent EOT flag, could be a node-ID conflict.
+ UDPARD_ASSERT(now <= deadline);
+ UDPARD_ASSERT(tx != NULL);
+ UDPARD_ASSERT((iface_bitmap & UDPARD_IFACE_BITMAP_ALL) != 0);
+ UDPARD_ASSERT((iface_bitmap & UDPARD_IFACE_BITMAP_ALL) == iface_bitmap);
+
+ // Purge expired transfers before accepting a new one to make room in the queue.
+ tx_purge_expired_transfers(tx, now);
+
+ // Promote staged transfers that are now eligible for retransmission to ensure fairness:
+ // if they have the same priority as the new transfer, they should get a chance to go first.
+ tx_promote_staged_transfers(tx, now);
+
+ // Construct the empty transfer object, without the frames for now. The frame spools will be constructed next.
+ tx_transfer_t* const tr = mem_alloc(tx->memory.transfer, sizeof(tx_transfer_t));
+ if (tr == NULL) {
+ tx->errors_oom++;
+ return false;
+ }
+ mem_zero(sizeof(*tr), tr);
+ tr->epoch = 0;
+ tr->staged_until = now;
+ tr->topic_hash = meta.topic_hash;
+ tr->transfer_id = meta.transfer_id;
+ tr->remote_topic_hash = meta.topic_hash;
+ tr->remote_transfer_id = meta.transfer_id;
+ tr->deadline = deadline;
+ tr->reliable = meta.flag_reliable;
+ tr->priority = meta.priority;
+ tr->iface_bitmap = iface_bitmap;
+ tr->user = user;
+ tr->feedback = feedback;
+ for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+ tr->p2p_destination[i] = p2p_destination[i];
+ tr->head[i] = tr->cursor[i] = NULL;
+ }
+
+    // Clamp each MTU to the allowed minimum before predicting the frame count and reserving queue space.
+ for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+ tx->mtu[i] = larger(tx->mtu[i], UDPARD_MTU_MIN); // enforce minimum MTU
+ }
+ const size_t n_frames =
+ tx_predict_frame_count(tx->mtu, tx->memory.payload, iface_bitmap, meta.transfer_payload_size);
+ UDPARD_ASSERT(n_frames > 0);
+ if (!tx_ensure_queue_space(tx, n_frames)) {
+ mem_free(tx->memory.transfer, sizeof(tx_transfer_t), tr);
+ tx->errors_capacity++;
+ return false;
+ }
+
+ // Spool the frames for each interface, with deduplication where possible to conserve memory and queue space.
+ const size_t enqueued_frames_before = tx->enqueued_frames_count;
+ bool oom = false;
+ for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+ if ((tr->iface_bitmap & (1U << i)) != 0) {
+ if (tr->head[i] == NULL) {
+ tr->head[i] = tx_spool(tx, tx->memory.payload[i], tx->mtu[i], meta, payload);
+ tr->cursor[i] = tr->head[i];
+ if (tr->head[i] == NULL) {
+ oom = true;
+ break;
+ }
+ // Detect which interfaces can use the same spool to conserve memory.
+ for (size_t j = i + 1; j < UDPARD_IFACE_COUNT_MAX; j++) {
+ if (((tr->iface_bitmap & (1U << j)) != 0) && tx_spool_shareable(tx->mtu[i],
+ tx->memory.payload[i],
+ tx->mtu[j],
+ tx->memory.payload[j],
+ meta.transfer_payload_size)) {
+ tr->head[j] = tr->head[i];
+ tr->cursor[j] = tr->cursor[i];
+ tx_frame_t* frame = tr->head[j];
+ while (frame != NULL) {
+ frame->refcount++;
+ frame = frame->next;
+ }
+ }
+ }
+ }
}
- self->eot_index = frame.index;
}
- UDPARD_ASSERT(frame.index <= self->max_index);
- if (self->max_index > self->eot_index)
- {
- ok = false; // Frames past EOT found, discard the entire transfer because we don't trust it anymore.
+ if (oom) {
+ tx_transfer_free_payload(tr);
+ mem_free(tx->memory.transfer, sizeof(tx_transfer_t), tr);
+ tx->errors_oom++;
+ return false;
}
- return ok;
-}
+ UDPARD_ASSERT((tx->enqueued_frames_count - enqueued_frames_before) == n_frames);
+ UDPARD_ASSERT(tx->enqueued_frames_count <= tx->enqueued_frames_limit);
+ (void)enqueued_frames_before;
-/// Insert the fragment into the fragment tree. If it already exists, drop and free the duplicate.
-/// Returns 0 if the fragment is not needed, 1 if it is needed, negative on error.
-/// The fragment shall be deallocated unless the return value is 1.
-static int_fast8_t rxSlotAccept_InsertFragment(RxSlot* const self, const RxFrameBase frame, const RxMemory memory)
-{
- UDPARD_ASSERT((self != NULL) && (frame.payload.size > 0) && (self->max_index <= self->eot_index) &&
- (self->accepted_frames <= self->eot_index));
- RxSlotUpdateContext update_ctx = {.frame_index = frame.index,
- .accepted = false,
- .memory_fragment = memory.fragment};
- RxFragmentTreeNode* const frag = (RxFragmentTreeNode*) cavlSearch((struct UdpardTreeNode**) &self->fragments, //
- &update_ctx,
- &rxSlotFragmentSearch,
- &rxSlotFragmentFactory);
- int_fast8_t result = update_ctx.accepted ? 1 : 0;
- if (frag == NULL)
- {
- UDPARD_ASSERT(!update_ctx.accepted);
- result = -UDPARD_ERROR_MEMORY;
- // No restart because there is hope that there will be enough memory when we receive a duplicate.
- }
- UDPARD_ASSERT(self->max_index <= self->eot_index);
- if (update_ctx.accepted)
- {
- UDPARD_ASSERT((result > 0) && (frag->this->frame_index == frame.index));
- frag->this->base.view = frame.payload;
- frag->this->base.origin = frame.origin;
- self->payload_size += frame.payload.size;
- self->accepted_frames++;
- }
- return result;
-}
-
-/// Detect transfer completion. If complete, eject the payload from the fragment tree and check its CRC.
-/// The return value is passed over from rxSlotEject.
-static int_fast8_t rxSlotAccept_FinalizeMaybe(RxSlot* const self,
- size_t* const out_transfer_payload_size,
- struct UdpardFragment* const out_transfer_payload_head,
- const size_t extent,
- const RxMemory memory)
-{
- UDPARD_ASSERT((self != NULL) && (out_transfer_payload_size != NULL) && (out_transfer_payload_head != NULL) &&
- (self->fragments != NULL));
- int_fast8_t result = 0;
- if (self->accepted_frames > self->eot_index) // Mind the off-by-one: cardinal vs. ordinal.
- {
- if (self->payload_size >= TRANSFER_CRC_SIZE_BYTES)
- {
- result = rxSlotEject(out_transfer_payload_size,
- out_transfer_payload_head,
- self->fragments,
- self->payload_size,
- extent,
- memory)
- ? 1
- : 0;
- // The tree is now unusable and the data is moved into rx_transfer.
- self->fragments = NULL;
- }
- rxSlotRestartAdvance(self, memory); // Restart needed even if invalid.
- }
- return result;
-}
-
-/// This function will either move the frame payload into the session, or free it if it can't be used.
-/// Upon return, certain state fields may be overwritten, so the caller should not rely on them.
-/// Returns: 1 -- transfer available, payload written; 0 -- transfer not yet available; <0 -- error.
-static int_fast8_t rxSlotAccept(RxSlot* const self,
- size_t* const out_transfer_payload_size,
- struct UdpardFragment* const out_transfer_payload_head,
- const RxFrameBase frame,
- const size_t extent,
- const RxMemory memory)
-{
- UDPARD_ASSERT((self != NULL) && (frame.payload.size > 0) && (out_transfer_payload_size != NULL) &&
- (out_transfer_payload_head != NULL));
- int_fast8_t result = 0;
- bool release = true;
- if (rxSlotAccept_UpdateFrameCount(self, frame))
- {
- result = rxSlotAccept_InsertFragment(self, frame, memory);
- UDPARD_ASSERT(result <= 1);
- if (result > 0)
- {
- release = false;
- result = rxSlotAccept_FinalizeMaybe(self, //
- out_transfer_payload_size,
- out_transfer_payload_head,
- extent,
- memory);
+ // Enqueue for transmission immediately.
+ for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+ if ((tr->iface_bitmap & (1U << i)) != 0) {
+ enlist_head(&tx->queue[i][tr->priority], &tr->queue[i]);
}
}
- else
- {
- rxSlotRestartAdvance(self, memory);
- }
- if (release)
- {
- memFreePayload(memory.payload, frame.origin);
+ // Add to the staged index so that it is repeatedly re-enqueued later until acknowledged or expired.
+ if (meta.flag_reliable) {
+ tx_stage_if(tx, tr);
+ }
+ // Add to the deadline index for expiration management.
+ // Insert into deadline index with deterministic tie-breaking.
+ const tx_time_key_t deadline_key = { .time = tr->deadline,
+ .topic_hash = tr->topic_hash,
+ .transfer_id = tr->transfer_id };
+ // Ensure we didn't collide with another entry that should be unique.
+ const udpard_tree_t* const tree_deadline = cavl2_find_or_insert(
+ &tx->index_deadline, &deadline_key, tx_cavl_compare_deadline, &tr->index_deadline, cavl2_trivial_factory);
+ UDPARD_ASSERT(tree_deadline == &tr->index_deadline);
+ (void)tree_deadline;
+ // Add to the transfer index for incoming ack management.
+ const tx_transfer_key_t transfer_key = { .topic_hash = tr->topic_hash, .transfer_id = tr->transfer_id };
+ const udpard_tree_t* const tree_transfer = cavl2_find_or_insert(
+ &tx->index_transfer, &transfer_key, tx_cavl_compare_transfer, &tr->index_transfer, cavl2_trivial_factory);
+ UDPARD_ASSERT(tree_transfer == &tr->index_transfer); // ensure no duplicates; checked at the API level
+ (void)tree_transfer;
+ // Add to the agewise list for sacrifice management on queue exhaustion.
+ enlist_head(&tx->agewise, &tr->agewise);
+
+ // Finalize.
+ if (out_transfer != NULL) {
+ *out_transfer = tr;
+ }
+ return true;
+}
+
+/// Handle an ACK received from a remote node.
+static void tx_receive_ack(udpard_rx_t* const rx, const uint64_t topic_hash, const uint64_t transfer_id)
+{
+ if (rx->tx != NULL) {
+ tx_transfer_t* const tr = tx_transfer_find(rx->tx, topic_hash, transfer_id);
+ if ((tr != NULL) && tr->reliable) {
+ tx_transfer_retire(rx->tx, tr, true);
+ }
}
- UDPARD_ASSERT(result <= 1);
- return result;
}
-// -------------------------------------------------- RX IFACE --------------------------------------------------
+/// Generate an ack transfer for the specified remote transfer.
+/// Do nothing if an ack for the same transfer is already enqueued with equal or better endpoint coverage.
+static void tx_send_ack(udpard_rx_t* const rx,
+ const udpard_us_t now,
+ const udpard_prio_t priority,
+ const uint64_t topic_hash,
+ const uint64_t transfer_id,
+ const udpard_remote_t remote)
+{
+ udpard_tx_t* const tx = rx->tx;
+ if (tx != NULL) {
+ // Check if an ack for this transfer is already enqueued.
+ const tx_transfer_key_t key = { .topic_hash = topic_hash, .transfer_id = transfer_id };
+ tx_transfer_t* const prior =
+ CAVL2_TO_OWNER(cavl2_find(tx->index_transfer_ack, &key, &tx_cavl_compare_transfer_remote),
+ tx_transfer_t,
+ index_transfer_ack);
+ const uint16_t prior_ep_bitmap = (prior != NULL) ? valid_ep_bitmap(prior->p2p_destination) : 0U;
+ UDPARD_ASSERT((prior == NULL) || (prior_ep_bitmap == prior->iface_bitmap));
+ const uint16_t new_ep_bitmap = valid_ep_bitmap(remote.endpoints);
+ const bool new_better = (new_ep_bitmap & (uint16_t)(~prior_ep_bitmap)) != 0U;
+ if (!new_better) {
+            return; // The pending ack already covers every endpoint of this remote; a new one adds nothing.
+ }
+ if (prior != NULL) { // avoid redundant acks for the same transfer -- replace with better one
+ UDPARD_ASSERT(prior->feedback == NULL);
+ tx_transfer_retire(tx, prior, false); // this will free up a queue slot and some memory
+ }
+        // Even if the new, better ack fails to enqueue for some reason, it is no big deal: we will send the next
+        // one. The only plausible failure is OOM, and if a prior ack was just retired above, its freed slot makes
+        // that unlikely.
+
+ // Serialize the ACK payload.
+ byte_t message[ACK_SIZE_BYTES];
+ byte_t* ptr = message;
+ ptr = serialize_u64(ptr, topic_hash);
+ ptr = serialize_u64(ptr, transfer_id);
+ UDPARD_ASSERT((ptr - message) == ACK_SIZE_BYTES);
+ (void)ptr;
+
+ // Enqueue the transfer.
+ const udpard_bytes_t payload = { .size = ACK_SIZE_BYTES, .data = message };
+ const meta_t meta = {
+ .priority = priority,
+ .flag_reliable = false,
+ .flag_acknowledgement = true,
+ .transfer_payload_size = (uint32_t)payload.size,
+ .transfer_id = tx->p2p_transfer_id++,
+ .sender_uid = tx->local_uid,
+ .topic_hash = remote.uid,
+ };
+ tx_transfer_t* tr = NULL;
+        const bool pushed = tx_push(tx,
+                                    now,
+                                    now + ACK_TX_DEADLINE,
+                                    meta,
+                                    new_ep_bitmap,
+                                    remote.endpoints,
+                                    (udpard_bytes_scattered_t){ .bytes = payload, .next = NULL },
+                                    NULL,
+                                    UDPARD_USER_CONTEXT_NULL,
+                                    &tr);
+        if (pushed) { // An ack is always a single-frame transfer, so the push either fully succeeds or fails.
+ UDPARD_ASSERT(tr != NULL);
+ tr->remote_topic_hash = topic_hash;
+ tr->remote_transfer_id = transfer_id;
+ (void)cavl2_find_or_insert(&tx->index_transfer_ack,
+ &key,
+ tx_cavl_compare_transfer_remote,
+ &tr->index_transfer_ack,
+ cavl2_trivial_factory);
+ } else {
+ rx->errors_ack_tx++;
+ }
+ } else {
+ rx->errors_ack_tx++;
+ }
+}
+
+bool udpard_tx_new(udpard_tx_t* const self,
+ const uint64_t local_uid,
+ const uint64_t p2p_transfer_id_initial,
+ const size_t enqueued_frames_limit,
+ const udpard_tx_mem_resources_t memory,
+ const udpard_tx_vtable_t* const vtable)
+{
+ const bool ok = (NULL != self) && (local_uid != 0) && tx_validate_mem_resources(memory) && (vtable != NULL) &&
+ (vtable->eject_subject != NULL) && (vtable->eject_p2p != NULL);
+ if (ok) {
+ mem_zero(sizeof(*self), self);
+ self->vtable = vtable;
+ self->local_uid = local_uid;
+ self->p2p_transfer_id = p2p_transfer_id_initial;
+ self->ack_baseline_timeout = UDPARD_TX_ACK_BASELINE_TIMEOUT_DEFAULT_us;
+ self->enqueued_frames_limit = enqueued_frames_limit;
+ self->enqueued_frames_count = 0;
+ self->memory = memory;
+ self->index_staged = NULL;
+ self->index_deadline = NULL;
+ self->index_transfer = NULL;
+ self->user = NULL;
+ for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+ self->mtu[i] = UDPARD_MTU_DEFAULT;
+ for (size_t p = 0; p < UDPARD_PRIORITY_COUNT; p++) {
+ self->queue[i][p].head = NULL;
+ self->queue[i][p].tail = NULL;
+ }
+ }
+ }
+ return ok;
+}
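+
+// Illustrative usage sketch (not part of the library; my_mem and my_vtable are hypothetical, and the vtable
+// must provide both eject_subject and eject_p2p callbacks):
+//
+//     udpard_tx_t tx;
+//     if (!udpard_tx_new(&tx, 0xDEADBEEFCAFEULL, 0, 64U, my_mem, &my_vtable)) {
+//         // Rejected: e.g., zero local UID, incomplete memory resources, or missing vtable callbacks.
+//     }
+//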
-/// Whether the supplied transfer-ID is greater than all transfer-IDs in the RX slots.
-/// This indicates that the new transfer is not a duplicate and should be accepted.
-static bool rxIfaceIsFutureTransferID(const RxIface* const self, const UdpardTransferID transfer_id)
-{
- bool is_future_tid = true;
- for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++) // Expected to be unrolled by the compiler.
- {
- is_future_tid = is_future_tid && ((self->slots[i].transfer_id < transfer_id) ||
- (self->slots[i].transfer_id == TRANSFER_ID_UNSET));
+bool udpard_tx_push(udpard_tx_t* const self,
+ const udpard_us_t now,
+ const udpard_us_t deadline,
+ const uint16_t iface_bitmap,
+ const udpard_prio_t priority,
+ const uint64_t topic_hash,
+ const uint64_t transfer_id,
+ const udpard_bytes_scattered_t payload,
+ void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort.
+ const udpard_user_context_t user)
+{
+ bool ok = (self != NULL) && (deadline >= now) && (now >= 0) && (self->local_uid != 0) &&
+ ((iface_bitmap & UDPARD_IFACE_BITMAP_ALL) != 0) && (priority < UDPARD_PRIORITY_COUNT) &&
+ ((payload.bytes.data != NULL) || (payload.bytes.size == 0U)) &&
+ (tx_transfer_find(self, topic_hash, transfer_id) == NULL);
+ if (ok) {
+ const meta_t meta = {
+ .priority = priority,
+ .flag_reliable = feedback != NULL,
+ .transfer_payload_size = (uint32_t)bytes_scattered_size(payload),
+ .transfer_id = transfer_id,
+ .sender_uid = self->local_uid,
+ .topic_hash = topic_hash,
+ };
+ const udpard_udpip_ep_t blank_ep[UDPARD_IFACE_COUNT_MAX] = { 0 };
+ ok = tx_push(self, // --------------------------------------
+ now,
+ deadline,
+ meta,
+ iface_bitmap & UDPARD_IFACE_BITMAP_ALL,
+ blank_ep,
+ payload,
+ feedback,
+ user,
+ NULL);
}
- return is_future_tid;
+ return ok;
}
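+
+// Illustrative usage sketch (not part of the library; names prefixed my_ are hypothetical). Passing a feedback
+// callback requests reliable delivery with retransmissions; passing NULL sends best-effort:
+//
+//     const udpard_bytes_scattered_t payload = {
+//         .bytes = { .size = my_size, .data = my_data },
+//         .next  = NULL,
+//     };
+//     const bool accepted = udpard_tx_push(&tx, now, now + 100000, UDPARD_IFACE_BITMAP_ALL, my_priority,
+//                                          my_topic_hash, my_transfer_id, payload, my_on_feedback, my_user);
+//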
-/// Whether the time that has passed since the last accepted first frame of a transfer exceeds the TID timeout.
-/// This indicates that the transfer should be accepted even if its transfer-ID is not greater than all transfer-IDs
-/// in the RX slots.
-static bool rxIfaceCheckTransferIDTimeout(const RxIface* const self,
- const UdpardMicrosecond ts_usec,
- const UdpardMicrosecond transfer_id_timeout_usec)
-{
- // We use the RxIface state here because the RxSlot state is reset between transfers.
- // If there is reassembly in progress, we want to use the timestamps from these in-progress transfers,
- // as that eliminates the risk of a false-positive TID-timeout detection.
- UdpardMicrosecond most_recent_ts_usec = self->ts_usec;
- for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++) // Expected to be unrolled by the compiler.
- {
- if ((most_recent_ts_usec == TIMESTAMP_UNSET) ||
- ((self->slots[i].ts_usec != TIMESTAMP_UNSET) && (self->slots[i].ts_usec > most_recent_ts_usec)))
- {
- most_recent_ts_usec = self->slots[i].ts_usec;
+bool udpard_tx_push_p2p(udpard_tx_t* const self,
+ const udpard_us_t now,
+ const udpard_us_t deadline,
+ const udpard_prio_t priority,
+ const udpard_remote_t remote,
+ const udpard_bytes_scattered_t payload,
+ void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort.
+ const udpard_user_context_t user,
+ uint64_t* const out_transfer_id)
+{
+ const uint16_t iface_bitmap = valid_ep_bitmap(remote.endpoints);
+ bool ok = (self != NULL) && (deadline >= now) && (now >= 0) && (self->local_uid != 0) && (iface_bitmap != 0) &&
+ (priority < UDPARD_PRIORITY_COUNT) && ((payload.bytes.data != NULL) || (payload.bytes.size == 0U));
+ if (ok) {
+ const meta_t meta = {
+ .priority = priority,
+ .flag_reliable = feedback != NULL,
+ .transfer_payload_size = (uint32_t)bytes_scattered_size(payload),
+ .transfer_id = self->p2p_transfer_id++,
+ .sender_uid = self->local_uid,
+ .topic_hash = remote.uid,
+ };
+ tx_transfer_t* tr = NULL;
+ ok = tx_push(self, now, deadline, meta, iface_bitmap, remote.endpoints, payload, feedback, user, &tr);
+ UDPARD_ASSERT((!ok) || (tr->transfer_id == meta.transfer_id));
+ if (ok && (out_transfer_id != NULL)) {
+ *out_transfer_id = tr->transfer_id;
}
}
- return (most_recent_ts_usec == TIMESTAMP_UNSET) ||
- ((ts_usec >= most_recent_ts_usec) && ((ts_usec - most_recent_ts_usec) >= transfer_id_timeout_usec));
+ return ok;
}
-/// Traverses the list of slots trying to find a slot with a matching transfer-ID that is already IN PROGRESS.
-/// If there is no such slot, tries again without the IN PROGRESS requirement.
-/// The purpose of this complicated dual check is to support the case where multiple slots have the same
-/// transfer-ID, which may occur with interleaved transfers.
-static RxSlot* rxIfaceFindMatchingSlot(RxSlot slots[RX_SLOT_COUNT], const UdpardTransferID transfer_id)
+static void tx_eject_pending_frames(udpard_tx_t* const self, const udpard_us_t now, const uint_fast8_t ifindex)
{
- RxSlot* slot = NULL;
- for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++)
- {
- if ((slots[i].transfer_id == transfer_id) && (slots[i].ts_usec != TIMESTAMP_UNSET))
- {
- slot = &slots[i];
- break;
+ while (true) {
+ // Find the highest-priority pending transfer.
+ tx_transfer_t* tr = NULL;
+ for (size_t prio = 0; prio < UDPARD_PRIORITY_COUNT; prio++) {
+ tx_transfer_t* const candidate = // This pointer arithmetic is ugly and perhaps should be improved
+ ptr_unbias(self->queue[ifindex][prio].tail,
+ offsetof(tx_transfer_t, queue) + (sizeof(udpard_listed_t) * ifindex));
+ if (candidate != NULL) {
+ tr = candidate;
+ break;
+ }
}
- }
- if (slot == NULL)
- {
- for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++)
+ if (tr == NULL) {
+ break; // No pending transfers at the moment. Find something else to do.
+ }
+ UDPARD_ASSERT(tr->cursor[ifindex] != NULL); // cannot be pending without payload, doesn't make sense
+ UDPARD_ASSERT(tr->priority < UDPARD_PRIORITY_COUNT);
+
+ // Eject the frame.
+ const tx_frame_t* const frame = tr->cursor[ifindex];
+ tx_frame_t* const frame_next = frame->next;
+ const bool last_attempt = !cavl2_is_inserted(self->index_staged, &tr->index_staged);
+ const bool last_frame = frame_next == NULL; // if not last attempt we will have to rewind to head.
{
- if (slots[i].transfer_id == transfer_id)
- {
- slot = &slots[i];
- break;
+ udpard_tx_ejection_t ejection = { .now = now,
+ .deadline = tr->deadline,
+ .iface_index = ifindex,
+ .dscp = self->dscp_value_per_priority[tr->priority],
+ .datagram = tx_frame_view(frame),
+ .user = tr->user };
+ const bool ep_valid = udpard_is_valid_endpoint(tr->p2p_destination[ifindex]);
+ UDPARD_ASSERT((!ep_valid) || ((tr->iface_bitmap & (1U << ifindex)) != 0U));
+ const bool ejected = ep_valid ? self->vtable->eject_p2p(self, &ejection, tr->p2p_destination[ifindex])
+ : self->vtable->eject_subject(self, &ejection);
+ if (!ejected) { // The easy case -- no progress was made at this time;
+ break; // don't change anything, just try again later as-is
+ }
+ }
+
+ // Frame ejected successfully. Update the transfer state to get ready for the next frame.
+ if (last_attempt) { // no need to keep frames that we will no longer use; free early to reduce pressure
+ UDPARD_ASSERT(tr->head[ifindex] == tr->cursor[ifindex]);
+ tr->head[ifindex] = frame_next;
+ udpard_tx_refcount_dec(tx_frame_view(frame));
+ }
+ tr->cursor[ifindex] = frame_next;
+
+ // Finalize the transmission if this was the last frame of the transfer.
+ if (last_frame) {
+ tr->cursor[ifindex] = tr->head[ifindex];
+ delist(&self->queue[ifindex][tr->priority], &tr->queue[ifindex]); // no longer pending for transmission
+ UDPARD_ASSERT(!last_attempt || (tr->head[ifindex] == NULL)); // this iface is done with the payload
+ if (last_attempt && !tr->reliable && !tx_is_pending(self, tr)) { // remove early once all ifaces are done
+ UDPARD_ASSERT(tr->feedback == NULL); // non-reliable transfers have no feedback callback
+ tx_transfer_retire(self, tr, true);
}
}
}
- return slot;
}
-/// This function is invoked when a new datagram pertaining to a certain session is received on an interface.
-/// This function will either move the frame payload into the session, or free it if it cannot be made use of.
-/// Returns: 1 -- transfer available; 0 -- transfer not yet available; <0 -- error.
-static int_fast8_t rxIfaceAccept(RxIface* const self,
- const UdpardMicrosecond ts_usec,
- const RxFrame frame,
- const size_t extent,
- const UdpardMicrosecond transfer_id_timeout_usec,
- const RxMemory memory,
- struct UdpardRxTransfer* const out_transfer)
-{
- UDPARD_ASSERT((self != NULL) && (frame.base.payload.size > 0) && (out_transfer != NULL));
- RxSlot* slot = rxIfaceFindMatchingSlot(self->slots, frame.meta.transfer_id);
- // If there is no suitable slot, we should check if the transfer is a future one (high transfer-ID),
- // or a transfer-ID timeout has occurred. In this case we sacrifice the oldest slot.
- if (slot == NULL)
- {
- // The timestamp is UNSET when the slot is waiting for the next transfer.
- // Such slots are the best candidates for replacement because reusing them does not cause loss of
- // transfers that are in the process of being reassembled. If there are no such slots, we must
- // sacrifice the one whose first frame has arrived the longest time ago.
- RxSlot* victim = &self->slots[0];
- for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++) // Expected to be unrolled by the compiler.
- {
- if ((self->slots[i].ts_usec == TIMESTAMP_UNSET) ||
- ((victim->ts_usec != TIMESTAMP_UNSET) && (self->slots[i].ts_usec < victim->ts_usec)))
- {
- victim = &self->slots[i];
+void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now, const uint16_t iface_bitmap)
+{
+ if ((self != NULL) && (now >= 0)) { // This is the main scheduler state machine update tick.
+ tx_purge_expired_transfers(self, now); // This may free up some memory and some queue slots.
+ tx_promote_staged_transfers(self, now); // This may add some new transfers to the queue.
+ for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+ if ((iface_bitmap & (1U << i)) != 0U) {
+ tx_eject_pending_frames(self, now, i);
}
}
- if (rxIfaceIsFutureTransferID(self, frame.meta.transfer_id) ||
- rxIfaceCheckTransferIDTimeout(self, ts_usec, transfer_id_timeout_usec))
- {
- rxSlotRestart(victim, frame.meta.transfer_id, memory);
- slot = victim;
- UDPARD_ASSERT(slot != NULL);
- }
}
- // If there is a suitable slot (perhaps a newly created one for this frame), update it.
- // If there is neither a suitable slot nor a new one was created, the frame cannot be used.
- int_fast8_t result = 0;
- if (slot != NULL)
- {
- if (slot->ts_usec == TIMESTAMP_UNSET)
- {
- slot->ts_usec = ts_usec; // Transfer timestamp is the timestamp of the earliest frame.
- }
- const UdpardMicrosecond ts = slot->ts_usec;
- UDPARD_ASSERT(slot->transfer_id == frame.meta.transfer_id);
- result = rxSlotAccept(slot, // May invalidate state variables such as timestamp or transfer-ID.
- &out_transfer->payload_size,
- &out_transfer->payload,
- frame.base,
- extent,
- memory);
- if (result > 0) // Transfer successfully received, populate the transfer descriptor for the client.
- {
- self->ts_usec = ts; // Update the last valid transfer timestamp on this iface.
- out_transfer->timestamp_usec = ts;
- out_transfer->priority = frame.meta.priority;
- out_transfer->source_node_id = frame.meta.src_node_id;
- out_transfer->transfer_id = frame.meta.transfer_id;
+}
+
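+// Illustrative integration sketch (not part of the library; platform_monotonic_us() and ifaces_ready_for_write
+// are hypothetical placeholders supplied by the application):
+//
+//     const udpard_us_t now = platform_monotonic_us();
+//     udpard_tx_poll(&tx, now, udpard_tx_pending_ifaces(&tx) & ifaces_ready_for_write);
+//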
+bool udpard_tx_cancel(udpard_tx_t* const self, const uint64_t topic_hash, const uint64_t transfer_id)
+{
+ bool cancelled = false;
+ if (self != NULL) {
+ tx_transfer_t* const tr = tx_transfer_find(self, topic_hash, transfer_id);
+ if (tr != NULL) {
+ tx_transfer_retire(self, tr, false);
+ cancelled = true;
}
}
- else
- {
- memFreePayload(memory.payload, frame.base.origin);
- }
- return result;
+ return cancelled;
}
-static void rxIfaceInit(RxIface* const self, const RxMemory memory)
+size_t udpard_tx_cancel_all(udpard_tx_t* const self, const uint64_t topic_hash)
{
- UDPARD_ASSERT(self != NULL);
- memZero(sizeof(*self), self);
- self->ts_usec = TIMESTAMP_UNSET;
- for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++)
- {
- self->slots[i].fragments = NULL;
- rxSlotRestart(&self->slots[i], TRANSFER_ID_UNSET, memory);
+ size_t count = 0;
+ if (self != NULL) {
+ // Find the first transfer with matching topic_hash using transfer_id=0 as lower bound.
+ const tx_transfer_key_t key = { .topic_hash = topic_hash, .transfer_id = 0 };
+ tx_transfer_t* tr = CAVL2_TO_OWNER(
+ cavl2_lower_bound(self->index_transfer, &key, &tx_cavl_compare_transfer), tx_transfer_t, index_transfer);
+ // Iterate through all transfers with the same topic_hash.
+ while ((tr != NULL) && (tr->topic_hash == topic_hash)) {
+ tx_transfer_t* const next =
+ CAVL2_TO_OWNER(cavl2_next_greater(&tr->index_transfer), tx_transfer_t, index_transfer);
+ tx_transfer_retire(self, tr, false);
+ count++;
+ tr = next;
+ }
}
+ return count;
}
-/// Frees the iface and all slots in it. The iface instance itself is not freed.
-static void rxIfaceFree(RxIface* const self, const RxMemory memory)
+uint16_t udpard_tx_pending_ifaces(const udpard_tx_t* const self)
{
- UDPARD_ASSERT(self != NULL);
- for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++)
- {
- rxSlotFree(&self->slots[i], memory);
+ uint16_t bitmap = 0;
+ if (self != NULL) {
+ // Even though it's constant-time, I still mildly dislike this loop. Should it become a bottleneck,
+ // we could modify the TX state to keep a bitmap of pending interfaces updated incrementally.
+ for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+ for (size_t p = 0; p < UDPARD_PRIORITY_COUNT; p++) {
+ if (self->queue[i][p].head != NULL) {
+ bitmap |= (1U << i);
+ break;
+ }
+ }
+ }
}
+ return bitmap;
}
-// -------------------------------------------------- RX SESSION --------------------------------------------------
+void udpard_tx_refcount_inc(const udpard_bytes_t tx_payload_view)
+{
+ if (tx_payload_view.data != NULL) {
+ tx_frame_t* const frame = tx_frame_from_view(tx_payload_view);
+ UDPARD_ASSERT(frame->refcount > 0); // NOLINT(*ArrayBound)
+ // TODO: if C11 is enabled, use stdatomic here
+ frame->refcount++;
+ }
+}
-/// Checks if the given transfer should be accepted. If not, the transfer is freed.
-/// Internal states are updated.
-static bool rxSessionDeduplicate(struct UdpardInternalRxSession* const self,
- const UdpardMicrosecond transfer_id_timeout_usec,
- struct UdpardRxTransfer* const transfer,
- const RxMemory memory)
+void udpard_tx_refcount_dec(const udpard_bytes_t tx_payload_view)
{
- UDPARD_ASSERT((self != NULL) && (transfer != NULL));
- const bool future_tid = (self->last_transfer_id == TRANSFER_ID_UNSET) || //
- (transfer->transfer_id > self->last_transfer_id);
- const bool tid_timeout = (self->last_ts_usec == TIMESTAMP_UNSET) ||
- ((transfer->timestamp_usec >= self->last_ts_usec) &&
- ((transfer->timestamp_usec - self->last_ts_usec) >= transfer_id_timeout_usec));
- const bool accept = future_tid || tid_timeout;
- if (accept)
- {
- self->last_ts_usec = transfer->timestamp_usec;
- self->last_transfer_id = transfer->transfer_id;
+ if (tx_payload_view.data != NULL) {
+ tx_frame_t* const frame = tx_frame_from_view(tx_payload_view);
+ UDPARD_ASSERT(frame->refcount > 0); // NOLINT(*ArrayBound)
+ // TODO: if C11 is enabled, use stdatomic here
+ frame->refcount--;
+ if (frame->refcount == 0U) {
+ --*frame->objcount;
+ frame->deleter.vtable->free(frame->deleter.context, sizeof(tx_frame_t) + tx_payload_view.size, frame);
+ }
}
- else // This is a duplicate: received from another interface, a FEC retransmission, or a network glitch.
- {
- memFreePayload(memory.payload, transfer->payload.origin);
- rxFragmentDestroyList(transfer->payload.next, memory);
- transfer->payload_size = 0;
- transfer->payload = (struct UdpardFragment) {.next = NULL,
- .view = {.size = 0, .data = NULL},
- .origin = {.size = 0, .data = NULL}};
- }
- return accept;
-}
-
-/// Takes ownership of the frame payload buffer.
-static int_fast8_t rxSessionAccept(struct UdpardInternalRxSession* const self,
- const uint_fast8_t redundant_iface_index,
- const UdpardMicrosecond ts_usec,
- const RxFrame frame,
- const size_t extent,
- const UdpardMicrosecond transfer_id_timeout_usec,
- const RxMemory memory,
- struct UdpardRxTransfer* const out_transfer)
-{
- UDPARD_ASSERT((self != NULL) && (redundant_iface_index < UDPARD_NETWORK_INTERFACE_COUNT_MAX) &&
- (out_transfer != NULL));
- int_fast8_t result = rxIfaceAccept(&self->ifaces[redundant_iface_index],
- ts_usec,
- frame,
- extent,
- transfer_id_timeout_usec,
- memory,
- out_transfer);
- UDPARD_ASSERT(result <= 1);
- if (result > 0)
- {
- result = rxSessionDeduplicate(self, transfer_id_timeout_usec, out_transfer, memory) ? 1 : 0;
+}
+
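+// Illustrative sketch of how a zero-copy driver might use the reference counting (not part of the library;
+// hypothetical_send_async() is a placeholder for a platform-specific asynchronous send):
+//
+//     static bool eject_subject(udpard_tx_t* const tx, udpard_tx_ejection_t* const ej)
+//     {
+//         udpard_tx_refcount_inc(ej->datagram);            // Pin the buffer until the NIC is done with it.
+//         const bool ok = hypothetical_send_async(tx, ej); // The send-complete handler is expected to call
+//         if (!ok) {                                       // udpard_tx_refcount_dec(ej->datagram) later.
+//             udpard_tx_refcount_dec(ej->datagram);
+//         }
+//         return ok;
+//     }
+//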
+void udpard_tx_free(udpard_tx_t* const self)
+{
+ if (self != NULL) {
+ while (self->index_transfer != NULL) {
+ tx_transfer_t* tr = CAVL2_TO_OWNER(self->index_transfer, tx_transfer_t, index_transfer);
+ tx_transfer_retire(self, tr, false);
+ }
}
- return result;
}
-static void rxSessionInit(struct UdpardInternalRxSession* const self, const RxMemory memory)
+// ---------------------------------------------------------------------------------------------------------------------
+// --------------------------------------------- RX PIPELINE ---------------------------------------------
+// ---------------------------------------------------------------------------------------------------------------------
+//
+// The RX pipeline is a layered solution: PORT -> SESSION -> SLOT -> FRAGMENT TREE.
+//
+// Ports are created by the application per subject to subscribe to. There are various parameters defined per port,
+// such as the extent (max payload size to accept) and the reassembly mode (ORDERED, UNORDERED, STATELESS).
+//
+// Each port dynamically creates a dedicated session per remote node that publishes on that subject
+// (unless the STATELESS mode is used, which is simple and limited). Sessions are automatically cleaned up and
+// removed when the remote node ceases to publish for a certain (large) timeout period.
+//
+// Each session holds RX_SLOT_COUNT slots for concurrent transfers from the same remote node on the same subject;
+// concurrent transfers may occur due to spontaneous datagram reordering or when the sender needs to emit a higher-
+// priority transfer while a lower-priority transfer is still ongoing (this is why there need to be at least as many
+// slots as there are priority levels). Each slot accepts frames from all redundant network interfaces at once and
+// runs an efficient fragment tree reassembler to reconstruct the original transfer payload with automatic deduplication
+// and defragmentation; since all interfaces are pooled together, the reassembler is completely insensitive to
+// permanent or transient failure of any of the redundant interfaces; as long as at least one of them is able to
+// deliver frames, the link will function; further, transient packet loss in one of the interfaces does not affect
+// the overall reliability. The message reception machine always operates at the throughput and latency of the
+// best-performing interface at any given time with seamless failover.
+//
+// Each session keeps track of recently received/seen transfers; this history is used for ACK retransmission
+// when the remote end retransmits a transfer that was already fully received, and also for duplicate
+// rejection. In the ORDERED mode, late transfers (those arriving out of order past the reordering window closure)
+// are never acked, but they may still be received and acked by some other nodes in the network that were able to
+// accept them.
+//
+// Acks are transmitted immediately upon successful reception of a transfer. If the remote end retransmits the transfer
+// (e.g., if the first ack was lost or due to a spurious duplication), repeat acks are only retransmitted
+// for the first frame of the transfer, because we don't want to flood the network with a duplicate ACK for every
+// frame of the retransmitted transfer.
+//
+// The redundant interfaces may have distinct MTUs, so the fragment offsets and sizes may vary significantly.
+// The reassembler decides if a newly arrived fragment is needed based on gap/overlap detection in the fragment tree.
+// An accepted fragment may overlap with neighboring fragments; however, the reassembler guarantees that no fragment is
+// fully contained within another fragment; this also implies that there are no fragments sharing the same offset,
+// and that fragments ordered by offset are also ordered by their ends.
+// The reassembler prefers to keep fewer large fragments over many small fragments to reduce the overhead of
+// managing the fragment tree and the amount of auxiliary memory required for it.
+//
+// The code here does a lot of linear lookups. This is intentional and is not expected to bring any performance issues
+// because all loops are tightly bounded with a compile-time known maximum number of iterations that is very small
+// in practice (e.g., the number of slots per session, priority levels, or interfaces). For such small iteration
+// counts this is much faster than more sophisticated lookup structures.
+
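+// A minimal sketch of an application-side message callback (illustrative only, not part of the library; it assumes
+// the application walks the fragment tree and then releases it):
+//
+//     static void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t tr)
+//     {
+//         (void)rx;
+//         // ...consume tr.payload here: fragments are ordered by offset, tr.payload_size_stored bytes stored...
+//         udpard_fragment_free_all(tr.payload, udpard_make_deleter(port->memory.fragment));
+//     }
+//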
+/// Everything except the transfer metadata: the fields that change from frame to frame within the same transfer.
+typedef struct
{
- UDPARD_ASSERT(self != NULL);
- memZero(sizeof(*self), self);
- self->remote_node_id = UDPARD_NODE_ID_UNSET;
- self->last_ts_usec = TIMESTAMP_UNSET;
- self->last_transfer_id = TRANSFER_ID_UNSET;
- for (uint_fast8_t i = 0; i < UDPARD_NETWORK_INTERFACE_COUNT_MAX; i++)
- {
- rxIfaceInit(&self->ifaces[i], memory);
+ size_t offset; ///< Offset of this fragment's payload within the full transfer payload.
+ udpard_bytes_t payload; ///< Does not include the header, just pure payload.
+ udpard_bytes_mut_t origin; ///< The entirety of the free-able buffer passed from the application.
+ uint32_t crc; ///< CRC of all preceding payload bytes in the transfer plus this fragment's payload.
+} rx_frame_base_t;
+
+/// Full frame state.
+typedef struct rx_frame_t
+{
+ rx_frame_base_t base;
+ meta_t meta;
+} rx_frame_t;
+
+// --------------------------------------------- FRAGMENT TREE ---------------------------------------------
+
+/// Finds the number of contiguous payload bytes received from offset zero after accepting a new fragment.
+/// The transfer is considered fully received when covered_prefix >= min(extent, transfer_payload_size).
+/// This should be invoked after the fragment tree has accepted a new fragment at frag_offset with frag_size.
+/// The complexity is amortized-logarithmic; the worst case is linear in the number of frames in the transfer.
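+/// For example: with fragments [0,100) and [150,200) stored, the covered prefix is 100; accepting a new fragment
+/// [90,160) extends it to 200 because the frontier can then advance through the now-contiguous coverage.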
+static size_t rx_fragment_tree_update_covered_prefix(udpard_tree_t* const root,
+ const size_t old_prefix,
+ const size_t frag_offset,
+ const size_t frag_size)
+{
+ const size_t end = frag_offset + frag_size;
+ if ((frag_offset > old_prefix) || (end <= old_prefix)) {
+ return old_prefix; // The new fragment does not cross the frontier, so it cannot affect the prefix.
+ }
+ udpard_fragment_t* fr = (udpard_fragment_t*)cavl2_predecessor(root, &old_prefix, &cavl_compare_fragment_offset);
+ UDPARD_ASSERT(fr != NULL);
+ size_t out = old_prefix;
+ while ((fr != NULL) && (fr->offset <= out)) {
+ out = larger(out, fr->offset + fr->view.size);
+ fr = (udpard_fragment_t*)cavl2_next_greater(&fr->index_offset);
}
+ return out;
}
-/// Frees all ifaces in the session, all children in the session tree recursively, and destroys the session itself.
-/// The maximum recursion depth is ceil(1.44*log2(UDPARD_NODE_ID_MAX+1)-0.328) = 23 levels.
-// NOLINTNEXTLINE(*-no-recursion) MISRA C:2012 rule 17.2
-static void rxSessionDestroyTree(struct UdpardInternalRxSession* const self,
- const struct UdpardRxMemoryResources memory)
+/// Returns NULL on OOM, in which case the payload ownership is not transferred and the caller still owns it.
+static udpard_fragment_t* rx_fragment_new(const udpard_mem_t memory,
+ const udpard_deleter_t payload_deleter,
+ const rx_frame_base_t frame)
+{
+ udpard_fragment_t* const mew = mem_alloc(memory, sizeof(udpard_fragment_t));
+ if (mew != NULL) {
+ mem_zero(sizeof(*mew), mew);
+ mew->index_offset = (udpard_tree_t){ NULL, { NULL, NULL }, 0 };
+ mew->offset = frame.offset;
+ mew->view.data = frame.payload.data;
+ mew->view.size = frame.payload.size;
+ mew->origin.data = frame.origin.data;
+ mew->origin.size = frame.origin.size;
+ mew->payload_deleter = payload_deleter;
+ }
+ return mew;
+}
+
+typedef enum
+{
+ rx_fragment_tree_rejected, ///< The newly received fragment was not needed for the tree and was freed.
+ rx_fragment_tree_accepted, ///< The newly received fragment was accepted into the tree, possibly replacing another.
+ rx_fragment_tree_done, ///< The newly received fragment completed the transfer; the caller must extract payload.
+ rx_fragment_tree_oom, ///< The fragment could not be accepted, but a possible future duplicate may work.
+} rx_fragment_tree_update_result_t;
+
+/// Takes ownership of the frame payload; either a new fragment is inserted or the payload is freed.
+static rx_fragment_tree_update_result_t rx_fragment_tree_update(udpard_tree_t** const root,
+ const udpard_mem_t fragment_memory,
+ const udpard_deleter_t payload_deleter,
+ const rx_frame_base_t frame,
+ const size_t transfer_payload_size,
+ const size_t extent,
+ size_t* const covered_prefix_io)
+{
+ const size_t left = frame.offset;
+ const size_t right = frame.offset + frame.payload.size;
+
+ // Ignore frames that lie entirely beyond the extent. Zero extent requires special handling because from the
+ // reassembler's point of view such transfers carry no useful payload, but we still want to accept them.
+ if (((extent > 0) && (left >= extent)) || ((extent == 0) && (left > extent))) {
+ mem_free_payload(payload_deleter, frame.origin);
+ return rx_fragment_tree_rejected; // New fragment is beyond the extent, discard.
+ }
+
+ // Check if the new fragment is fully contained within an existing fragment, or is an exact replica of one.
+ // We discard those early to maintain an essential invariant of the fragment tree: no fully-contained fragments.
+ {
+ const udpard_fragment_t* const frag =
+ (udpard_fragment_t*)cavl2_predecessor(*root, &left, &cavl_compare_fragment_offset);
+ if ((frag != NULL) && ((frag->offset + frag->view.size) >= right)) {
+ mem_free_payload(payload_deleter, frame.origin);
+ return rx_fragment_tree_rejected; // New fragment is fully contained within an existing one, discard.
+ }
+ }
+
+ // Find the left and right neighbors, if any, with possible (likely) overlap. Consider new fragment X with A, B, C:
+ // |----X----|
+ // |--A--|
+ // |--B--|
+ // |--C--|
+ // Here, only A is the left neighbor, and only C is the right neighbor. B is a victim.
+ // If A.right >= C.left, then there is neither a gap nor a victim to remove.
+ //
+ // To find the left neighbor, we need to find the fragment crossing the left boundary whose offset is the smallest.
+ // To do that, we simply need to find the fragment with the smallest right boundary that is on the right of our
+ // left boundary. This works because by construction we guarantee that our tree has no fully-contained fragments,
+ // implying that ordering by left is also ordering by right.
+ //
+ // The right neighbor is found by analogy: find the fragment with the largest left boundary that is on the left
+ // of our right boundary. This guarantees that the new virtual right boundary will max out to the right.
+ const udpard_fragment_t* n_left = (udpard_fragment_t*)cavl2_lower_bound(*root, &left, &cavl_compare_fragment_end);
+ if ((n_left != NULL) && (n_left->offset >= left)) {
+ n_left = NULL; // There is no left neighbor.
+ }
+ const udpard_fragment_t* n_right =
+ (udpard_fragment_t*)cavl2_predecessor(*root, &right, &cavl_compare_fragment_offset);
+ if ((n_right != NULL) && ((n_right->offset + n_right->view.size) <= right)) {
+ n_right = NULL; // There is no right neighbor.
+ }
+ const size_t n_left_size = (n_left != NULL) ? n_left->view.size : 0U;
+ const size_t n_right_size = (n_right != NULL) ? n_right->view.size : 0U;
+
+ // Simple acceptance heuristic -- if the new fragment adds new payload, allows eliminating a smaller fragment,
+ // or is larger than either neighbor, we accept it. The 'larger' condition is intended to allow
+ // eventual replacement of many small fragments with fewer large fragments.
+ // Consider the following scenario:
+ // |--A--|--B--|--C--|--D--| <-- small MTU set
+ // |---X---|---Y---|---Z---| <-- large MTU set
+ // Suppose we already have A..D received. Arrival of either X or Z allows eviction of A/D immediately.
+ // Arrival of Y does not allow an immediate eviction of any fragment, but if we had rejected it because it added
+ // no new coverage, we would miss the opportunity to evict B/C when X or Z arrive later. By this logic alone,
+ // we would also have to accept B and C if they were to arrive after X/Y/Z, which is, however, unnecessary because
+ // these fragments add no new information AND are smaller than the existing fragments, meaning that they offer
+ // no prospect of eventual defragmentation, so we reject them immediately.
+ const bool accept = (n_left == NULL) || (n_right == NULL) ||
+ ((n_left->offset + n_left->view.size) < n_right->offset) ||
+ (frame.payload.size > smaller(n_left_size, n_right_size));
+ if (!accept) {
+ mem_free_payload(payload_deleter, frame.origin);
+ return rx_fragment_tree_rejected; // New fragment is not expected to be useful.
+ }
+
+ // Ensure we can allocate the fragment header for the new frame before pruning the tree to avoid data loss.
+ udpard_fragment_t* const mew = rx_fragment_new(fragment_memory, payload_deleter, frame);
+ if (mew == NULL) {
+ mem_free_payload(payload_deleter, frame.origin);
+ return rx_fragment_tree_oom; // Cannot allocate fragment header. Maybe we will succeed later.
+ }
+
+ // The addition of a new fragment that joins adjacent fragments together into a larger contiguous block may
+ // render smaller fragments crossing its boundaries redundant.
+ // To check for that, we create a new virtual fragment that represents the new fragment together with those
+ // that join it on either end, if any, and then look for fragments contained within the virtual one.
+ // The virtual boundaries are adjusted by 1 to ensure that the neighbors themselves are not marked for eviction.
+ // Example:
+ // |--A--|--B--|
+ // |--X--|
+ // The addition of fragment A or B will render X redundant, even though it is not contained within either.
+ // This algorithm will detect that and mark X for removal.
+ const size_t v_left = smaller(left, (n_left == NULL) ? SIZE_MAX : (n_left->offset + 1U));
+ const size_t v_right =
+ larger(right, (n_right == NULL) ? 0 : (larger(n_right->offset + n_right->view.size, 1U) - 1U));
+ UDPARD_ASSERT((v_left <= left) && (right <= v_right));
+
+ // Remove all redundant fragments before inserting the new one.
+ // No need to repeat the tree lookup at every iteration; we just step through the nodes using the next_greater lookup.
+ udpard_fragment_t* victim = (udpard_fragment_t*)cavl2_lower_bound(*root, &v_left, &cavl_compare_fragment_offset);
+ while ((victim != NULL) && (victim->offset >= v_left) && ((victim->offset + victim->view.size) <= v_right)) {
+ udpard_fragment_t* const next = (udpard_fragment_t*)cavl2_next_greater(&victim->index_offset);
+ cavl2_remove(root, &victim->index_offset);
+ mem_free_payload(victim->payload_deleter, victim->origin);
+ mem_free(fragment_memory, sizeof(udpard_fragment_t), victim);
+ victim = next;
+ }
+ // Insert the new fragment.
+ const udpard_tree_t* const res = cavl2_find_or_insert(root, //
+ &mew->offset,
+ &cavl_compare_fragment_offset,
+ &mew->index_offset,
+ &cavl2_trivial_factory);
+ UDPARD_ASSERT(res == &mew->index_offset);
+ (void)res;
+ // Update the covered prefix. This requires only a single full scan across all iterations!
+ *covered_prefix_io = rx_fragment_tree_update_covered_prefix(*root, //
+ *covered_prefix_io,
+ frame.offset,
+ frame.payload.size);
+ return (*covered_prefix_io >= smaller(extent, transfer_payload_size)) ? rx_fragment_tree_done
+ : rx_fragment_tree_accepted;
+}
+
+/// 1. Eliminates payload overlaps. They may appear if redundant interfaces with different MTU settings are used.
+/// 2. Verifies the end-to-end CRC of the full reassembled payload.
+/// Returns true iff the transfer is valid and safe to deliver to the application.
+/// Observe that this function alters the tree ordering keys, but it does not alter the tree topology,
+/// because each fragment's offset is changed within the bounds that preserve the ordering.
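+/// For example, overlapping fragments [0,100) and [80,180) become [0,100) and [100,180): the second fragment's
+/// view is trimmed by 20 bytes so that the concatenation of all views reproduces the payload exactly once.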
+static bool rx_fragment_tree_finalize(udpard_tree_t* const root, const uint32_t crc_expected)
+{
+ uint32_t crc_computed = CRC_INITIAL;
+ size_t offset = 0;
+ for (udpard_tree_t* p = cavl2_min(root); p != NULL; p = cavl2_next_greater(p)) {
+ udpard_fragment_t* const frag = (udpard_fragment_t*)p;
+ UDPARD_ASSERT(frag->offset <= offset); // The tree reassembler cannot leave gaps.
+ const size_t trim = offset - frag->offset;
+ // The tree reassembler evicts redundant fragments, so there must be some payload, unless the transfer is empty.
+ UDPARD_ASSERT((trim < frag->view.size) || ((frag->view.size == 0) && (trim == 0) && (offset == 0)));
+ frag->offset += trim;
+ frag->view.data = (const byte_t*)frag->view.data + trim;
+ frag->view.size -= trim;
+ offset += frag->view.size;
+ crc_computed = crc_add(crc_computed, frag->view.size, frag->view.data);
+ }
+ return (crc_computed ^ CRC_OUTPUT_XOR) == crc_expected;
+}
+
+// --------------------------------------------- SLOT ---------------------------------------------
+
+typedef enum
+{
+ rx_slot_idle = 0,
+ rx_slot_busy = 1,
+ rx_slot_done = 2,
+} rx_slot_state_t;
+
+/// Frames from all redundant interfaces are pooled into the same reassembly slot per transfer-ID.
+/// The redundant interfaces may use distinct MTUs, which requires special fragment tree handling.
+typedef struct
{
- if (self != NULL)
- {
- for (uint_fast8_t i = 0; i < UDPARD_NETWORK_INTERFACE_COUNT_MAX; i++)
- {
- rxIfaceFree(&self->ifaces[i], (RxMemory) {.fragment = memory.fragment, .payload = memory.payload});
+ rx_slot_state_t state;
+
+ uint64_t transfer_id; ///< Which transfer we're reassembling here.
+
+ udpard_us_t ts_min; ///< Earliest frame timestamp, aka transfer reception timestamp.
+ udpard_us_t ts_max; ///< Latest frame timestamp, aka transfer completion timestamp.
+
+ size_t covered_prefix; ///< Number of bytes received contiguously from offset zero.
+ size_t total_size; ///< The total size of the transfer payload as declared by the sender (we may only use part of it).
+
+ size_t crc_end; ///< The end offset of the frame whose CRC is stored in `crc`.
+ uint32_t crc; ///< Once the reassembly is done, holds the CRC of the entire transfer.
+
+ udpard_prio_t priority;
+
+ udpard_tree_t* fragments;
+} rx_slot_t;
+
+static void rx_slot_reset(rx_slot_t* const slot, const udpard_mem_t fragment_memory)
+{
+ udpard_fragment_free_all((udpard_fragment_t*)slot->fragments, udpard_make_deleter(fragment_memory));
+ slot->fragments = NULL;
+ slot->state = rx_slot_idle;
+ slot->covered_prefix = 0U;
+ slot->crc_end = 0U;
+ slot->crc = CRC_INITIAL;
+}
+
+/// The caller takes ownership of the fragments iff the resulting state is done.
+static void rx_slot_update(rx_slot_t* const slot,
+ const udpard_us_t ts,
+ const udpard_mem_t fragment_memory,
+ const udpard_deleter_t payload_deleter,
+ rx_frame_t* const frame,
+ const size_t extent,
+ uint64_t* const errors_oom,
+ uint64_t* const errors_transfer_malformed)
+{
+ if (slot->state != rx_slot_busy) {
+ rx_slot_reset(slot, fragment_memory);
+ slot->state = rx_slot_busy;
+ slot->transfer_id = frame->meta.transfer_id;
+ slot->ts_min = ts;
+ slot->ts_max = ts;
+ // Some metadata is only needed to pass it over to the application once the transfer is done.
+ slot->total_size = frame->meta.transfer_payload_size;
+ slot->priority = frame->meta.priority;
+ }
+ // Enforce consistent per-frame values throughout the transfer.
+ if ((slot->total_size != frame->meta.transfer_payload_size) || (slot->priority != frame->meta.priority)) {
+ ++*errors_transfer_malformed;
+ mem_free_payload(payload_deleter, frame->base.origin);
+ rx_slot_reset(slot, fragment_memory);
+ return;
+ }
+ const rx_fragment_tree_update_result_t tree_res = rx_fragment_tree_update(&slot->fragments,
+ fragment_memory,
+ payload_deleter,
+ frame->base,
+ frame->meta.transfer_payload_size,
+ extent,
+ &slot->covered_prefix);
+ if ((tree_res == rx_fragment_tree_accepted) || (tree_res == rx_fragment_tree_done)) {
+ slot->ts_max = later(slot->ts_max, ts);
+ slot->ts_min = earlier(slot->ts_min, ts);
+ const size_t crc_end = frame->base.offset + frame->base.payload.size;
+ if (crc_end >= slot->crc_end) {
+ slot->crc_end = crc_end;
+ slot->crc = frame->base.crc;
}
- for (uint_fast8_t i = 0; i < 2; i++)
- {
- struct UdpardInternalRxSession* const child = (struct UdpardInternalRxSession*) (void*) self->base.lr[i];
- if (child != NULL)
- {
- UDPARD_ASSERT(child->base.up == &self->base);
- rxSessionDestroyTree(child, memory); // NOSONAR recursion
- }
+ }
+ if (tree_res == rx_fragment_tree_oom) {
+ ++*errors_oom;
+ }
+ if (tree_res == rx_fragment_tree_done) {
+ if (rx_fragment_tree_finalize(slot->fragments, slot->crc)) {
+ slot->state = rx_slot_done; // The caller will handle the completed transfer.
+ } else {
+ ++*errors_transfer_malformed;
+ rx_slot_reset(slot, fragment_memory);
}
- memFree(memory.session, sizeof(struct UdpardInternalRxSession), self);
}
}
-// -------------------------------------------------- RX PORT --------------------------------------------------
+// --------------------------------------------- SESSION & PORT ---------------------------------------------
-typedef struct
+/// The number of times `from` must be incremented (modulo 2^64) to reach `to`.
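+/// For example (all arithmetic modulo 2^64): distance(5, 7) == 2, and distance(UINT64_MAX, 1) == 2
+/// because the count wraps through zero.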
+static uint64_t rx_transfer_id_forward_distance(const uint64_t from, const uint64_t to) { return to - from; }
+
+/// Keep in mind that we have a dedicated session object per remote node per port; this means that the state
+/// kept here is specific to each remote node, as it should be.
+typedef struct rx_session_t
{
- UdpardNodeID remote_node_id;
- struct UdpardRxMemoryResources memory;
-} RxPortSessionSearchContext;
+ udpard_tree_t index_remote_uid; ///< Must be the first member.
+ udpard_remote_t remote; ///< Most recent discovered reverse path for P2P to the sender.
+
+ udpard_rx_port_t* port;
+
+ /// Used while this session is interned awaiting the closure of its reordering window.
+ udpard_tree_t index_reordering_window;
+ udpard_us_t reordering_window_deadline;
+
+ /// LRU list ordered by last animation time, used for automatic retirement of stale sessions.
+ udpard_listed_t list_by_animation;
+ udpard_us_t last_animated_ts;
-static int_fast8_t rxPortSessionSearch(void* const user_reference, // NOSONAR non-const API
- const struct UdpardTreeNode* node)
+ /// Most recently received transfer-IDs, used for duplicate detection and ACK retransmission.
+ /// history_current is always in [0, RX_TRANSFER_HISTORY_COUNT), pointing to the last added (newest) entry.
+ uint64_t history[RX_TRANSFER_HISTORY_COUNT];
+ uint_fast8_t history_current;
+
+ bool initialized; ///< Set after the first frame is seen.
+
+ rx_slot_t slots[RX_SLOT_COUNT];
+} rx_session_t;
+
+/// The reassembly strategy is composed once at initialization time by choosing a vtable with the desired behavior.
+typedef struct udpard_rx_port_vtable_private_t
{
- UDPARD_ASSERT((user_reference != NULL) && (node != NULL));
- return compare32(((const RxPortSessionSearchContext*) user_reference)->remote_node_id,
- ((const struct UdpardInternalRxSession*) (const void*) node)->remote_node_id);
-}
+ /// Takes ownership of the frame payload.
+ void (*accept)(udpard_rx_t*,
+ udpard_rx_port_t*,
+ udpard_us_t,
+ udpard_udpip_ep_t,
+ rx_frame_t*,
+ udpard_deleter_t,
+ uint_fast8_t);
+ /// Takes ownership of the frame payload.
+ void (*update_session)(rx_session_t*, udpard_rx_t*, udpard_us_t, rx_frame_t*, udpard_deleter_t);
+} udpard_rx_port_vtable_private_t;
-static struct UdpardTreeNode* rxPortSessionFactory(void* const user_reference) // NOSONAR non-const API
+/// True iff the given transfer-ID was recently ejected.
+static bool rx_session_is_transfer_ejected(const rx_session_t* const self, const uint64_t transfer_id)
{
- const RxPortSessionSearchContext* const ctx = (const RxPortSessionSearchContext*) user_reference;
- UDPARD_ASSERT((ctx != NULL) && (ctx->remote_node_id <= UDPARD_NODE_ID_MAX));
- struct UdpardTreeNode* out = NULL;
- struct UdpardInternalRxSession* const session =
- memAlloc(ctx->memory.session, sizeof(struct UdpardInternalRxSession));
- if (session != NULL)
- {
- rxSessionInit(session, (RxMemory) {.payload = ctx->memory.payload, .fragment = ctx->memory.fragment});
- session->remote_node_id = ctx->remote_node_id;
- out = &session->base;
- }
- return out; // OOM handled by the caller
-}
-
-/// Accepts a frame into a port, possibly creating a new session along the way.
-/// The frame shall not be anonymous. Takes ownership of the frame payload buffer.
-static int_fast8_t rxPortAccept(struct UdpardRxPort* const self,
- const uint_fast8_t redundant_iface_index,
- const UdpardMicrosecond ts_usec,
- const RxFrame frame,
- const struct UdpardRxMemoryResources memory,
- struct UdpardRxTransfer* const out_transfer)
-{
- UDPARD_ASSERT((self != NULL) && (redundant_iface_index < UDPARD_NETWORK_INTERFACE_COUNT_MAX) &&
- (out_transfer != NULL) && (frame.meta.src_node_id != UDPARD_NODE_ID_UNSET));
- int_fast8_t result = 0;
- struct UdpardInternalRxSession* const session = (struct UdpardInternalRxSession*) (void*)
- cavlSearch((struct UdpardTreeNode**) &self->sessions,
- &(RxPortSessionSearchContext) {.remote_node_id = frame.meta.src_node_id, .memory = memory},
- &rxPortSessionSearch,
- &rxPortSessionFactory);
- if (session != NULL)
- {
- UDPARD_ASSERT(session->remote_node_id == frame.meta.src_node_id);
- result = rxSessionAccept(session, // The callee takes ownership of the memory.
- redundant_iface_index,
- ts_usec,
- frame,
- self->extent,
- self->transfer_id_timeout_usec,
- (RxMemory) {.payload = memory.payload, .fragment = memory.fragment},
- out_transfer);
- }
- else // Failed to allocate a new session.
- {
- result = -UDPARD_ERROR_MEMORY;
- memFreePayload(memory.payload, frame.base.origin);
+ for (size_t i = 0; i < RX_TRANSFER_HISTORY_COUNT; i++) { // dear compiler, please unroll this loop
+ if (transfer_id == self->history[i]) {
+ return true;
+ }
}
- return result;
+ return false;
}
-/// A special case of rxPortAccept() for anonymous transfers. Accepts all transfers unconditionally.
-/// Does not allocate new memory. Takes ownership of the frame payload buffer.
-static int_fast8_t rxPortAcceptAnonymous(const UdpardMicrosecond ts_usec,
- const RxFrame frame,
- const struct UdpardMemoryDeleter memory,
- struct UdpardRxTransfer* const out_transfer)
+/// True iff the given transfer-ID equals one of the recently ejected ones or falls shortly before one of them.
+/// In the ORDERED mode, this indicates that the transfer is late and can no longer be ejected.
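+/// For example, if the history holds transfer-ID 100 and RX_TRANSFER_ORDERING_WINDOW were 8 (illustrative value),
+/// transfer-IDs 93..100 would be reported as late-or-ejected, while 101 and beyond would not.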
+static bool rx_session_is_transfer_late_or_ejected(const rx_session_t* const self, const uint64_t transfer_id)
{
- UDPARD_ASSERT((out_transfer != NULL) && (frame.meta.src_node_id == UDPARD_NODE_ID_UNSET));
- int_fast8_t result = 0;
- const bool size_ok = frame.base.payload.size >= TRANSFER_CRC_SIZE_BYTES;
- const bool crc_ok =
- transferCRCCompute(frame.base.payload.size, frame.base.payload.data) == TRANSFER_CRC_RESIDUE_AFTER_OUTPUT_XOR;
- if (size_ok && crc_ok)
- {
- result = 1;
- memZero(sizeof(*out_transfer), out_transfer);
- // Copy relevant metadata from the frame. Remember that anonymous transfers are always single-frame.
- out_transfer->timestamp_usec = ts_usec;
- out_transfer->priority = frame.meta.priority;
- out_transfer->source_node_id = frame.meta.src_node_id;
- out_transfer->transfer_id = frame.meta.transfer_id;
- // Manually set up the transfer payload to point to the relevant slice inside the frame payload.
- out_transfer->payload.next = NULL;
- out_transfer->payload.view.size = frame.base.payload.size - TRANSFER_CRC_SIZE_BYTES;
- out_transfer->payload.view.data = frame.base.payload.data;
- out_transfer->payload.origin = frame.base.origin;
- out_transfer->payload_size = out_transfer->payload.view.size;
- }
- else
- {
- memFreePayload(memory, frame.base.origin);
+ for (size_t i = 0; i < RX_TRANSFER_HISTORY_COUNT; i++) {
+ if (rx_transfer_id_forward_distance(transfer_id, self->history[i]) < RX_TRANSFER_ORDERING_WINDOW) {
+ return true;
+ }
}
- return result;
+ return false;
}
-/// Accepts a raw frame and, if valid, passes it on to rxPortAccept() for further processing.
-/// Takes ownership of the frame payload buffer.
-static int_fast8_t rxPortAcceptFrame(struct UdpardRxPort* const self,
- const uint_fast8_t redundant_iface_index,
- const UdpardMicrosecond ts_usec,
- const struct UdpardMutablePayload datagram_payload,
- const struct UdpardRxMemoryResources memory,
- struct UdpardRxTransfer* const out_transfer)
+/// True iff the transfer is already received but is not yet ejected to maintain ordering. Only useful for ORDERED mode.
+static bool rx_session_is_transfer_interned(const rx_session_t* const self, const uint64_t transfer_id)
{
- int_fast8_t result = 0;
- RxFrame frame = {0};
- if (rxParseFrame(datagram_payload, &frame))
- {
- if (frame.meta.src_node_id != UDPARD_NODE_ID_UNSET)
- {
- result = rxPortAccept(self, redundant_iface_index, ts_usec, frame, memory, out_transfer);
- }
- else
- {
- result = rxPortAcceptAnonymous(ts_usec, frame, memory.payload, out_transfer);
+ for (size_t i = 0; i < RX_SLOT_COUNT; i++) {
+ if ((self->slots[i].state == rx_slot_done) && (self->slots[i].transfer_id == transfer_id)) {
+ return true;
}
}
- else // Malformed datagram or unsupported header version, drop.
- {
- memFreePayload(memory.payload, datagram_payload);
- }
- return result;
+ return false;
}
-static void rxPortInit(struct UdpardRxPort* const self)
+static int32_t cavl_compare_rx_session_by_remote_uid(const void* const user, const udpard_tree_t* const node)
{
- memZero(sizeof(*self), self);
- self->extent = SIZE_MAX; // Unlimited extent by default.
- self->transfer_id_timeout_usec = UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC;
- self->sessions = NULL;
+ const uint64_t uid_a = *(const uint64_t*)user;
+ const uint64_t uid_b = ((const rx_session_t*)(const void*)node)->remote.uid; // clang-format off
+ if (uid_a < uid_b) { return -1; }
+ if (uid_a > uid_b) { return +1; }
+ return 0; // clang-format on
}
-static void rxPortFree(struct UdpardRxPort* const self, const struct UdpardRxMemoryResources memory)
+// Key for reordering deadline ordering with stable tiebreaking.
+typedef struct
{
- rxSessionDestroyTree(self->sessions, memory);
- self->sessions = NULL;
-}
+ udpard_us_t deadline;
+ uint64_t remote_uid;
+} rx_reordering_key_t;
-static int_fast8_t rxRPCSearch(void* const user_reference, // NOSONAR Cavl API requires non-const.
- const struct UdpardTreeNode* node)
+// Compare sessions by reordering deadline then by remote UID.
+static int32_t cavl_compare_rx_session_by_reordering_deadline(const void* const user, const udpard_tree_t* const node)
{
- UDPARD_ASSERT((user_reference != NULL) && (node != NULL));
- return compare32(((const struct UdpardRxRPCPort*) user_reference)->service_id,
- ((const struct UdpardRxRPCPort*) (const void*) node)->service_id);
+ const rx_reordering_key_t* const key = (const rx_reordering_key_t*)user;
+ const rx_session_t* const ses = CAVL2_TO_OWNER(node, rx_session_t, index_reordering_window); // clang-format off
+ if (key->deadline < ses->reordering_window_deadline) { return -1; }
+ if (key->deadline > ses->reordering_window_deadline) { return +1; }
+ if (key->remote_uid < ses->remote.uid) { return -1; }
+ if (key->remote_uid > ses->remote.uid) { return +1; }
+ return 0; // clang-format on
}
-static int_fast8_t rxRPCSearchByServiceID(void* const user_reference, // NOSONAR Cavl API requires non-const.
- const struct UdpardTreeNode* node)
+typedef struct
{
- UDPARD_ASSERT((user_reference != NULL) && (node != NULL));
- return compare32(*(const UdpardPortID*) user_reference,
- ((const struct UdpardRxRPCPort*) (const void*) node)->service_id);
+ udpard_rx_port_t* owner;
+ udpard_list_t* sessions_by_animation;
+ uint64_t remote_uid;
+ udpard_us_t now;
+} rx_session_factory_args_t;
+
+static udpard_tree_t* cavl_factory_rx_session_by_remote_uid(void* const user)
+{
+ const rx_session_factory_args_t* const args = (const rx_session_factory_args_t*)user;
+ rx_session_t* const out = mem_alloc(args->owner->memory.session, sizeof(rx_session_t));
+ if (out != NULL) {
+ mem_zero(sizeof(*out), out);
+ out->index_remote_uid = (udpard_tree_t){ NULL, { NULL, NULL }, 0 };
+ out->index_reordering_window = (udpard_tree_t){ NULL, { NULL, NULL }, 0 };
+ out->reordering_window_deadline = BIG_BANG;
+ out->list_by_animation = (udpard_listed_t){ NULL, NULL };
+ for (size_t i = 0; i < RX_SLOT_COUNT; i++) {
+ out->slots[i].fragments = NULL;
+ rx_slot_reset(&out->slots[i], args->owner->memory.fragment);
+ }
+ out->remote.uid = args->remote_uid;
+ out->port = args->owner;
+ out->last_animated_ts = args->now;
+ out->history_current = 0;
+ out->initialized = false;
+ enlist_head(args->sessions_by_animation, &out->list_by_animation);
+ }
+ return (udpard_tree_t*)out;
}
-// -------------------------------------------------- RX API --------------------------------------------------
-
-void udpardRxFragmentFree(const struct UdpardFragment head,
- const struct UdpardMemoryResource memory_fragment,
- const struct UdpardMemoryDeleter memory_payload)
+/// Removes the instance from all indexes and frees all associated memory.
+static void rx_session_free(rx_session_t* const self,
+ udpard_list_t* const sessions_by_animation,
+ udpard_tree_t** const sessions_by_reordering)
{
- // The head is not heap-allocated so not freed.
- memFreePayload(memory_payload, head.origin); // May be NULL, is okay.
- rxFragmentDestroyList(head.next, (RxMemory) {.fragment = memory_fragment, .payload = memory_payload});
+ for (size_t i = 0; i < RX_SLOT_COUNT; i++) {
+ rx_slot_reset(&self->slots[i], self->port->memory.fragment);
+ }
+ cavl2_remove(&self->port->index_session_by_remote_uid, &self->index_remote_uid);
+ (void)cavl2_remove_if(sessions_by_reordering, &self->index_reordering_window);
+ delist(sessions_by_animation, &self->list_by_animation);
+ mem_free(self->port->memory.session, sizeof(rx_session_t), self);
}
-int_fast8_t udpardRxSubscriptionInit(struct UdpardRxSubscription* const self,
- const UdpardPortID subject_id,
- const size_t extent,
- const struct UdpardRxMemoryResources memory)
+/// The payload ownership is transferred to the application. The history log and the window will be updated.
+static void rx_session_eject(rx_session_t* const self, udpard_rx_t* const rx, rx_slot_t* const slot)
{
- int_fast8_t result = -UDPARD_ERROR_ARGUMENT;
- if ((self != NULL) && (subject_id <= UDPARD_SUBJECT_ID_MAX) && rxValidateMemoryResources(memory))
- {
- memZero(sizeof(*self), self);
- rxPortInit(&self->port);
- self->port.extent = extent;
- self->udp_ip_endpoint = makeSubjectUDPIPEndpoint(subject_id);
- self->memory = memory;
- result = 0;
+ UDPARD_ASSERT(slot->state == rx_slot_done);
+
+ // Update the history -- overwrite the oldest entry.
+ self->history_current = (self->history_current + 1U) % RX_TRANSFER_HISTORY_COUNT;
+ self->history[self->history_current] = slot->transfer_id;
+
+ // Construct the arguments and invoke the callback.
+ const udpard_rx_transfer_t transfer = {
+ .timestamp = slot->ts_min,
+ .priority = slot->priority,
+ .transfer_id = slot->transfer_id,
+ .remote = self->remote,
+ .payload_size_stored = slot->covered_prefix,
+ .payload_size_wire = slot->total_size,
+ .payload = (udpard_fragment_t*)slot->fragments,
+ };
+ self->port->vtable->on_message(rx, self->port, transfer);
+
+ // Finally, reset the slot.
+ slot->fragments = NULL; // Transfer ownership to the application.
+ rx_slot_reset(slot, self->port->memory.fragment);
+}
+
+/// In the ORDERED mode, checks which slots can be ejected or interned in the reordering window.
+/// This is only useful in the ORDERED mode, which is much more complex and CPU-heavy than the UNORDERED mode.
+/// Should be invoked whenever a slot MAY or MUST be ejected (i.e., on completion or when an empty slot is required).
+/// If the force flag is set, at least one DONE slot will be ejected even if its reordering window is still open;
+/// this is used to forcibly free up at least one slot when no slot is idle and a new transfer arrives.
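+/// Illustrative scenario: the last ejected transfer-ID is 4, and two slots hold completed transfers 5 and 7 while
+/// 6 is still being reassembled. Transfer 5 is ejected immediately because it is in-sequence; transfer 7 is
+/// interned and the reordering timer is armed, so 7 is released when 6 arrives or when its window closes.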
+static void rx_session_ordered_scan_slots(rx_session_t* const self,
+ udpard_rx_t* const rx,
+ const udpard_us_t ts,
+ const bool force_one)
+{
+ // Reset the reordering window timer because we will either eject everything or arm it again later.
+ if (cavl2_remove_if(&rx->index_session_by_reordering, &self->index_reordering_window)) {
+ self->reordering_window_deadline = BIG_BANG;
+ }
+ // We need to repeat the scan because each ejection may open up the window for the next in-sequence transfer.
+ for (size_t iter = 0; iter < RX_SLOT_COUNT; iter++) {
+ // Find the slot closest to the next in-sequence transfer-ID.
+ const uint64_t tid_expected = self->history[self->history_current] + 1U;
+ uint64_t min_tid_dist = UINT64_MAX;
+ rx_slot_t* slot = NULL;
+ for (size_t i = 0; i < RX_SLOT_COUNT; i++) {
+ const uint64_t dist = rx_transfer_id_forward_distance(tid_expected, self->slots[i].transfer_id);
+ if ((self->slots[i].state == rx_slot_done) && (dist < min_tid_dist)) {
+ min_tid_dist = dist;
+ slot = &self->slots[i];
+ if (dist == 0) {
+ break; // Fast path for a common case.
+ }
+ }
+ }
+ // The slot needs to be ejected if it's in-sequence, if its reordering window is closed, or if we're
+ // asked to force an ejection and we haven't done so yet.
+ // The reordering window timeout implies that earlier transfers will be dropped if ORDERED mode is used.
+ const bool eject =
+ (slot != NULL) && ((slot->transfer_id == tid_expected) ||
+ (ts >= (slot->ts_min + self->port->reordering_window)) || (force_one && (iter == 0)));
+ if (!eject) {
+ // The slot is done but cannot be ejected yet; arm the reordering window timer.
+ // There may be transfers with future (more distant) transfer-IDs with an earlier reordering window
+ // closure deadline, but we ignore them because the nearest transfer overrides the more distant ones.
+ if (slot != NULL) {
+ self->reordering_window_deadline = slot->ts_min + self->port->reordering_window;
+ // Insert into reordering index with deterministic tie-breaking.
+ const rx_reordering_key_t key = { .deadline = self->reordering_window_deadline,
+ .remote_uid = self->remote.uid };
+ const udpard_tree_t* res = cavl2_find_or_insert(&rx->index_session_by_reordering, //----------------
+ &key,
+ &cavl_compare_rx_session_by_reordering_deadline,
+ &self->index_reordering_window,
+ &cavl2_trivial_factory);
+ UDPARD_ASSERT(res == &self->index_reordering_window);
+ (void)res;
+ }
+ break; // No more slots can be ejected at this time.
+ }
+ // We always pick the next transfer to eject with the nearest transfer-ID, which guarantees that the other
+ // DONE transfers will not end up being late.
+ // Some of the in-progress slots may be obsoleted by this move, which will be taken care of later.
+ UDPARD_ASSERT((slot != NULL) && (slot->state == rx_slot_done));
+ rx_session_eject(self, rx, slot);
+ }
+ // Ensure that in-progress slots, if any, have not ended up within the accepted window after the update.
+ // We can release them early to avoid holding the payload buffers that won't be used anyway.
+ for (size_t i = 0; i < RX_SLOT_COUNT; i++) {
+ rx_slot_t* const slot = &self->slots[i];
+ if ((slot->state == rx_slot_busy) && rx_session_is_transfer_late_or_ejected(self, slot->transfer_id)) {
+ rx_slot_reset(slot, self->port->memory.fragment);
+ }
}
- return result;
}
-void udpardRxSubscriptionFree(struct UdpardRxSubscription* const self)
+/// Finds an existing in-progress slot with the specified transfer-ID, or allocates a new one.
+/// Allocation always succeeds so the result is never NULL, but it may cause early ejection of an interned DONE slot.
+/// THIS IS POTENTIALLY DESTRUCTIVE IN THE ORDERED MODE because it may force an early reordering window closure.
+static rx_slot_t* rx_session_get_slot(rx_session_t* const self,
+ udpard_rx_t* const rx,
+ const udpard_us_t ts,
+ const uint64_t transfer_id)
{
- if (self != NULL)
- {
- rxPortFree(&self->port, self->memory);
+ // First, check if one is in progress already; resume it if so.
+ for (size_t i = 0; i < RX_SLOT_COUNT; i++) {
+ if ((self->slots[i].state == rx_slot_busy) && (self->slots[i].transfer_id == transfer_id)) {
+ return &self->slots[i];
+ }
+ }
+ // Use this opportunity to check for timed-out in-progress slots. This may free up a slot for the search below.
+ for (size_t i = 0; i < RX_SLOT_COUNT; i++) {
+ if ((self->slots[i].state == rx_slot_busy) && (ts >= (self->slots[i].ts_max + SESSION_LIFETIME))) {
+ rx_slot_reset(&self->slots[i], self->port->memory.fragment);
+ }
}
+ // This appears to be a new transfer, so we will need to allocate a new slot for it.
+ for (size_t i = 0; i < RX_SLOT_COUNT; i++) {
+ if (self->slots[i].state == rx_slot_idle) {
+ return &self->slots[i];
+ }
+ }
+ // All slots are currently occupied; find the oldest slot to sacrifice, which may be busy or done.
+ rx_slot_t* slot = NULL;
+ udpard_us_t oldest_ts = HEAT_DEATH;
+ for (size_t i = 0; i < RX_SLOT_COUNT; i++) {
+ UDPARD_ASSERT(self->slots[i].state != rx_slot_idle); // Checked this already.
+ if (self->slots[i].ts_max < oldest_ts) {
+ oldest_ts = self->slots[i].ts_max;
+ slot = &self->slots[i];
+ }
+ }
+ UDPARD_ASSERT((slot != NULL) && ((slot->state == rx_slot_busy) || (slot->state == rx_slot_done)));
+ // If it's busy, it is probably just a stale transfer, so it's a no-brainer to evict it.
+ // If it's done, we have to force the reordering window to close early to free up a slot without transfer loss.
+ if (slot->state == rx_slot_busy) {
+ rx_slot_reset(slot, self->port->memory.fragment); // Just a stale transfer, it's probably dead anyway.
+ } else {
+ UDPARD_ASSERT(slot->state == rx_slot_done);
+ // The oldest slot is DONE; we cannot just reset it, we must force an early ejection.
+ // The slot to eject will be chosen based on the transfer-ID, which may not be the oldest slot.
+ // Then we repeat the search looking for any IDLE slot, which must succeed now.
+ rx_session_ordered_scan_slots(self, rx, ts, true); // A slot will be ejected (we don't know which one).
+ slot = NULL;
+ for (size_t i = 0; i < RX_SLOT_COUNT; i++) {
+ if (self->slots[i].state == rx_slot_idle) {
+ slot = &self->slots[i];
+ break;
+ }
+ }
+ }
+ UDPARD_ASSERT((slot != NULL) && (slot->state == rx_slot_idle));
+ return slot;
}
-int_fast8_t udpardRxSubscriptionReceive(struct UdpardRxSubscription* const self,
- const UdpardMicrosecond timestamp_usec,
- const struct UdpardMutablePayload datagram_payload,
- const uint_fast8_t redundant_iface_index,
- struct UdpardRxTransfer* const out_transfer)
-{
- int_fast8_t result = -UDPARD_ERROR_ARGUMENT;
- if ((self != NULL) && (timestamp_usec != TIMESTAMP_UNSET) && (datagram_payload.data != NULL) &&
- (redundant_iface_index < UDPARD_NETWORK_INTERFACE_COUNT_MAX) && (out_transfer != NULL))
- {
- result = rxPortAcceptFrame(&self->port,
- redundant_iface_index,
- timestamp_usec,
- datagram_payload,
- self->memory,
- out_transfer);
- }
- else if (self != NULL)
- {
- memFreePayload(self->memory.payload, datagram_payload);
+static void rx_session_update(rx_session_t* const self,
+ udpard_rx_t* const rx,
+ const udpard_us_t ts,
+ const udpard_udpip_ep_t src_ep,
+ rx_frame_t* const frame,
+ const udpard_deleter_t payload_deleter,
+ const uint_fast8_t ifindex)
+{
+ UDPARD_ASSERT(self->remote.uid == frame->meta.sender_uid);
+ UDPARD_ASSERT(frame->meta.topic_hash == self->port->topic_hash); // must be checked by the caller beforehand
+
+ // Animate the session to prevent it from being retired.
+ enlist_head(&rx->list_session_by_animation, &self->list_by_animation);
+ self->last_animated_ts = ts;
+
+ // Update the return path discovery state.
+ // We identify nodes by their UID, allowing them to migrate across interfaces and IP addresses.
+ UDPARD_ASSERT(ifindex < UDPARD_IFACE_COUNT_MAX);
+ self->remote.endpoints[ifindex] = src_ep;
+
+ // Do-once initialization to ensure we don't lose any transfers by choosing the initial transfer-ID poorly.
+ // Any transfers with prior transfer-ID values arriving later will be rejected, which is acceptable.
+ if (!self->initialized) {
+ self->initialized = true;
+ self->history_current = 0;
+ for (size_t i = 0; i < RX_TRANSFER_HISTORY_COUNT; i++) {
+ self->history[i] = frame->meta.transfer_id - 1U;
+ }
}
- else
- {
- (void) 0;
+ self->port->vtable_private->update_session(self, rx, ts, frame, payload_deleter);
+}
+
+/// The ORDERED mode implementation. May delay incoming transfers to maintain strict transfer-ID ordering.
+/// The ORDERED mode is much more complex and CPU-heavy.
+static void rx_session_update_ordered(rx_session_t* const self,
+ udpard_rx_t* const rx,
+ const udpard_us_t ts,
+ rx_frame_t* const frame,
+ const udpard_deleter_t payload_deleter)
+{
+ // The queries here may be a bit time-consuming. If this becomes a problem, there are many ways to optimize this.
+ const bool is_ejected = rx_session_is_transfer_ejected(self, frame->meta.transfer_id);
+ const bool is_late_or_ejected = rx_session_is_transfer_late_or_ejected(self, frame->meta.transfer_id);
+ const bool is_interned = rx_session_is_transfer_interned(self, frame->meta.transfer_id);
+ const bool is_new = !is_late_or_ejected && !is_interned;
+ if (is_new) {
+ rx_slot_t* const slot = rx_session_get_slot(self, rx, ts, frame->meta.transfer_id);
+ UDPARD_ASSERT((slot != NULL) && (slot->state != rx_slot_done));
+ UDPARD_ASSERT((slot->state == rx_slot_idle) ||
+ ((slot->state == rx_slot_busy) && (slot->transfer_id == frame->meta.transfer_id)));
+ rx_slot_update(slot,
+ ts,
+ self->port->memory.fragment,
+ payload_deleter,
+ frame,
+ self->port->extent,
+ &rx->errors_oom,
+ &rx->errors_transfer_malformed);
+ if (slot->state == rx_slot_done) {
+ UDPARD_ASSERT(rx_session_is_transfer_interned(self, slot->transfer_id));
+ if (frame->meta.flag_reliable) {
+ // Payload view: ((udpard_fragment_t*)cavl2_min(slot->fragments))->view
+ tx_send_ack(rx, ts, slot->priority, self->port->topic_hash, slot->transfer_id, self->remote);
+ }
+ rx_session_ordered_scan_slots(self, rx, ts, false);
+ }
+ } else { // retransmit ACK if needed
+ // Note: transfers that are no longer retained in the history will not solicit an ACK response,
+ // meaning that the sender will not get a confirmation if the retransmitted transfer is too old.
+ // We assume that RX_TRANSFER_HISTORY_COUNT is enough to cover all sensible use cases.
+ if ((is_interned || is_ejected) && frame->meta.flag_reliable && (frame->base.offset == 0U)) {
+ // Payload view: frame->base.payload
+ tx_send_ack(rx, ts, frame->meta.priority, self->port->topic_hash, frame->meta.transfer_id, self->remote);
+ }
+ mem_free_payload(payload_deleter, frame->base.origin);
+ }
+}
+
+/// The UNORDERED mode implementation. Ejects every transfer immediately upon completion without delay.
+/// The reordering timer is not used.
+static void rx_session_update_unordered(rx_session_t* const self,
+ udpard_rx_t* const rx,
+ const udpard_us_t ts,
+ rx_frame_t* const frame,
+ const udpard_deleter_t payload_deleter)
+{
+ UDPARD_ASSERT(self->port->mode == udpard_rx_unordered);
+ UDPARD_ASSERT(self->port->reordering_window == 0);
+ // We do not check interned transfers because in the UNORDERED mode they are never interned, always ejected ASAP.
+ // We don't care about the ordering, either; we just accept anything that looks new.
+ if (!rx_session_is_transfer_ejected(self, frame->meta.transfer_id)) {
+ rx_slot_t* const slot = rx_session_get_slot(self, rx, ts, frame->meta.transfer_id); // new or continuation
+ UDPARD_ASSERT((slot != NULL) && (slot->state != rx_slot_done));
+ UDPARD_ASSERT((slot->state == rx_slot_idle) ||
+ ((slot->state == rx_slot_busy) && (slot->transfer_id == frame->meta.transfer_id)));
+ rx_slot_update(slot,
+ ts,
+ self->port->memory.fragment,
+ payload_deleter,
+ frame,
+ self->port->extent,
+ &rx->errors_oom,
+ &rx->errors_transfer_malformed);
+ if (slot->state == rx_slot_done) {
+ if (frame->meta.flag_reliable) { // Payload view: ((udpard_fragment_t*)cavl2_min(slot->fragments))->view
+ tx_send_ack(rx, ts, slot->priority, self->port->topic_hash, slot->transfer_id, self->remote);
+ }
+ rx_session_eject(self, rx, slot);
+ }
+ } else { // retransmit ACK if needed
+ if (frame->meta.flag_reliable && (frame->base.offset == 0U)) { // Payload view: frame->base.payload
+ UDPARD_ASSERT(rx_session_is_transfer_ejected(self, frame->meta.transfer_id));
+ tx_send_ack(rx, ts, frame->meta.priority, self->port->topic_hash, frame->meta.transfer_id, self->remote);
+ }
+ mem_free_payload(payload_deleter, frame->base.origin);
+ }
+}
+
+/// The stateful strategy maintains a dedicated session per remote node, indexed in a fast AVL tree.
+static void rx_port_accept_stateful(udpard_rx_t* const rx,
+ udpard_rx_port_t* const port,
+ const udpard_us_t timestamp,
+ const udpard_udpip_ep_t source_ep,
+ rx_frame_t* const frame,
+ const udpard_deleter_t payload_deleter,
+ const uint_fast8_t iface_index)
+{
+ rx_session_factory_args_t fac_args = { .owner = port,
+ .sessions_by_animation = &rx->list_session_by_animation,
+ .remote_uid = frame->meta.sender_uid,
+ .now = timestamp };
+ rx_session_t* const ses = // Will find an existing one or create a new one.
+ (rx_session_t*)(void*)cavl2_find_or_insert(&port->index_session_by_remote_uid,
+ &frame->meta.sender_uid,
+ &cavl_compare_rx_session_by_remote_uid,
+ &fac_args,
+ &cavl_factory_rx_session_by_remote_uid);
+ if (ses != NULL) {
+ rx_session_update(ses, rx, timestamp, source_ep, frame, payload_deleter, iface_index);
+ } else {
+ mem_free_payload(payload_deleter, frame->base.origin);
+ ++rx->errors_oom;
+ }
+}
+
+/// The stateless strategy accepts only single-frame transfers and does not maintain any session state.
+/// It could be trivially extended to fall back to UNORDERED when multi-frame transfers are detected.
+static void rx_port_accept_stateless(udpard_rx_t* const rx,
+ udpard_rx_port_t* const port,
+ const udpard_us_t timestamp,
+ const udpard_udpip_ep_t source_ep,
+ rx_frame_t* const frame,
+ const udpard_deleter_t payload_deleter,
+ const uint_fast8_t iface_index)
+{
+ const size_t required_size = smaller(port->extent, frame->meta.transfer_payload_size);
+ const bool full_transfer = (frame->base.offset == 0) && (frame->base.payload.size >= required_size);
+ if (full_transfer) {
+        // The fragment allocation is only needed to uphold the callback protocol.
+        // A future revision may be able to avoid this allocation.
+ udpard_fragment_t* const frag = rx_fragment_new(port->memory.fragment, payload_deleter, frame->base);
+ if (frag != NULL) {
+ udpard_remote_t remote = { .uid = frame->meta.sender_uid };
+ remote.endpoints[iface_index] = source_ep;
+            // The CRC is validated by the frame parser for the first frame of any transfer, so it is correct here.
+ UDPARD_ASSERT(frame->base.crc == crc_full(frame->base.payload.size, frame->base.payload.data));
+ const udpard_rx_transfer_t transfer = {
+ .timestamp = timestamp,
+ .priority = frame->meta.priority,
+ .transfer_id = frame->meta.transfer_id,
+ .remote = remote,
+ .payload_size_stored = required_size,
+ .payload_size_wire = frame->meta.transfer_payload_size,
+ .payload = frag,
+ };
+ port->vtable->on_message(rx, port, transfer);
+ } else {
+ mem_free_payload(payload_deleter, frame->base.origin);
+ ++rx->errors_oom;
+ }
+ } else {
+ mem_free_payload(payload_deleter, frame->base.origin);
+ ++rx->errors_transfer_malformed; // The stateless mode expects only single-frame transfers.
}
- return result;
}
-int_fast8_t udpardRxRPCDispatcherInit(struct UdpardRxRPCDispatcher* const self,
- const struct UdpardRxMemoryResources memory)
+static const udpard_rx_port_vtable_private_t rx_port_vtb_ordered = { .accept = rx_port_accept_stateful,
+ .update_session = rx_session_update_ordered };
+static const udpard_rx_port_vtable_private_t rx_port_vtb_unordered = { .accept = rx_port_accept_stateful,
+ .update_session = rx_session_update_unordered };
+static const udpard_rx_port_vtable_private_t rx_port_vtb_stateless = { .accept = rx_port_accept_stateless,
+ .update_session = NULL };
+
+// --------------------------------------------- RX PUBLIC API ---------------------------------------------
+
+static bool rx_validate_mem_resources(const udpard_rx_mem_resources_t memory)
{
- int_fast8_t result = -UDPARD_ERROR_ARGUMENT;
- if ((self != NULL) && rxValidateMemoryResources(memory))
- {
- memZero(sizeof(*self), self);
- self->local_node_id = UDPARD_NODE_ID_UNSET;
- self->memory = memory;
- self->request_ports = NULL;
- self->response_ports = NULL;
- result = 0;
- }
- return result;
+ return (memory.session.vtable != NULL) && (memory.session.vtable->base.free != NULL) &&
+ (memory.session.vtable->alloc != NULL) && //
+ (memory.fragment.vtable != NULL) && (memory.fragment.vtable->base.free != NULL) &&
+ (memory.fragment.vtable->alloc != NULL);
}
-int_fast8_t udpardRxRPCDispatcherStart(struct UdpardRxRPCDispatcher* const self,
- const UdpardNodeID local_node_id,
- struct UdpardUDPIPEndpoint* const out_udp_ip_endpoint)
+void udpard_rx_new(udpard_rx_t* const self, udpard_tx_t* const tx)
{
- int_fast8_t result = -UDPARD_ERROR_ARGUMENT;
- if ((self != NULL) && (out_udp_ip_endpoint != NULL) && (local_node_id <= UDPARD_NODE_ID_MAX) &&
- (self->local_node_id > UDPARD_NODE_ID_MAX))
- {
- self->local_node_id = local_node_id;
- *out_udp_ip_endpoint = makeServiceUDPIPEndpoint(local_node_id);
- result = 0;
- }
- return result;
+ UDPARD_ASSERT(self != NULL);
+ mem_zero(sizeof(*self), self);
+ self->list_session_by_animation = (udpard_list_t){ NULL, NULL };
+ self->index_session_by_reordering = NULL;
+ self->errors_oom = 0;
+ self->errors_frame_malformed = 0;
+ self->errors_transfer_malformed = 0;
+ self->tx = tx;
+ self->user = NULL;
}
-int_fast8_t udpardRxRPCDispatcherListen(struct UdpardRxRPCDispatcher* const self,
- struct UdpardRxRPCPort* const port,
- const UdpardPortID service_id,
- const bool is_request,
- const size_t extent)
+void udpard_rx_poll(udpard_rx_t* const self, const udpard_us_t now)
{
- int_fast8_t result = -UDPARD_ERROR_ARGUMENT;
- if ((self != NULL) && (port != NULL) && (service_id <= UDPARD_SERVICE_ID_MAX))
- {
- const int_fast8_t cancel_result = udpardRxRPCDispatcherCancel(self, service_id, is_request);
- UDPARD_ASSERT((cancel_result == 0) || (cancel_result == 1)); // We already checked the arguments.
- memZero(sizeof(*port), port);
- port->service_id = service_id;
- rxPortInit(&port->port);
- port->port.extent = extent;
- port->user_reference = NULL;
- // Insert the newly initialized service into the tree.
- const struct UdpardTreeNode* const item = cavlSearch(is_request ? &self->request_ports : &self->response_ports,
- port,
- &rxRPCSearch,
- &avlTrivialFactory);
- UDPARD_ASSERT((item != NULL) && (item == &port->base));
- (void) item;
- result = (cancel_result > 0) ? 0 : 1;
- }
- return result;
-}
-
-int_fast8_t udpardRxRPCDispatcherCancel(struct UdpardRxRPCDispatcher* const self,
- const UdpardPortID service_id,
- const bool is_request)
-{
- int_fast8_t result = -UDPARD_ERROR_ARGUMENT;
- if ((self != NULL) && (service_id <= UDPARD_SERVICE_ID_MAX))
+    // Retire timed-out sessions. We retire at most one per poll to avoid burstiness -- session retirement
+    // can free up a large amount of memory at once.
{
- UdpardPortID service_id_mutable = service_id;
- struct UdpardTreeNode** const root = is_request ? &self->request_ports : &self->response_ports;
- struct UdpardRxRPCPort* const item =
- (struct UdpardRxRPCPort*) (void*) cavlSearch(root, &service_id_mutable, &rxRPCSearchByServiceID, NULL);
- if (item != NULL)
- {
- cavlRemove(root, &item->base);
- rxPortFree(&item->port, self->memory);
+ rx_session_t* const ses = LIST_TAIL(self->list_session_by_animation, rx_session_t, list_by_animation);
+ if ((ses != NULL) && (now >= (ses->last_animated_ts + SESSION_LIFETIME))) {
+ rx_session_free(ses, &self->list_session_by_animation, &self->index_session_by_reordering);
}
- result = (item == NULL) ? 0 : 1;
}
- return result;
+ // Process reordering window timeouts.
+ // We may process more than one to minimize transfer delays; this is also expected to be quick.
+ while (true) {
+ rx_session_t* const ses =
+ CAVL2_TO_OWNER(cavl2_min(self->index_session_by_reordering), rx_session_t, index_reordering_window);
+ if ((ses == NULL) || (now < ses->reordering_window_deadline)) {
+ break;
+ }
+ rx_session_ordered_scan_slots(ses, self, now, false);
+ }
+}
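+
+// For illustration only, not part of the library: the application is expected to invoke udpard_rx_poll()
+// periodically, e.g., from its main loop (the time source name here is hypothetical):
+//
+//     while (running) {
+//         udpard_rx_poll(&rx, monotonic_time_us());
+//         // ...then read the sockets and feed received datagrams into udpard_rx_port_push()...
+//     }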
+
+bool udpard_rx_port_new(udpard_rx_port_t* const self,
+ const uint64_t topic_hash,
+ const size_t extent,
+ const udpard_rx_mode_t mode,
+ const udpard_us_t reordering_window,
+ const udpard_rx_mem_resources_t memory,
+ const udpard_rx_port_vtable_t* const vtable)
+{
+ bool ok = (self != NULL) && rx_validate_mem_resources(memory) && (reordering_window >= 0) && (vtable != NULL) &&
+ (vtable->on_message != NULL);
+ if (ok) {
+ mem_zero(sizeof(*self), self);
+ self->topic_hash = topic_hash;
+ self->extent = extent;
+ self->mode = mode;
+ self->memory = memory;
+ self->index_session_by_remote_uid = NULL;
+ self->vtable = vtable;
+ self->user = NULL;
+ switch (mode) {
+ case udpard_rx_stateless:
+ self->vtable_private = &rx_port_vtb_stateless;
+ self->reordering_window = 0;
+ break;
+ case udpard_rx_unordered:
+ self->vtable_private = &rx_port_vtb_unordered;
+ self->reordering_window = 0;
+ break;
+ case udpard_rx_ordered:
+ self->vtable_private = &rx_port_vtb_ordered;
+ self->reordering_window = reordering_window;
+ UDPARD_ASSERT(self->reordering_window >= 0);
+ break;
+ default:
+ ok = false;
+ }
+ }
+ return ok;
}
-int_fast8_t udpardRxRPCDispatcherReceive(struct UdpardRxRPCDispatcher* const self,
- const UdpardMicrosecond timestamp_usec,
- const struct UdpardMutablePayload datagram_payload,
- const uint_fast8_t redundant_iface_index,
- struct UdpardRxRPCPort** const out_port,
- struct UdpardRxRPCTransfer* const out_transfer)
+void udpard_rx_port_free(udpard_rx_t* const rx, udpard_rx_port_t* const port)
{
- bool release = true;
- int_fast8_t result = -UDPARD_ERROR_ARGUMENT;
- if ((self != NULL) && (timestamp_usec != TIMESTAMP_UNSET) && (datagram_payload.data != NULL) &&
- (redundant_iface_index < UDPARD_NETWORK_INTERFACE_COUNT_MAX) && (out_transfer != NULL))
- {
- result = 0; // Invalid frames cannot complete a transfer, so zero is the new default.
- RxFrame frame = {0};
- const bool accept = rxParseFrame(datagram_payload, &frame) &&
- ((frame.meta.data_specifier & DATA_SPECIFIER_SERVICE_NOT_MESSAGE_MASK) != 0) &&
- (frame.meta.dst_node_id == self->local_node_id);
- if (accept)
- {
- // Service transfers cannot be anonymous. This is enforced by the rxParseFrame function; we re-check this.
- UDPARD_ASSERT(frame.meta.src_node_id != UDPARD_NODE_ID_UNSET);
- // Parse the data specifier in the frame.
- out_transfer->is_request =
- (frame.meta.data_specifier & DATA_SPECIFIER_SERVICE_REQUEST_NOT_RESPONSE_MASK) != 0;
- out_transfer->service_id = frame.meta.data_specifier & DATA_SPECIFIER_SERVICE_ID_MASK;
- // Search for the RPC-port that is registered for this service transfer in the tree.
- struct UdpardRxRPCPort* const item =
- (struct UdpardRxRPCPort*) (void*) cavlSearch(out_transfer->is_request ? &self->request_ports
- : &self->response_ports,
- &out_transfer->service_id,
- &rxRPCSearchByServiceID,
- NULL);
- // If such a port is found, accept the frame on it.
- if (item != NULL)
- {
- result = rxPortAccept(&item->port,
- redundant_iface_index,
- timestamp_usec,
- frame,
- self->memory,
- &out_transfer->base);
- release = false;
- } // else, the application is not interested in this service-ID (does not know how to handle it).
- // Expose the port instance to the caller if requested.
- if (out_port != NULL)
- {
- *out_port = item;
- }
- } // else, we didn't accept so we just ignore this frame
- }
- if ((self != NULL) && release)
- {
- memFreePayload(self->memory.payload, datagram_payload);
+ if ((rx != NULL) && (port != NULL)) {
+ while (port->index_session_by_remote_uid != NULL) {
+ rx_session_free((rx_session_t*)(void*)port->index_session_by_remote_uid,
+ &rx->list_session_by_animation,
+ &rx->index_session_by_reordering);
+ }
}
- return result;
}
-// =====================================================================================================================
-// ==================================================== MISC =====================================================
-// =====================================================================================================================
-
-size_t udpardGather(const struct UdpardFragment head, const size_t destination_size_bytes, void* const destination)
-{
- size_t offset = 0;
- if (NULL != destination)
- {
- const struct UdpardFragment* frag = &head;
- while ((frag != NULL) && (offset < destination_size_bytes))
- {
- UDPARD_ASSERT(frag->view.data != NULL);
- const size_t frag_size = smaller(frag->view.size, destination_size_bytes - offset);
- // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling)
- (void) memmove(((byte_t*) destination) + offset, frag->view.data, frag_size);
- offset += frag_size;
- UDPARD_ASSERT(offset <= destination_size_bytes);
- frag = frag->next;
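+/// Handles a received acknowledgment message. The ACK payload carries the 64-bit topic hash of the acknowledged
+/// transfer at offset 0, followed by its 64-bit transfer-ID at offset 8, as deserialized below.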
+static void rx_accept_ack(udpard_rx_t* const rx, const udpard_bytes_t message)
+{
+ if (message.size >= ACK_SIZE_BYTES) {
+ uint64_t topic_hash = 0;
+ uint64_t transfer_id = 0;
+ (void)deserialize_u64(((const byte_t*)message.data) + 0U, &topic_hash);
+ (void)deserialize_u64(((const byte_t*)message.data) + 8U, &transfer_id);
+ tx_receive_ack(rx, topic_hash, transfer_id);
+ }
+}
+
+bool udpard_rx_port_push(udpard_rx_t* const rx,
+ udpard_rx_port_t* const port,
+ const udpard_us_t timestamp,
+ const udpard_udpip_ep_t source_ep,
+ const udpard_bytes_mut_t datagram_payload,
+ const udpard_deleter_t payload_deleter,
+ const uint_fast8_t iface_index)
+{
+ const bool ok = (rx != NULL) && (port != NULL) && (timestamp >= 0) && udpard_is_valid_endpoint(source_ep) &&
+ (datagram_payload.data != NULL) && (iface_index < UDPARD_IFACE_COUNT_MAX) &&
+ (payload_deleter.vtable != NULL) && (payload_deleter.vtable->free != NULL);
+ if (ok) {
+ rx_frame_t frame = { 0 };
+ uint32_t frame_index = 0;
+ uint32_t offset_32 = 0;
+ const bool frame_valid = header_deserialize(
+ datagram_payload, &frame.meta, &frame_index, &offset_32, &frame.base.crc, &frame.base.payload);
+ frame.base.offset = (size_t)offset_32;
+ (void)frame_index; // currently not used by this reassembler implementation.
+ frame.base.origin = datagram_payload; // Take ownership of the payload.
+ if (frame_valid) {
+ if (frame.meta.topic_hash == port->topic_hash) {
+ if (!frame.meta.flag_acknowledgement) {
+ port->vtable_private->accept(rx, port, timestamp, source_ep, &frame, payload_deleter, iface_index);
+ } else {
+ UDPARD_ASSERT(frame.base.offset == 0); // checked by the frame parser
+ rx_accept_ack(rx, frame.base.payload);
+ mem_free_payload(payload_deleter, frame.base.origin);
+ }
+ } else { // Collisions are discovered early so that we don't attempt to allocate sessions for them.
+ mem_free_payload(payload_deleter, frame.base.origin);
+ udpard_remote_t remote = { .uid = frame.meta.sender_uid };
+ remote.endpoints[iface_index] = source_ep;
+ if (port->vtable->on_collision != NULL) {
+ port->vtable->on_collision(rx, port, remote);
+ }
+ }
+ } else {
+ mem_free_payload(payload_deleter, frame.base.origin);
+ ++rx->errors_frame_malformed;
}
}
- return offset;
+ return ok;
}
diff --git a/libudpard/udpard.h b/libudpard/udpard.h
index 2c8a5e1..4e2ce06 100644
--- a/libudpard/udpard.h
+++ b/libudpard/udpard.h
@@ -5,189 +5,44 @@
/// `____/ .___/`___/_/ /_/`____/`__, / .___/_/ /_/`__,_/_/
/// /_/ /____/_/
///
-/// LibUDPard is a compact implementation of the Cyphal/UDP protocol for high-integrity real-time embedded systems.
-/// It is designed for use in robust deterministic embedded systems equipped with at least 64K ROM and RAM.
-/// The codebase is compliant with a large subset of MISRA C, has full test coverage, and is validated by at least
-/// two static analyzers. The library is designed to be compatible with any conventional target platform and
-/// instruction set architecture, from 8 to 64 bit, little- and big-endian, RTOS-based or baremetal,
-/// as long as there is a standards-compliant ISO C99 compiler available.
+/// LibUDPard is a compact implementation of the Cyphal/UDP transport for high-integrity real-time embedded systems.
+/// It is designed for use in robust deterministic embedded systems equipped with at least ~100K ROM and RAM,
+/// as well as in general-purpose software.
///
-/// The library offers a very low-level API that may be cumbersome to use in many applications.
-/// Users seeking a higher-level API are encouraged to use LibCyphal instead, which builds on top of LibUDPard et al.
+/// The codebase is compliant with a large subset of MISRA C and is fully covered by unit and end-to-end tests.
+/// The library is designed to be compatible with any conventional target platform, from 8 to 64 bit, little- and
+/// big-endian, RTOS-based or baremetal, as long as there is a standards-compliant ISO C99 or C11 compiler available.
///
-///
-/// INTEGRATION
-///
-/// The library is intended to be integrated into the end application by simply copying its source files into the
+/// The library is intended to be integrated into the end application by simply copying udpard.c/.h into the
/// source tree of the project; it does not require any special compilation options and should work out of the box.
/// There are build-time configuration parameters defined near the top of udpard.c, but they are optional to use.
///
-/// As explained in this documentation, the library requires a deterministic constant-time dynamic memory allocator.
-/// If your target platform does not provide a deterministic memory manager, it is recommended to use O1Heap
-/// (MIT licensed): https://github.com/pavel-kirienko/o1heap.
-///
-/// To use the library, the application needs to provide an implementation of the UDP/IP stack with IGMP support.
+/// To use the library, the application needs to provide a minimal UDP/IPv4 stack supporting IGMP v2 and passive ARP.
/// POSIX-based systems may use the standard Berkeley sockets API, while more constrained embedded systems may choose
-/// to rely either on a third-party solution like LwIP or a custom UDP/IP stack implementation.
-///
-/// Cyphal/UDP leverages only multicast IP addresses; the underlying UDP/IP stack does not need to support ARP or ICMP.
-///
-///
-/// ARCHITECTURE
-///
-/// In the following description, the terms "local Cyphal node" and "application" are used interchangeably.
-/// Some applications may require more than one logical Cyphal node to operate side-by-side.
-/// Each logical node may utilize more than one network interface for redundancy.
-///
-/// This library implements the Cyphal/UDP transport protocol. Resembling the conventional OSI model, the Cyphal
-/// protocol stack --- when implemented with the help of this library --- consists of the following layers:
-///
-/// LAYER | DESCRIPTION
-/// ----------------|---------------------------------------------------------------------------------------
-/// APPLICATION | User-defined and Cyphal-standard application logic
-/// PRESENTATION | Autogenerated code for DSDL serialization/deserialization (see Nunavut)
-/// +-> TRANSPORT | THIS LIBRARY
-/// | NETWORK | The UDP/IP stack provided by the application (LwIP, custom, Berkeley sockets, etc).
-/// |
-/// +------ you are here
-///
-/// The library consists of three independent parts:
-///
-/// - The transmission pipeline (TX pipeline) for publishing messages and sending RPC-service requests & responses.
-/// - The reception pipeline (RX pipeline), which in turn is built from two sub-pipelines:
-/// - subscriptions -- for subscribing to subjects (aka topics);
-/// - service dispatcher -- for receiving service requests and responses; both clients and servers need this.
-///
-/// As these components share no resources within the library, they can be used in different threads,
-/// provided that there are no thread-unsafe resources shared between them in the application (such as heaps).
-///
-/// The library supports at most UDPARD_NETWORK_INTERFACE_COUNT_MAX redundant network interfaces.
-/// Transfers received from each interface are reassembled independently and the first interface to complete a
-/// transfer is always chosen to deliver the transfer to the application, while the transfers from the slower
-/// interface are discarded as duplicates. The application must assign each of the redundant interface a
-/// unique integer ID in the range [0, UDPARD_NETWORK_INTERFACE_COUNT_MAX) to allow the library to distinguish
-/// between them.
-///
-/// As will be shown below, a typical application with R redundant network interfaces and S topic subscriptions needs
-/// R*(S+2) sockets (or equivalent abstractions provided by the underlying UDP/IP stack).
-///
-/// As a matter of convention, resource disposal functions are named "free" if the memory of the resource itself is
-/// not deallocated, and "destroy" if the memory is deallocated.
-///
-///
-/// Transmission pipeline
-///
-/// The transmission pipeline is used to publish messages and send RPC-service requests and responses to the network
-/// through a particular redundant interface. A Cyphal node with R redundant network interfaces needs to instantiate
-/// R transmission pipelines, one per interface, unless the application is not interested in sending data at all.
-/// The transmission pipeline contains a prioritized queue of UDP datagrams scheduled for transmission via its
-/// network interface. The typical usage pattern is to enqueue Cyphal transfers using dedicated functions (see
-/// udpardTxPublish, udpardTxRequest, udpardTxRespond) into all instances of transmission pipelines
-/// (i.e., once per redundant interface) and periodically check the network interface for readiness to accept writes;
-/// once the interface is ready, pop the next datagram scheduled for transmission from the queue and send it.
-///
-/// Each transmission pipeline instance requires one socket (or a similar abstraction provided by the underlying
-/// UDP/IP stack) that is not connected to any specific remote endpoint (i.e., usable with sendto(),
-/// speaking in terms of Berkeley sockets). In the case of redundant interfaces, each socket may need to be configured
-/// to emit data through its specific interface.
-///
-/// Graphically, the transmission pipeline is arranged as follows:
-///
-/// +---> TX PIPELINE ---> UDP SOCKET ---> REDUNDANT INTERFACE A
-/// |
-/// SERIALIZED TRANSFERS ---+---> TX PIPELINE ---> UDP SOCKET ---> REDUNDANT INTERFACE B
-/// |
-/// +---> ...
-///
-/// The library supports configurable DSCP marking of the outgoing UDP datagrams as a function of Cyphal transfer
-/// priority level. This is configured separately per TX pipeline instance (i.e., per network interface).
-///
-/// The maximum transmission unit (MTU) can also be configured separately per TX pipeline instance.
-/// Applications that are interested in maximizing their wire compatibility should not change the default MTU setting.
-///
-///
-/// Reception pipeline
-///
-/// The reception pipelines are used to subscribe to subjects (aka topics) and to receive RPC-service requests and
-/// responses. The former are handled by "subscriptions" and the latter two are managed by a "service dispatcher".
-/// Said pipelines are entirely independent from each other and can be operated from different threads,
-/// as they share no resources.
-///
-/// The reception pipeline is able to accept datagrams with arbitrary MTU, frames delivered out-of-order (OOO) with
-/// arbitrary duplication, including duplication of non-adjacent frames, and/or frames interleaved between adjacent
-/// transfers. The support for OOO reassembly is particularly interesting when simple repetition coding FEC is used.
-///
-/// The application should instantiate one subscription instance per subject it needs to receive messages from,
-/// irrespective of the number of redundant interfaces. There needs to be one socket (or a similar abstraction
-/// provided by the underlying UDP/IP stack) per subscription instance per redundant interface,
-/// each socket bound to the same UDP/IP endpoint (IP address and UDP port) which is selected by the library when
-/// the subscription is created.
-/// The application needs to listen to all these sockets simultaneously and pass the received UDP datagrams to the
-/// corresponding subscription instance as they arrive, thus unifying the datagrams received from all redundant
-/// interface sockets into a single stream.
-/// At the output, subscription instances provide reassembled and deduplicated stream of Cyphal transfers ready for
-/// deserialization.
-///
-/// Graphically, the subscription pipeline is arranged as shown below.
-/// Remember that the application with S topic subscriptions would have S such pipelines, one per subscription.
-///
-/// REDUNDANT INTERFACE A ---> UDP SOCKET ---+
-/// |
-/// REDUNDANT INTERFACE B ---> UDP SOCKET ---+---> SUBSCRIPTION ---> SERIALIZED TRANSFERS
-/// |
-/// ... ---+
-///
-/// The application should instantiate a single service dispatcher instance irrespective of the number of redundant
-/// interfaces or the set of RPC-services it is interested in (unless it is not interested in RPC-services at all).
-/// The service dispatcher instance requires a single socket (or a similar abstraction provided by the underlying
-/// UDP/IP stack) per redundant interface, each socket bound to the same UDP/IP endpoint (IP address and UDP port)
-/// which is selected by the library when the service dispatcher is created.
-/// The application needs to listen to all these sockets simultaneously and pass the received UDP datagrams to
-/// the service dispatcher instance as they arrive, thus unifying the datagrams received from all redundant
-/// interface sockets into a single stream.
-///
-/// The service dispatcher by itself is not useful; it needs to be configured with the set of RPC-services
-/// that the application is interested in. This is done by creating RPC-service RX ports and registering them
-/// with the service dispatcher. The service dispatcher will then forward the received requests and responses
-/// to the corresponding RPC-service RX ports; the application can then deserialize and process them.
-///
-/// Graphically, the service dispatcher pipeline is arranged as shown below.
-///
-/// REDUNDANT INTERFACE A ---> UDP SOCKET ---+ +---> RPC PORT X ---> SERIALIZED TRANSFERS
-/// | |
-/// REDUNDANT INTERFACE B ---> UDP SOCKET ---+---> SERVICE DISPATCHER ---+---> RPC PORT Y ---> SERIALIZED TRANSFERS
-/// | |
-/// ... ---+ +---> ...
-///
-/// In summary, to make a service request, the application needs a TX pipeline to transmit the request and
-/// a service dispatcher with a registered RPC-service RX port to receive the response. Same holds if the
-/// application needs to handle a service request, except that the RX port will be used to accept the request
-/// and the TX pipeline will be used to transmit the response.
-///
-///
-/// Memory management
+/// to rely either on a third-party solution like LwIP or a custom minimal UDP/IP stack.
///
/// The library can be used either with a regular heap (preferably constant-time) or with a collection of fixed-size
-/// block pool allocators (in safety-certified systems). It is up to the application to choose the desired memory
-/// management strategy; the library is interfaced with the memory managers via a special memory resource abstraction.
-///
-/// Typically, if block pool allocators are used, the following block sizes should be served:
-///
-/// - MTU sized blocks for the TX and RX pipelines (usually less than 2048 bytes);
-/// - TX fragment item sized blocks for the TX pipeline (less than 128 bytes).
-/// - RX session object sized blocks for the RX pipeline (less than 512 bytes);
-/// - RX fragment handle sized blocks for the RX pipeline (less than 128 bytes).
-///
-/// The detailed information is given in the API documentation.
+/// block pool allocators (may be preferable in safety-certified systems).
+/// If block pool allocators are used, the following block sizes should be served:
+/// - MTU-sized blocks for the TX and RX pipelines (typically at most 1.5 KB unless jumbo frames are used).
+/// The TX pipeline adds a small overhead of sizeof(tx_frame_t).
+/// - sizeof(tx_transfer_t) blocks for the TX pipeline to store outgoing transfer metadata.
+/// - sizeof(rx_session_t) blocks for the RX pipeline to store incoming transfer session metadata.
+/// - sizeof(udpard_fragment_t) blocks for the RX pipeline to store received data fragments.
+///
+/// Suitable memory allocators may be found here:
+/// - Constant-time ultrafast deterministic heap: https://github.com/pavel-kirienko/o1heap
+/// - Single-header fixed-size block pool: https://gist.github.com/pavel-kirienko/daf89e0481e6eac0f1fa8a7614667f59
///
/// --------------------------------------------------------------------------------------------------------------------
-///
/// This software is distributed under the terms of the MIT License.
/// Copyright (C) OpenCyphal Development Team
/// Copyright Amazon.com Inc. or its affiliates.
/// SPDX-License-Identifier: MIT
/// Author: Pavel Kirienko
+// ReSharper disable CppUnusedIncludeDirective
+
#ifndef UDPARD_H_INCLUDED
#define UDPARD_H_INCLUDED
@@ -196,594 +51,754 @@
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
-extern "C" {
+extern "C"
+{
#endif
/// Semantic version of this library (not the Cyphal specification).
/// API will be backward compatible within the same major version.
-#define UDPARD_VERSION_MAJOR 2
+#define UDPARD_VERSION_MAJOR 3
#define UDPARD_VERSION_MINOR 0
/// The version number of the Cyphal specification implemented by this library.
-#define UDPARD_CYPHAL_SPECIFICATION_VERSION_MAJOR 1
-#define UDPARD_CYPHAL_SPECIFICATION_VERSION_MINOR 0
-
-/// These error codes may be returned from the library API calls whose return type is a signed integer in the negated
-/// form (e.g., error code 2 returned as -2). A non-negative return value represents success.
-/// API calls whose return type is not a signed integer cannot fail by contract.
-/// No other error states may occur in the library.
-/// By contract, a well-characterized application with properly sized memory pools will never encounter errors.
-/// The error code 1 is not used because -1 is often used as a generic error code in 3rd-party code.
-#define UDPARD_ERROR_ARGUMENT 2
-#define UDPARD_ERROR_MEMORY 3
-#define UDPARD_ERROR_CAPACITY 4
-#define UDPARD_ERROR_ANONYMOUS 5
+#define UDPARD_CYPHAL_VERSION_MAJOR 1
+#define UDPARD_CYPHAL_VERSION_MINOR 1
/// RFC 791 states that hosts must be prepared to accept datagrams of up to 576 octets. Since this library is
/// expected to receive non-IP-fragmented datagrams, the minimum MTU should be larger than 576.
-/// That being said, the MTU here is set to 1408 which is derived as:
-/// 1500B Ethernet MTU (RFC 894) - 60B IPv4 max header - 8B UDP Header - 24B Cyphal header
-#define UDPARD_MTU_DEFAULT 1408U
-/// To guarantee a single frame transfer, the maximum payload size shall be 4 bytes less to accommodate for the CRC.
-#define UDPARD_MTU_DEFAULT_MAX_SINGLE_FRAME (UDPARD_MTU_DEFAULT - 4U)
-
-/// Parameter ranges are inclusive; the lower bound is zero for all. See Cyphal/UDP Specification for background.
-#define UDPARD_SUBJECT_ID_MAX 8191U
-#define UDPARD_SERVICE_ID_MAX 511U
-#define UDPARD_NODE_ID_MAX 0xFFFEU /// 2**16-1 is reserved for the anonymous/broadcast ID.
-#define UDPARD_PRIORITY_MAX 7U
-
-/// This value represents an undefined node-ID: broadcast destination or anonymous source.
-#define UDPARD_NODE_ID_UNSET 0xFFFFU
-
-/// This is the recommended transfer-ID timeout value given in the Cyphal Specification. The application may choose
-/// different values per subscription (i.e., per data specifier) depending on its timing requirements.
-#define UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC 2000000UL
-
-/// The library supports at most this many redundant network interfaces per Cyphal node.
-#define UDPARD_NETWORK_INTERFACE_COUNT_MAX 3U
-
-typedef uint64_t UdpardMicrosecond; ///< UINT64_MAX is not a valid timestamp value.
-typedef uint16_t UdpardPortID;
-typedef uint16_t UdpardNodeID;
-typedef uint64_t UdpardTransferID; ///< UINT64_MAX is not a valid transfer-ID value.
-
-/// Transfer priority level mnemonics per the recommendations given in the Cyphal Specification.
-/// For outgoing transfers they are mapped to DSCP values as configured per redundant interface (per UdpardTx instance).
-enum UdpardPriority
+/// That being said, the MTU here is set to a larger value that is derived as:
+/// 1500B Ethernet MTU (RFC 894) - 60B IPv4 max header - 8B UDP Header - 48B Cyphal header
+/// This is also the default maximum size of a single-frame transfer.
+/// The application can change this value at runtime as needed.
+#define UDPARD_MTU_DEFAULT 1384U
+
+/// An MTU less than this should not be used. This value may be increased in a future version of the library.
+#define UDPARD_MTU_MIN 460U
+
+/// The library supports at most this many local redundant network interfaces.
+#define UDPARD_IFACE_COUNT_MAX 3U
+
+#define UDPARD_IFACE_BITMAP_ALL ((1U << UDPARD_IFACE_COUNT_MAX) - 1U)
+
+/// Timestamps supplied by the application must be non-negative monotonically increasing counts of microseconds.
+typedef int64_t udpard_us_t;
+
+/// See udpard_tx_t::ack_baseline_timeout.
+/// This default value might be a good starting point for many applications running over a local network.
+/// The baseline timeout should be greater than the expected round-trip time (RTT) between the most distant
+/// nodes in the network for a message at the highest priority level.
+#define UDPARD_TX_ACK_BASELINE_TIMEOUT_DEFAULT_us 16000LL
+
+/// The subject-ID only affects the formation of the multicast UDP/IP endpoint address.
+/// In IPv4 networks, it is limited to 23 bits only due to the limited MAC multicast address space.
+/// In IPv6 networks, 32 bits are supported.
+#define UDPARD_IPv4_SUBJECT_ID_MAX 0x7FFFFFUL
+
+typedef enum udpard_prio_t
{
- UdpardPriorityExceptional = 0,
- UdpardPriorityImmediate = 1,
- UdpardPriorityFast = 2,
- UdpardPriorityHigh = 3,
- UdpardPriorityNominal = 4, ///< Nominal priority level should be the default.
- UdpardPriorityLow = 5,
- UdpardPrioritySlow = 6,
- UdpardPriorityOptional = 7,
-};
+ udpard_prio_exceptional = 0,
+ udpard_prio_immediate = 1,
+ udpard_prio_fast = 2,
+ udpard_prio_high = 3,
+ udpard_prio_nominal = 4, ///< Nominal priority level should be the default.
+ udpard_prio_low = 5,
+ udpard_prio_slow = 6,
+ udpard_prio_optional = 7,
+} udpard_prio_t;
+#define UDPARD_PRIORITY_COUNT 8U
+
+typedef struct udpard_tree_t
+{
+ struct udpard_tree_t* up;
+ struct udpard_tree_t* lr[2];
+ int_fast8_t bf;
+} udpard_tree_t;
-/// The AVL tree node structure is exposed here to avoid pointer casting/arithmetics inside the library.
-/// The user code is not expected to interact with this type except if advanced introspection is required.
-struct UdpardTreeNode
+typedef struct udpard_listed_t
{
- struct UdpardTreeNode* up; ///< Do not access this field.
- struct UdpardTreeNode* lr[2]; ///< Left and right children of this node may be accessed for tree traversal.
- int_fast8_t bf; ///< Do not access this field.
-};
+ struct udpard_listed_t* next;
+ struct udpard_listed_t* prev;
+} udpard_listed_t;
-struct UdpardMutablePayload
+typedef struct udpard_list_t
{
- size_t size;
- void* data;
-};
+ udpard_listed_t* head; ///< NULL if list empty
+ udpard_listed_t* tail; ///< NULL if list empty
+} udpard_list_t;
-struct UdpardPayload
+typedef struct udpard_bytes_t
{
size_t size;
const void* data;
-};
+} udpard_bytes_t;
-/// This type represents payload as an ordered sequence of its fragments to eliminate data copying.
-/// To free a fragmented payload buffer, the application needs to traverse the list and free each fragment's payload
-/// as well as the payload structure itself, assuming that it is also heap-allocated.
-/// The model is as follows:
-///
-/// (payload header) ---> UdpardFragment:
-/// next ---> UdpardFragment...
-/// origin ---> (the free()able payload data buffer)
-/// view ---> (somewhere inside the payload data buffer)
-///
-/// Payloads of received transfers are represented using this type, where each fragment corresponds to a frame.
-/// The application can either consume them directly or to copy the data into a contiguous buffer beforehand
-/// at the expense of extra time and memory utilization.
-struct UdpardFragment
+typedef struct udpard_bytes_scattered_t
{
- /// Points to the next fragment in the fragmented buffer; NULL if this is the last fragment.
- struct UdpardFragment* next;
+ udpard_bytes_t bytes;
+ const struct udpard_bytes_scattered_t* next; ///< NULL in the last fragment.
+} udpard_bytes_scattered_t;
- /// Contains the actual data to be used by the application.
- /// The memory pointed to by this fragment shall not be freed by the application.
- struct UdpardPayload view;
+typedef struct udpard_bytes_mut_t
+{
+ size_t size;
+ void* data;
+} udpard_bytes_mut_t;
- /// This entity points to the base buffer that contains this fragment.
- /// The application can use this pointer to free the outer buffer after the payload has been consumed.
- /// In the most simple case this field is identical to the "view" field above, but it is not always the case.
- struct UdpardMutablePayload origin;
+/// The size can be changed arbitrarily. The default is a compromise between copy overhead, footprint, and utility.
+#ifndef UDPARD_USER_CONTEXT_PTR_COUNT
+#define UDPARD_USER_CONTEXT_PTR_COUNT 4
+#endif
+
+/// The library carries the user-provided context from inputs to outputs without interpreting it,
+/// allowing the application to associate its own data with various entities inside the library.
+typedef union udpard_user_context_t
+{
+ void* ptr[UDPARD_USER_CONTEXT_PTR_COUNT];
+ unsigned char bytes[sizeof(void*) * UDPARD_USER_CONTEXT_PTR_COUNT];
+} udpard_user_context_t;
+#ifdef __cplusplus
+#define UDPARD_USER_CONTEXT_NULL \
+ udpard_user_context_t {}
+#else
+#define UDPARD_USER_CONTEXT_NULL ((udpard_user_context_t){ .ptr = { NULL } })
+#endif
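+
+/// For illustration only: an application might stash a pointer to its own object in the user context,
+/// e.g., when pushing a transfer, and recover it later from the feedback (`my_object` is a hypothetical name):
+///
+///     udpard_user_context_t ctx = UDPARD_USER_CONTEXT_NULL;
+///     ctx.ptr[0] = my_object;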
+
+/// Zeros if invalid/unset/unavailable.
+typedef struct udpard_udpip_ep_t
+{
+ uint32_t ip;
+ uint16_t port;
+} udpard_udpip_ep_t;
+
+/// The remote information can be used for sending P2P responses back to the sender, if needed.
+/// The RX pipeline will attempt to discover the sender's UDP/IP endpoint per redundant interface
+/// based on the source address of the received UDP datagrams. If the sender's endpoint could not be discovered
+/// for a certain interface (e.g., if the sender is not connected to that interface), the corresponding entry in
+/// the endpoints array will be zeroed and udpard_is_valid_endpoint() will return false for that entry.
+///
+/// Cyphal/UDP thus allows nodes to change their network interface addresses dynamically.
+/// The library does not make any assumptions about the specific values and their uniqueness;
+/// as such, multiple remote nodes can even share the same endpoint.
+typedef struct udpard_remote_t
+{
+ uint64_t uid;
+ udpard_udpip_ep_t endpoints[UDPARD_IFACE_COUNT_MAX]; ///< Zeros in unavailable ifaces.
+} udpard_remote_t;
+
+/// Returns true if the given UDP/IP endpoint appears to be valid. Zero IP/port are considered invalid.
+bool udpard_is_valid_endpoint(const udpard_udpip_ep_t ep);
+
+/// Returns the destination multicast UDP/IP endpoint for the given subject-ID.
+/// The application should use this function when setting up subscription sockets or sending datagrams in
+/// udpard_tx_vtable_t::eject_subject().
+/// If the subject-ID exceeds UDPARD_IPv4_SUBJECT_ID_MAX, the excessive bits are masked out.
+/// For P2P use the unicast node address directly instead, as provided by the RX pipeline per received transfer.
+udpard_udpip_ep_t udpard_make_subject_endpoint(const uint32_t subject_id);
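+
+/// For illustration only: a minimal sketch, assuming POSIX Berkeley sockets, of binding a subscription socket
+/// to the endpoint of a given subject (error handling and the IGMP group membership setup are omitted):
+///
+///     const udpard_udpip_ep_t ep = udpard_make_subject_endpoint(my_subject_id);
+///     struct sockaddr_in addr = { 0 };
+///     addr.sin_family      = AF_INET;
+///     addr.sin_port        = htons(ep.port);
+///     addr.sin_addr.s_addr = htonl(ep.ip);
+///     bind(fd, (struct sockaddr*)&addr, sizeof(addr));
+///     // ...plus IP_ADD_MEMBERSHIP per redundant interface to join the multicast group...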
+
+/// The memory resource semantics are similar to malloc/free.
+/// Consider using O1Heap: https://github.com/pavel-kirienko/o1heap.
+/// The API documentation is written on the assumption that the memory management functions are O(1).
+typedef struct udpard_deleter_t udpard_deleter_t;
+typedef struct udpard_mem_t udpard_mem_t;
+
+typedef struct udpard_deleter_vtable_t
+{
+ void (*free)(void* context, size_t size, void* pointer);
+} udpard_deleter_vtable_t;
+
+struct udpard_deleter_t
+{
+ const udpard_deleter_vtable_t* vtable;
+ void* context;
};
-/// Cyphal/UDP uses only multicast traffic.
-/// Unicast support is not required; one consequence is that ARP tables are not needed.
-struct UdpardUDPIPEndpoint
+typedef struct udpard_mem_vtable_t
+{
+ udpard_deleter_vtable_t base;
+ void* (*alloc)(void* context, size_t size);
+} udpard_mem_vtable_t;
+
+struct udpard_mem_t
{
- uint32_t ip_address;
- uint16_t udp_port;
+ const udpard_mem_vtable_t* vtable;
+ void* context;
};
+/// A helper that upcasts a memory resource into a deleter.
+udpard_deleter_t udpard_make_deleter(const udpard_mem_t memory);
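+
+/// For illustration only: a minimal heap-backed memory resource sketch (requires <stdlib.h>; names are
+/// hypothetical; a deterministic allocator such as O1Heap is preferable in real-time applications):
+///
+///     static void* my_alloc(void* const context, const size_t size) { (void)context; return malloc(size); }
+///     static void my_free(void* const context, const size_t size, void* const pointer)
+///     {
+///         (void)context; (void)size; free(pointer);
+///     }
+///     static const udpard_mem_vtable_t my_mem_vtable = { .base = { .free = my_free }, .alloc = my_alloc };
+///     const udpard_mem_t mem = { .vtable = &my_mem_vtable, .context = NULL };
+///     const udpard_deleter_t del = udpard_make_deleter(mem); // When only the ability to free is needed.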
+
+/// This type represents a payload as a binary tree of its fragments ordered by offset to eliminate data copying.
+/// The fragments are guaranteed to be non-redundant and non-overlapping; therefore, they are also ordered by their
+/// end offsets. See the helper functions below for managing the fragment tree.
+typedef struct udpard_fragment_t
+{
+ /// The index_offset BST orders fragments by their offset (and also end=offset+size) within the transfer payload.
+ /// It must be the first member.
+ udpard_tree_t index_offset;
+
+ /// Offset of this fragment's payload within the full payload buffer. The ordering key for the index_offset tree.
+ size_t offset;
+
+ /// Contains the actual data to be used by the application.
+ /// The memory pointed to by this fragment shall not be freed nor mutated by the application.
+ udpard_bytes_t view;
+
+ /// Points to the base buffer that contains this fragment.
+ /// The application can use this pointer to free the outer buffer after the payload has been consumed.
+ /// This memory must not be accessed by the application for any purpose other than freeing it.
+ udpard_bytes_mut_t origin;
+
+ /// When the fragment is no longer needed, this deleter shall be used to free the origin buffer.
+ /// We provide a dedicated deleter per fragment to allow NIC drivers to manage the memory directly,
+ /// which allows DMA access to the fragment data without copying.
+ /// See https://github.com/OpenCyphal-Garage/libcyphal/issues/352#issuecomment-2163056622
+ udpard_deleter_t payload_deleter;
+} udpard_fragment_t;
+
+/// Frees the memory allocated for the payload and its fragment headers using the correct deleters: the fragment
+/// deleter is given explicitly (use udpard_make_deleter() to obtain it from a memory resource), and the payload is
+/// freed using the payload_deleter per fragment.
+/// All fragments in the tree will be freed and invalidated.
+/// The passed fragment can be any fragment inside the tree (not necessarily the root).
+/// If the fragment argument is NULL, the function has no effect. The complexity is linear in the number of fragments.
+void udpard_fragment_free_all(udpard_fragment_t* const frag, const udpard_deleter_t fragment_deleter);
+
+/// Given any fragment in a transfer, returns the fragment that contains the given payload offset.
+/// Returns NULL if the offset points beyond the stored payload, or if frag is NULL.
+/// This is also the idiomatic way to find the head of the fragment list when invoked with offset zero.
+/// This function accepts any node in the fragment tree, not necessarily the head or the root, and
+/// has a logarithmic complexity in the number of fragments, which makes it very efficient.
+udpard_fragment_t* udpard_fragment_seek(const udpard_fragment_t* frag, const size_t offset);
+
+/// Given any fragment in a transfer, returns the next fragment in strictly ascending order of offsets.
+/// The offset of the next fragment always equals the sum of the offset and size of the current fragment.
+/// Returns NULL if there is no next fragment or if the given fragment is NULL.
+/// The complexity is amortized-constant.
+udpard_fragment_t* udpard_fragment_next(const udpard_fragment_t* frag);
+
+/// A convenience function built on top of udpard_fragment_seek() and udpard_fragment_next().
+/// Copies `size` bytes of payload stored in a fragment tree starting from `offset` into `destination`.
+/// The cursor pointer is an iterator updated to the last fragment touched, enabling very efficient sequential
+/// access without repeated searches; it is never set to NULL.
+/// Returns the number of bytes copied into the contiguous destination buffer, which equals `size` unless
+/// `offset+size` exceeds the amount of data stored in the fragments.
+/// The function has no effect and returns zero if the destination buffer or iterator pointer are NULL.
+size_t udpard_fragment_gather(const udpard_fragment_t** cursor,
+ const size_t offset,
+ const size_t size,
+ void* const destination);
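+
+/// For illustration only: a sketch of copying a received payload into a contiguous buffer and then releasing
+/// the fragment tree (`transfer`, `buf`, and `port_memory` are hypothetical application-side names):
+///
+///     uint8_t buf[1024];
+///     const udpard_fragment_t* cursor = transfer.payload;
+///     const size_t copied = udpard_fragment_gather(&cursor, 0, sizeof(buf), buf);
+///     udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port_memory.fragment));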
+
// =====================================================================================================================
-// ================================================= MEMORY RESOURCE =================================================
+// ================================================= TX PIPELINE =================================================
// =====================================================================================================================
-/// A pointer to the memory allocation function. The semantics are similar to malloc():
-/// - The returned pointer shall point to an uninitialized block of memory that is at least "size" bytes large.
-/// - If there is not enough memory, the returned pointer shall be NULL.
-/// - The memory shall be aligned at least at max_align_t.
-/// - The execution time should be constant (O(1)).
-/// - The worst-case memory consumption (worst fragmentation) should be understood by the developer.
+/// Graphically, the transmission pipeline is arranged as shown below.
+/// There is a single pipeline instance that serves all topics, P2P, and all network interfaces.
///
-/// If the standard dynamic memory manager of the target platform does not satisfy the above requirements,
-/// consider using O1Heap: https://github.com/pavel-kirienko/o1heap. Alternatively, some applications may prefer to
-/// use a set of fixed-size block pool allocators (see the high-level overview for details).
+/// +---> REDUNDANT INTERFACE A
+/// |
+/// TRANSFERS ---> udpard_tx_t ---+---> REDUNDANT INTERFACE B
+/// |
+/// +---> ...
///
-/// The API documentation is written on the assumption that the memory management functions have constant
-/// complexity and are non-blocking.
+/// The RX pipeline is linked with the TX pipeline for reliable message management: the RX pipeline notifies
+/// the TX when acknowledgments are received, and also enqueues outgoing acknowledgments to confirm received messages.
+/// Thus the transmission pipeline is inherently remote-controlled by other nodes; keep in mind that
+/// new frames may appear in the TX pipeline even while the application is idle.
///
-/// The value of the user reference is taken from the corresponding field of the memory resource structure.
-typedef void* (*UdpardMemoryAllocate)(void* const user_reference, const size_t size);
-
-/// The counterpart of the above -- this function is invoked to return previously allocated memory to the allocator.
-/// The size argument contains the amount of memory that was originally requested via the allocation function;
-/// its value is undefined if the pointer is NULL.
-/// The semantics are similar to free():
-/// - The pointer was previously returned by the allocation function.
-/// - The pointer may be NULL, in which case the function shall have no effect.
-/// - The execution time should be constant (O(1)).
+/// The reliable delivery mechanism informs the application about the number of remote subscribers that confirmed the
+/// reception of each reliable message. The library uses heuristics to determine the number of attempts needed to
+/// deliver the message, but it is guaranteed to cease attempts by the specified deadline.
+/// Rudimentary congestion control is implemented by exponential backoff of retransmission intervals.
+/// The reliability is chosen by the publisher on a per-message basis; as such, the same topic may carry both
+/// reliable and unreliable messages depending on who is publishing at any given time.
///
-/// The value of the user reference is taken from the corresponding field of the memory resource structure.
-typedef void (*UdpardMemoryDeallocate)(void* const user_reference, const size_t size, void* const pointer);
+/// Reliable messages published over high-fanout topics will generate a large number of feedback acknowledgments,
+/// which must be kept in mind when designing the network.
+///
+/// Subscribers operating in the ORDERED mode do not acknowledge messages that have been designated as lost
+/// (arriving too late, after the reordering window has passed). No negative acknowledgments are sent either
+/// because there may be other subscribers on the same topic who might still be able to receive the message.
+typedef struct udpard_tx_t udpard_tx_t;
-/// A kind of memory resource that can only be used to free memory previously allocated by the user.
-/// Instances are mostly intended to be passed by value.
-struct UdpardMemoryDeleter
+typedef struct udpard_tx_mem_resources_t
{
- void* user_reference; ///< Passed as the first argument.
- UdpardMemoryDeallocate deallocate; ///< Shall be a valid pointer.
-};
+ /// The queue bookkeeping structures are allocated per outgoing transfer, i.e., one per udpard_tx_push().
+ /// Each allocation is sizeof(tx_transfer_t).
+ udpard_mem_t transfer;
-/// A memory resource encapsulates the dynamic memory allocation and deallocation facilities.
-/// Note that the library allocates a large amount of small fixed-size objects for bookkeeping purposes;
-/// allocators for them can be implemented using fixed-size block pools to eliminate extrinsic memory fragmentation.
-/// Instances are mostly intended to be passed by value.
-struct UdpardMemoryResource
+ /// The UDP datagram payload buffers are allocated per frame, each at most HEADER_SIZE+MTU+sizeof(tx_frame_t).
+ /// These may be distinct per interface to allow each interface to draw buffers from a specific memory region
+ /// or a specific DMA-compatible memory pool.
+ ///
+    /// IMPORTANT: distinct memory resources increase TX memory usage and data copying.
+ /// If possible, it is recommended to use the same memory resource for all interfaces, because the library will be
+ /// able to avoid frame duplication and instead reuse each frame across all interfaces when the MTUs are identical.
+ udpard_mem_t payload[UDPARD_IFACE_COUNT_MAX];
+} udpard_tx_mem_resources_t;
+
+/// Outcome notification for a reliable transfer previously scheduled for transmission.
+typedef struct udpard_tx_feedback_t
{
- void* user_reference; ///< Passed as the first argument.
- UdpardMemoryDeallocate deallocate; ///< Shall be a valid pointer.
- UdpardMemoryAllocate allocate; ///< Shall be a valid pointer.
-};
+ udpard_user_context_t user; ///< Same value that was passed to udpard_tx_push().
-// =====================================================================================================================
-// ================================================= TX PIPELINE =================================================
-// =====================================================================================================================
+ /// The number of remote nodes that acknowledged the reception of the transfer.
+ /// For P2P transfers, this value is either 0 (failure) or 1 (success).
+ uint16_t acknowledgements;
+} udpard_tx_feedback_t;
-/// The set of memory resources is used per a TX pipeline instance.
-/// These are used to serve the memory needs of the library to keep state while assembling outgoing frames.
-/// Several memory resources are provided to enable fine control over the allocated memory.
-///
-/// A TX queue uses these memory resources for allocating the enqueued items (UDP datagrams).
-/// There are exactly two allocations per enqueued item:
-/// - the first for bookkeeping purposes (UdpardTxItem)
-/// - second for payload storage (the frame data)
-/// In a simple application, there would be just one memory resource shared by all parts of the library.
-/// If the application knows its MTU, it can use block allocation to avoid extrinsic fragmentation.
-///
-struct UdpardTxMemoryResources
+/// Request to transmit a UDP datagram over the specified interface.
+/// Which interface indexes are available is determined by the user when pushing the transfer.
+/// If Berkeley sockets or similar API is used, the application should use a dedicated socket per redundant interface.
+typedef struct udpard_tx_ejection_t
{
- /// The fragment handles are allocated per payload fragment; each handle contains a pointer to its fragment.
- /// Each instance is of a very small fixed size, so a trivial zero-fragmentation block allocator is enough.
- struct UdpardMemoryResource fragment;
+ /// The current time carried over from the API function that initiated the ejection.
+ udpard_us_t now;
- /// The payload fragments are allocated per payload frame; each payload fragment is at most MTU-sized buffer,
- /// so a trivial zero-fragmentation MTU-sized block allocator is enough if MTU is known in advance.
- struct UdpardMemoryResource payload;
-};
+ /// Specifies when the frame is to be considered expired and dropped if not yet transmitted by then;
+ /// whether it is honored depends on the implementation of the NIC driver (most traditional drivers ignore it).
+ /// The library guarantees that deadline >= now at the time of ejection -- expired frames are purged beforehand.
+ udpard_us_t deadline;
-/// The transmission pipeline is a prioritized transmission queue that keeps UDP datagrams (aka transport frames)
-/// destined for transmission via one network interface.
-/// Applications with redundant network interfaces are expected to have one instance of this type per interface.
-/// Applications that are not interested in transmission may have zero such instances.
-///
-/// All operations are logarithmic in complexity on the number of enqueued items.
-///
-/// Once initialized, instances cannot be copied.
-///
-/// API functions that work with this type are named "udpardTx*()", find them below.
-///
-/// FUTURE: Eventually we might consider adding another way of arranging the transmission pipeline where the UDP
-/// datagrams ready for transmission are not enqueued into the local prioritized queue but instead are sent directly
-/// to the network interface driver using a dedicated callback. The callback would accept not just a single
-/// chunk of data but a list of three chunks to avoid copying the source transfer payload: the datagram header,
-/// the payload, and (only for the last frame) the CRC. The driver would then use some form of vectorized IO or
-/// MSG_MORE/UDP_CORK to transmit the data; the advantage of this approach is that up to two data copy operations are
-/// eliminated from the stack and the memory allocator is not used at all. The disadvantage is that if the driver
-/// callback is blocking, the application thread will be blocked as well; plus the driver will be responsible
-/// for the correct prioritization of the outgoing datagrams according to the DSCP value.
-struct UdpardTx
-{
- /// Pointer to the node-ID of the local node, which is used to populate the source node-ID field of outgoing
- /// transfers.
- /// This is made a pointer to allow the user to easily change the node-ID after a plug-and-play node-ID allocation
- /// across multiple instances (remember there is a separate instance per redundant interface).
- /// The node-ID value should be set to UDPARD_NODE_ID_UNSET if the local node is anonymous
- /// (e.g., during PnP allocation or if no transmission is needed).
- const UdpardNodeID* local_node_id;
-
- /// The maximum number of UDP datagrams this instance is allowed to enqueue.
- /// An attempt to push more will fail with UDPARD_ERROR_CAPACITY.
- /// The purpose of this limitation is to ensure that a blocked queue does not exhaust the memory.
- size_t queue_capacity;
-
- /// The maximum number of Cyphal transfer payload bytes per UDP datagram.
- /// The Cyphal/UDP header and the final CRC are added to this value to obtain the total UDP datagram payload size.
- /// See UDPARD_MTU_*.
- /// The value can be changed arbitrarily at any time between enqueue operations.
- /// The value is constrained by the library to be positive.
- size_t mtu;
-
- /// The mapping from the Cyphal priority level in [0,7], where the highest priority is at index 0
- /// and the lowest priority is at the last element of the array, to the IP DSCP field value.
- /// See UdpardPriority.
- /// By default, the mapping is initialized per the recommendations given in the Cyphal/UDP specification.
- /// The value can be changed arbitrarily at any time between enqueue operations.
- uint_least8_t dscp_value_per_priority[UDPARD_PRIORITY_MAX + 1U];
-
- /// Refer to UdpardTxMemoryResources.
- struct UdpardTxMemoryResources memory;
-
- /// The number of frames that are currently contained in the queue, initially zero.
- /// READ-ONLY
- size_t queue_size;
-
- /// Internal use only.
- /// READ-ONLY
- struct UdpardTreeNode* root;
-};
+ uint_fast8_t iface_index; ///< The interface index on which the datagram is to be transmitted.
+ uint_fast8_t dscp; ///< Set the DSCP field of the outgoing UDP packet to this.
+
+ /// If the datagram pointer is retained by the application, udpard_tx_refcount_inc() must be invoked on it
+ /// to prevent it from being garbage collected. When no longer needed (e.g., upon transmission),
+ /// udpard_tx_refcount_dec() must be invoked to release the reference.
+ udpard_bytes_t datagram;
-/// One transport frame (UDP datagram) stored in the UdpardTx transmission queue along with its metadata.
-/// The datagram should be sent to the indicated UDP/IP endpoint with the specified DSCP value.
-/// The datagram should be discarded (transmission aborted) if the deadline has expired.
-/// All fields are READ-ONLY except the mutable `datagram_payload` field, which could be nullified to indicate
-/// a transfer of the payload memory ownership to somewhere else.
-struct UdpardTxItem
+ /// This is the same value that was passed to udpard_tx_push().
+ udpard_user_context_t user;
+} udpard_tx_ejection_t;
+
+/// Virtual function table for the TX pipeline, to be provided by the application.
+typedef struct udpard_tx_vtable_t
+{
+ /// Invoked from udpard_tx_poll() to push outgoing UDP datagrams into the socket/NIC driver.
+ /// It is GUARANTEED that ONLY udpard_tx_poll() can invoke this function; in particular, pushing new transfers
+ /// will not trigger ejection callbacks.
+ /// The callback must not mutate the TX pipeline (no udpard_tx_push/cancel/free).
+ ///
+ /// The destination endpoint is provided only for P2P transfers; for multicast transfers, the application
+ /// must compute the endpoint using udpard_make_subject_endpoint() based on the subject-ID. This is because
+ /// the subject-ID may be changed by the consensus algorithm at any time if a collision/divergence is detected.
+ /// The application is expected to rely on the user context to access the topic context for subject-ID derivation.
+ bool (*eject_subject)(udpard_tx_t*, udpard_tx_ejection_t*);
+ bool (*eject_p2p)(udpard_tx_t*, udpard_tx_ejection_t*, udpard_udpip_ep_t destination);
+} udpard_tx_vtable_t;
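+///
+/// An illustrative eject_p2p() implementation for a non-blocking socket driver. Everything prefixed with `my_`
+/// is a hypothetical application-side entity, and the exact meaning of the boolean result is an assumption here
+/// (acceptance of the datagram by the driver):
+///
+///     static bool my_eject_p2p(udpard_tx_t* tx, udpard_tx_ejection_t* ej, udpard_udpip_ep_t dst)
+///     {
+///         my_app_t* const app = (my_app_t*) tx->user;
+///         if (!my_udp_send(app->p2p_socket[ej->iface_index], dst, ej->dscp, ej->datagram))
+///         {
+///             return false;  // Assumed to tell the library the frame was not accepted this time.
+///         }
+///         udpard_tx_refcount_inc(ej->datagram);  // Retain the buffer if the driver sends asynchronously.
+///         return true;  // The driver calls udpard_tx_refcount_dec() upon completion; see below.
+///     }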
+
+/// The application must create a single instance of this struct to manage the TX pipeline.
+/// A single instance manages all redundant interfaces.
+struct udpard_tx_t
{
- /// Internal use only; do not access this field.
- struct UdpardTreeNode base;
-
- /// Points to the next frame in this transfer or NULL. This field is mostly intended for own needs of the library.
- /// Normally, the application would not use it because transfer frame ordering is orthogonal to global TX ordering.
- /// It can be useful though for pulling pending frames from the TX queue if at least one frame of their transfer
- /// failed to transmit; the idea is that if at least one frame is missing, the transfer will not be received by
- /// remote nodes anyway, so all its remaining frames can be dropped from the queue at once using udpardTxPop().
- struct UdpardTxItem* next_in_transfer;
-
- /// This is the same value that is passed to udpardTxPublish/Request/Respond.
- /// Frames whose transmission deadline is in the past should be dropped (transmission aborted).
- UdpardMicrosecond deadline_usec;
-
- /// The IP differentiated services code point (DSCP) is used to prioritize UDP frames on the network.
- /// LibUDPard selects the DSCP value based on the transfer priority level and the configured DSCP mapping.
- uint_least8_t dscp;
-
- /// Holds the original transfer priority level (before DSCP mapping, see above `dscp`).
- enum UdpardPriority priority;
-
- /// This UDP/IP datagram compiled by libudpard should be sent to this endpoint.
- /// The endpoint is always at a multicast address.
- struct UdpardUDPIPEndpoint destination;
-
- /// The completed UDP/IP datagram payload. This includes the Cyphal header as well as all required CRCs.
- /// It should be sent through the socket (or equivalent abstraction) verbatim.
- struct UdpardMutablePayload datagram_payload;
-
- /// This opaque pointer is assigned the value that is passed to udpardTxPublish/Request/Respond.
- /// The library itself does not make use of it but the application can use it to provide continuity between
- /// its high-level transfer objects and datagrams that originate from it.
- /// If not needed, the application can set it to NULL.
- void* user_transfer_reference;
+ const udpard_tx_vtable_t* vtable;
+
+ /// The globally unique identifier of the local node. Must not change after initialization.
+ uint64_t local_uid;
+
+ /// A randomly initialized transfer-ID counter for all outgoing P2P transfers. Must not be changed by the application.
+ uint64_t p2p_transfer_id;
+
+ /// The maximum number of Cyphal transfer payload bytes per UDP datagram. See UDPARD_MTU_*.
+ /// The Cyphal/UDP header is added to this value to obtain the total UDP datagram payload size.
+ /// The value can be changed arbitrarily between enqueue operations as long as it is at least UDPARD_MTU_MIN.
+ ///
+ /// IMPORTANT: distinct MTU values increase tx memory usage and data copying.
+ /// If possible, it is recommended to use the same MTU for all interfaces, because the library will be
+ /// able to avoid frame duplication and instead reuse each frame across all interfaces.
+ size_t mtu[UDPARD_IFACE_COUNT_MAX];
+
+ /// This duration is used to derive the acknowledgment timeout for reliable transfers in tx_ack_timeout().
+ /// It must be a positive number of microseconds.
+ ///
+ /// The baseline timeout should be greater than the expected round-trip time (RTT) between the most distant
+ /// nodes in the network for a message at the highest priority level.
+ ///
+ /// A sensible default is provided at initialization, which can be overridden by the application.
+ udpard_us_t ack_baseline_timeout;
+
+ /// Optional user-managed mapping from the Cyphal priority level in [0,7] (highest priority at index 0)
+ /// to the IP DSCP field value for use by the application when transmitting. By default, all entries are zero.
+ uint_least8_t dscp_value_per_priority[UDPARD_PRIORITY_COUNT];
+
+ /// The maximum number of enqueued UDP datagrams irrespective of the transfer count, pooled across all ifaces.
+ /// The purpose of this limitation is to ensure that a blocked interface queue does not exhaust the memory.
+ /// When the limit is reached, the library will apply simple heuristics to choose which transfers to sacrifice.
+ size_t enqueued_frames_limit;
+
+ /// The number of frames that are currently registered in the queue, initially zero.
+ /// This includes frames that have been handed over to the NIC driver for transmission but are not yet released
+ /// via udpard_tx_refcount_dec().
+ /// READ-ONLY!
+ size_t enqueued_frames_count;
+
+ udpard_tx_mem_resources_t memory;
+
+ /// Error counters incremented automatically when the corresponding error condition occurs.
+ /// These counters are never decremented by the library but they can be reset by the application if needed.
+ uint64_t errors_oom; ///< A transfer could not be enqueued due to OOM, while there was queue space available.
+ uint64_t errors_capacity; ///< A transfer could not be enqueued due to queue capacity limit.
+ uint64_t errors_sacrifice; ///< A transfer had to be sacrificed to make room for a new transfer.
+ uint64_t errors_expiration; ///< A transfer had to be dequeued due to deadline expiration.
+
+ /// Internal use only, do not modify! See tx_transfer_t for details.
+ udpard_list_t queue[UDPARD_IFACE_COUNT_MAX][UDPARD_PRIORITY_COUNT]; ///< Next to transmit at the tail.
+ udpard_list_t agewise; ///< Oldest at the tail.
+ udpard_tree_t* index_staged;
+ udpard_tree_t* index_deadline;
+ udpard_tree_t* index_transfer;
+ udpard_tree_t* index_transfer_ack;
+
+ /// Opaque pointer for the application use only. Not accessed by the library.
+ void* user;
};
-/// Construct a new transmission pipeline with the specified queue capacity and memory resource.
-/// Refer to the documentation for UdpardTx for more information.
-/// The other parameters will be initialized to the recommended defaults automatically,
-/// which can be changed later by modifying the struct fields directly.
-/// No memory allocation is going to take place until the pipeline is actually written to.
-///
-/// The instance does not hold any resources itself except for the allocated memory.
-/// To safely discard it, simply pop all enqueued frames from it.
-///
-/// The return value is zero on success, otherwise it is a negative error code.
-/// The time complexity is constant. This function does not invoke the dynamic memory manager.
-int_fast8_t udpardTxInit(struct UdpardTx* const self,
- const UdpardNodeID* const local_node_id,
- const size_t queue_capacity,
- const struct UdpardTxMemoryResources memory);
-
-/// This function serializes a message transfer into a sequence of UDP datagrams and inserts them into the prioritized
-/// transmission queue at the appropriate position. Afterwards, the application is supposed to take the enqueued frames
-/// from the transmission queue using the function udpardTxPeek and transmit them one by one. Each transmitted
-/// (or discarded, e.g., due to timeout) frame should be removed from the queue using udpardTxPop. The enqueued items
-/// are prioritized according to their Cyphal transfer priority to avoid the inner priority inversion. The transfer
-/// payload will be copied into the transmission queue so that the lifetime of the datagrams is not related to the
-/// lifetime of the input payload buffer.
-///
-/// The MTU of the generated datagrams is dependent on the value of the MTU setting at the time when this function
-/// is invoked. The MTU setting can be changed arbitrarily between invocations.
-///
-/// The transfer_id parameter will be used to populate the transfer_id field of the generated datagrams.
-/// The caller shall increment the transfer-ID counter after each successful invocation of this function
-/// per redundant interface; the same transfer published over redundant interfaces shall have the same transfer-ID.
-/// There shall be a separate transfer-ID counter per subject (topic).
-/// The lifetime of the transfer-ID counter must exceed the lifetime of the intent to publish on this subject (topic);
-/// one common approach is to use a static variable or a field in a type that contains the state of the publisher.
+/// The parameters are default-initialized (MTU defaults to UDPARD_MTU_DEFAULT and counters are reset)
+/// and can be changed later by modifying the struct fields directly. No memory allocation is going to take place
+/// until the first transfer is successfully pushed via udpard_tx_push().
+///
+/// The local UID should be a globally unique EUI-64 identifier assigned to the local node. It may be a random
+/// EUI-64, which is especially useful for short-lived software nodes.
+///
+/// The p2p_transfer_id_initial value must be chosen randomly such that it is likely to be distinct per application
+/// startup. See the transfer-ID counter requirements in udpard_tx_push() for details.
+///
+/// The enqueued_frames_limit should be large enough to accommodate the expected burstiness of the application traffic.
+/// If the limit is reached, the library will apply heuristics to sacrifice some older transfers to make room
+/// for the new one. This behavior allows the library to make progress even when some interfaces are stalled.
+///
+/// True on success, false if any of the arguments are invalid.
+bool udpard_tx_new(udpard_tx_t* const self,
+ const uint64_t local_uid,
+ const uint64_t p2p_transfer_id_initial,
+ const size_t enqueued_frames_limit,
+ const udpard_tx_mem_resources_t memory,
+ const udpard_tx_vtable_t* const vtable);
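+///
+/// Illustrative initialization (my_random_u64(), my_mem_resources, my_tx_vtable, and my_app are hypothetical):
+///
+///     udpard_tx_t tx;
+///     if (!udpard_tx_new(&tx, my_random_u64() /* random EUI-64 */, my_random_u64(), 1000,
+///                        my_mem_resources, &my_tx_vtable))
+///     {
+///         // Invalid arguments.
+///     }
+///     tx.user = &my_app;  // Lets the eject callbacks reach the application state (e.g., sockets).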
+
+/// Submit a transfer for transmission. The payload data will be copied into the transmission queue, so it can be
+/// invalidated immediately after this function returns. When redundant interfaces are used, the library will attempt to
+/// minimize the number of copies by reusing frames across interfaces with identical MTU values and memory resources.
+///
+/// The caller shall increment the transfer-ID counter after each successful invocation of this function per topic.
+/// There shall be a separate transfer-ID counter per topic. The initial value shall be chosen randomly
+/// such that it is likely to be distinct per application startup (embedded systems can use noinit memory sections,
+/// hash uninitialized SRAM, use timers or ADC noise, etc.).
+/// Related thread on random transfer-ID init: https://forum.opencyphal.org/t/improve-the-transfer-id-timeout/2375
+///
+/// The user context value is carried through to the callbacks. It must contain enough context to allow subject-ID
+/// derivation inside udpard_tx_vtable_t::eject_subject(). For example, it may contain a pointer to the topic struct.
+///
+/// Returns true on success. Runtime failures increment the corresponding error counters,
+/// while invocations with invalid arguments just return false without modifying the queue state.
+///
+/// The enqueued transfer will be emitted over all interfaces specified in the iface_bitmap.
+/// The subject-ID is computed inside the udpard_tx_vtable_t::eject_subject() callback at the time of transmission.
+/// The subject-ID cannot be computed beforehand at the time of enqueuing because the topic->subject consensus protocol
+/// may find a different subject-ID allocation between the time of enqueuing and the time of (re)transmission.
+///
+/// An attempt to push a transfer with a (topic hash, transfer-ID) pair that is already enqueued will fail,
+/// as that violates the transfer-ID uniqueness requirement stated above.
+///
+/// The feedback callback is set to NULL for best-effort (non-acknowledged) transfers. Otherwise, the transfer is
+/// treated as reliable, requesting a delivery acknowledgement from remote subscribers with repeated retransmissions if
+/// necessary; it is guaranteed that delivery attempts will cease no later than by the specified deadline.
+/// The feedback callback is ALWAYS invoked EXACTLY ONCE per reliable transfer pushed via udpard_tx_push() successfully,
+/// indicating the number of remote nodes that acknowledged the reception of the transfer.
+/// The retransmission delay is increased exponentially with each retransmission attempt as a means of congestion
+/// control and latency adaptation; please refer to udpard_tx_t::ack_baseline_timeout for details.
+///
+/// Beware that reliable delivery may cause message reordering. For example, when sending messages A and B,
+/// and A is lost on the first attempt, the next attempt may be scheduled after B is published,
+/// so that the remote sees B followed by A. Most applications tolerate it without issues; if this is not the case,
+/// the subscriber should use the ORDERED subscription mode (refer to the RX pipeline for details),
+/// which will reconstruct the original message ordering.
+///
+/// On success, the function allocates a single transfer state instance and a number of payload fragments.
+/// The time complexity is O(p + log e), where p is the transfer payload size, and e is the number of
+/// transfers already enqueued in the transmission queue.
+bool udpard_tx_push(udpard_tx_t* const self,
+ const udpard_us_t now,
+ const udpard_us_t deadline,
+ const uint16_t iface_bitmap,
+ const udpard_prio_t priority,
+ const uint64_t topic_hash,
+ const uint64_t transfer_id,
+ const udpard_bytes_scattered_t payload,
+ void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort.
+ const udpard_user_context_t user);
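+///
+/// An illustrative best-effort publication. `my_topic` is a hypothetical per-topic struct holding the hash and the
+/// transfer-ID counter; my_scatter_of() is a hypothetical helper producing a udpard_bytes_scattered_t view over one
+/// contiguous buffer; `prio` and `my_user_ctx` are assumed to be defined by the caller:
+///
+///     const bool ok = udpard_tx_push(&tx, now, now + 50000 /* 50 ms deadline */, 0x3U /* ifaces 0 and 1 */,
+///                                    prio, my_topic->hash, my_topic->transfer_id,
+///                                    my_scatter_of(data, size), NULL /* best-effort */, my_user_ctx);
+///     if (ok) { my_topic->transfer_id++; }  // One increment per successful push, as required above.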
+
+/// This is a specialization of the general push function for P2P transfers.
+/// P2P transfers treat the topic hash as the destination node's UID.
+/// The transfer-ID counter is shared across all outgoing P2P transfers and is managed automatically.
+/// If out_transfer_id is not NULL, the assigned internal transfer-ID is stored there for use with udpard_tx_cancel()
+/// (pass the destination node's UID as the topic hash).
+bool udpard_tx_push_p2p(udpard_tx_t* const self,
+ const udpard_us_t now,
+ const udpard_us_t deadline,
+ const udpard_prio_t priority,
+ const udpard_remote_t remote, // Endpoints may be invalid for some ifaces.
+ const udpard_bytes_scattered_t payload,
+ void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort.
+ const udpard_user_context_t user,
+ uint64_t* const out_transfer_id);
+
+/// This should be invoked whenever the socket/NIC of this queue becomes ready to accept new datagrams for transmission.
+/// It is also fine to invoke it unconditionally at a fixed period to drive the transmission process.
+/// Internally, the function will query the scheduler for the next frame to be transmitted and will attempt
+/// to submit it via the eject_subject()/eject_p2p() callbacks provided in the vtable.
+/// The iface bitmap indicates which interfaces are currently ready to accept new datagrams.
+/// The function may deallocate memory. The time complexity is logarithmic in the number of enqueued transfers.
+void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now, const uint16_t iface_bitmap);
+
+/// Cancel a previously enqueued transfer.
+/// To cancel a P2P transfer, pass the destination node's UID as the topic_hash.
+/// If provided, the feedback callback will be invoked reporting zero acknowledgements (i.e., failure).
+/// Not safe to call from the ejection callbacks.
+/// Returns true if a transfer was found and cancelled, false if no such transfer was found.
+/// The complexity is O(log t + f), where t is the number of enqueued transfers,
+/// and f is the number of frames in the transfer.
+/// The function will free the memory associated with the transfer.
+bool udpard_tx_cancel(udpard_tx_t* const self, const uint64_t topic_hash, const uint64_t transfer_id);
+
+/// Like udpard_tx_cancel(), but cancels all transfers matching the given topic hash.
+/// Returns the number of matched transfers.
+/// This is important to invoke when destroying a topic to ensure no dangling callbacks remain.
+size_t udpard_tx_cancel_all(udpard_tx_t* const self, const uint64_t topic_hash);
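+///
+/// e.g., as part of a hypothetical topic teardown routine:
+///
+///     (void) udpard_tx_cancel_all(&tx, my_topic->hash);  // my_topic is hypothetical; see udpard_tx_push() above.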
+
+/// Returns a bitmap of interfaces that have pending transmissions. This is useful for IO multiplexing loops.
+/// Zero indicates that there are no pending transmissions.
+/// Which interfaces are usable is defined by the remote endpoints provided when pushing transfers.
+uint16_t udpard_tx_pending_ifaces(const udpard_tx_t* const self);
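+///
+/// Illustrative IO-multiplexing usage (my_now() and my_writable_subset(), a hypothetical poll()/select() wrapper
+/// returning the writable subset of the given interfaces, are application-side sketches):
+///
+///     const uint16_t pending = udpard_tx_pending_ifaces(&tx);
+///     if (pending != 0)
+///     {
+///         udpard_tx_poll(&tx, my_now(), my_writable_subset(pending));
+///     }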
+
+/// When a datagram is ejected and the application opts to keep it, these functions must be used to manage the
+/// datagram buffer lifetime. The datagram will be freed once the reference count reaches zero.
+void udpard_tx_refcount_inc(const udpard_bytes_t tx_payload_view);
+void udpard_tx_refcount_dec(const udpard_bytes_t tx_payload_view);
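+///
+/// Illustrative completion hook of an asynchronous driver that retained the buffer in its ejection callback
+/// (see the eject_p2p() sketch above; my_tx_done is hypothetical):
+///
+///     static void my_tx_done(const udpard_bytes_t datagram) { udpard_tx_refcount_dec(datagram); }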
+
+/// Drops all enqueued items; afterward, the instance is safe to discard. Reliable transfer callbacks are still invoked.
+void udpard_tx_free(udpard_tx_t* const self);
+
+// =====================================================================================================================
+// ================================================= RX PIPELINE =================================================
+// =====================================================================================================================
+
+/// The reception (RX) pipeline is used to subscribe to subjects and to receive P2P transfers.
+/// The reception pipeline is highly robust and is able to accept datagrams with arbitrary MTU distinct per interface,
+/// delivered out-of-order (OOO) with duplication and arbitrary interleaving between transfers.
+/// All redundant interfaces are pooled together into a single fragment stream per RX port,
+/// thus providing seamless failover and great resilience against packet loss on any of the interfaces.
+/// The RX pipeline operates at the speed/latency of the best-performing interface at any given time.
///
-/// The user_transfer_reference is an opaque pointer that will be assigned to the user_transfer_reference field of
-/// each enqueued item. The library itself does not use or check this value in any way, so it can be NULL if not needed.
+/// The application should instantiate one RX port instance per subject it needs to receive messages from,
+/// irrespective of the number of redundant interfaces. There needs to be one socket (or a similar abstraction
+/// provided by the underlying UDP/IP stack) per RX port instance per redundant interface,
+/// each socket bound to the same UDP/IP endpoint (IP address and UDP port) obtained using udpard_make_subject_endpoint.
+/// The application needs to listen to all these sockets simultaneously and pass the received UDP datagrams to the
+/// corresponding RX port instance as they arrive.
///
-/// The deadline_usec value will be used to populate the eponymous field of the generated datagrams
-/// (all will share the same deadline value).
-/// This feature is intended to allow aborting frames that could not be transmitted before the specified deadline;
-/// therefore, normally, the timestamp value should be in the future.
-/// The library itself, however, does not use or check this value in any way, so it can be zero if not needed
-/// (this is not recommended for real-time systems).
+/// P2P transfers are handled in a similar way, except that the topic hash is replaced with the destination node's UID,
+/// and the UDP/IP endpoints are unicast addresses instead of multicast addresses.
///
-/// Note that due to the priority ordering, transient transfer loss may occur if the user increases the priority
-/// level on a given port. This is because the frames of the new transfer will be enqueued before the frames of
-/// the previous transfer, so the frames of the previous transfer will be transmitted only after the frames of
-/// the new transfer are transmitted, causing the receiver to discard them as duplicates due to their lower transfer-ID.
-/// To avoid this, it is necessary to wait for all frames originating from the port to be delivered before increasing
-/// the priority level on the port. The "user_transfer_reference" may help here as it allows the user to establish
-/// traceability from enqueued transfer frames (datagrams) back to the port they originate from.
+/// Graphically, the subscription pipeline is arranged per port as shown below.
+/// Remember that the application with N RX ports would have N such pipelines, one per port.
///
-/// The function returns the number of UDP datagrams enqueued, which is always a positive number, in case of success.
-/// In case of failure, the function returns a negated error code.
+/// REDUNDANT INTERFACE A ---> UDP SOCKET ---+
+/// |
+/// REDUNDANT INTERFACE B ---> UDP SOCKET ---+---> udpard_rx_port_t ---> TRANSFERS
+/// |
+/// ... ---+
///
-/// UDPARD_ERROR_ARGUMENT may be returned in the following cases:
-/// - Any of the input arguments except user_transfer_reference are NULL.
-/// - The priority or the port-ID exceed their respective maximums.
-/// - The payload pointer is NULL while the payload size is nonzero.
+/// The transfer reassembly state machine can operate in several modes described below. First, a brief summary:
///
-/// UDPARD_ERROR_ANONYMOUS is returned if local node is anonymous (the local node-ID is unset) and
-/// the transfer payload cannot fit into a single datagram (a multi-frame transfer is required).
+/// Mode Guarantees Limitations Reordering window
+/// -----------------------------------------−------------------------------------------------------------------
+/// ORDERED Strictly increasing transfer-ID May delay transfers, CPU heavier Non-negative microseconds
+/// UNORDERED Unique transfer-ID Ordering not guaranteed Ignored
+/// STATELESS Constant time, constant memory 1-frame only, dups, no responses Ignored
///
-/// UDPARD_ERROR_MEMORY is returned if a TX frame could not be allocated due to the memory being exhausted.
-/// UDPARD_ERROR_CAPACITY is returned if the capacity of the queue would be exceeded by this operation.
-/// In such cases, all frames allocated for this transfer (if any) will be deallocated automatically.
-/// In other words, either all frames of the transfer are enqueued successfully, or none are.
+/// If not sure, choose unordered. The ordered mode is a good fit for ordering-sensitive use cases like state
+/// estimators and control loops, but it is not suitable for P2P.
+/// The stateless mode is chiefly intended for the heartbeat topic.
///
-/// The memory allocation requirement is two allocations per datagram:
-/// a single-frame transfer takes two allocations; a multi-frame transfer of N frames takes N*2 allocations.
-/// In each pair of allocations:
-/// - the first allocation is for `UdpardTxItem`; the size is `sizeof(UdpardTxItem)`;
-/// the TX queue `memory.fragment` memory resource is used for this allocation (and later for deallocation);
-/// - the second allocation is for payload storage (the frame data) - size is normally MTU but could be less for
-/// the last frame of the transfer; the TX queue `memory.payload` memory resource is used for this allocation.
+/// ORDERED
///
-/// The time complexity is O(p + log e), where p is the amount of payload in the transfer, and e is the number of
-/// frames already enqueued in the transmission queue.
-int32_t udpardTxPublish(struct UdpardTx* const self,
- const UdpardMicrosecond deadline_usec,
- const enum UdpardPriority priority,
- const UdpardPortID subject_id,
- const UdpardTransferID transfer_id,
- const struct UdpardPayload payload,
- void* const user_transfer_reference);
-
-/// This is similar to udpardTxPublish except that it is intended for service request transfers.
-/// It takes the node-ID of the server that is intended to receive the request.
+/// Each transfer is received at most once. The sequence of transfers delivered (ejected)
+/// to the application is STRICTLY INCREASING (with possible gaps in case of loss).
///
-/// The transfer_id parameter will be used to populate the transfer_id field of the generated datagrams.
-/// The caller shall increment the transfer-ID counter after each successful invocation of this function
-/// per redundant interface; the same transfer published over redundant interfaces shall have the same transfer-ID.
-/// There shall be a separate transfer-ID counter per pair of (service-ID, server node-ID).
-/// The lifetime of the transfer-ID counter must exceed the lifetime of the intent to invoke this service
-/// on this server node; one common approach is to use a static array or a struct field indexed by
-/// the server node-ID per service-ID (memory-constrained applications may choose a more compact container;
-/// e.g., a list or an AVL tree).
+/// The reassembler may hold completed transfers for a brief time if they arrive out-of-order,
+/// hoping for the earlier missing transfers to show up, such that they are not permanently lost.
+/// For example, a sequence 1 2 4 3 5 will be delivered as 1 2 3 4 5 if 3 arrives shortly after 4;
+/// however, if 3 does not arrive within the configured reordering window,
+/// the application will receive 1 2 4 5, and transfer 3 will be permanently lost even if it arrives later
+/// because accepting it without violating the strictly increasing transfer-ID constraint is not possible.
///
-/// Additional error conditions:
-/// - UDPARD_ERROR_ARGUMENT if the server node-ID value is invalid.
-/// - UDPARD_ERROR_ANONYMOUS if the local node is anonymous (the local node-ID is unset).
+/// This mode requires much more bookkeeping, which results in a greater processing load per received fragment/transfer.
///
-/// Other considerations are the same as for udpardTxPublish.
-int32_t udpardTxRequest(struct UdpardTx* const self,
- const UdpardMicrosecond deadline_usec,
- const enum UdpardPriority priority,
- const UdpardPortID service_id,
- const UdpardNodeID server_node_id,
- const UdpardTransferID transfer_id,
- const struct UdpardPayload payload,
- void* const user_transfer_reference);
-
-/// This is similar to udpardTxRequest except that it takes the node-ID of the client instead of server.
-/// The transfer-ID must be the same as that of the corresponding RPC-request transfer;
-/// this is to allow the client to match responses with their requests.
-int32_t udpardTxRespond(struct UdpardTx* const self,
- const UdpardMicrosecond deadline_usec,
- const enum UdpardPriority priority,
- const UdpardPortID service_id,
- const UdpardNodeID client_node_id,
- const UdpardTransferID transfer_id,
- const struct UdpardPayload payload,
- void* const user_transfer_reference);
-
-/// This function accesses the enqueued UDP datagram scheduled for transmission next. The queue itself is not modified
-/// (i.e., the accessed element is not removed). The application should invoke this function to collect the datagrams
-/// enqueued by udpardTxPublish/Request/Respond whenever the socket (or equivalent abstraction) becomes writable.
+/// Zero is not really a special case for the reordering window; it simply means that out-of-order transfers
+/// are not waited for at all (declared permanently lost immediately), and no received transfer is delayed
+/// before ejection to the application.
///
-/// The timestamp values of the enqueued items are initialized with deadline_usec from udpardTxPublish/Request/Respond.
-/// The timestamps are used to specify the transmission deadline. It is up to the application and/or the socket layer
-/// to implement the discardment of timed-out datagrams. The library does not check it, so a frame that is
-/// already timed out may be returned here.
+/// The ORDERED mode is mostly intended for applications like state estimators, control systems, and data streaming
+/// where ordering is critical.
///
-/// If the queue is empty or if the argument is NULL, the returned value is NULL.
+/// UNORDERED
///
-/// If the queue is non-empty, the returned value is a pointer to its top element (i.e., the next item to transmit).
-/// The returned pointer points to an object allocated in the dynamic storage; it should be eventually freed by the
-/// application by calling `udpardTxFree`. The memory shall not be freed before the item is removed
-/// from the queue by calling udpardTxPop; this is because until udpardTxPop is executed, the library retains
-/// ownership of the item. The pointer retains validity until explicitly freed by the application; in other words,
-/// calling udpardTxPop does not invalidate the object.
+/// Each transfer is ejected immediately upon successful reassembly. Ordering is not enforced,
+/// but duplicates are still removed. For example, a sequence 1 2 4 3 5 will be delivered as-is without delay.
///
-/// Calling functions that modify the queue may cause the next invocation to return a different pointer.
+/// This mode neither rejects nor delays transfers arriving late, making it the desired choice for applications
+/// where all transfers need to be received no matter the order. This is in particular useful for request-response
+/// topics, where late arrivals occur not only due to network conditions but also due to the inherent
+/// asynchrony between requests and responses. For example, node A could publish messages X and Y on subject S,
+/// while node B could respond to X only after receiving Y, thus causing the response to X to arrive late with
+/// respect to Y. This would cause the ORDERED mode to delay or drop the response to X, which is undesirable;
+/// therefore, the UNORDERED mode is preferred for request-response topics.
///
-/// The payload buffer is allocated in the dynamic storage of the queue. The application may transfer ownership of
-/// the payload to a different application component (f.e. to transmission media) by copying the pointer and then
-/// (if the ownership transfer was accepted) by nullifying `datagram_payload` fields of the frame (`size` & `data`).
-/// If these fields stay with their original values, the `udpardTxFree` (after proper `udpardTxPop` of course) will
-/// deallocate the payload buffer. In any case, the payload has to be eventually deallocated by using the TX queue
-/// `memory.payload` memory resource. It will be automatically done by the `udpardTxFree` (if the payload still
-/// stays in the item), OR if moved, it is the responsibility of the application to eventually (f.e. at the end of
-/// transmission) deallocate the memory with the TX queue `memory.payload` memory resource.
-/// Note that the mentioned above nullification of the `datagram_payload` fields is the
-/// only reason why a returned TX item pointer is mutable. It was constant in the past (before v2),
-/// but it was changed to be mutable to allow the payload ownership transfer.
+/// The unordered mode should be the default mode for most use cases.
///
-/// The time complexity is logarithmic of the queue size. This function does not invoke the dynamic memory manager.
-struct UdpardTxItem* udpardTxPeek(const struct UdpardTx* const self);
-
-/// This function transfers the ownership of the specified item of the prioritized transmission queue from the queue
-/// to the application. The item does not necessarily need to be the top one -- it is safe to dequeue any item.
-/// The item is dequeued but not invalidated; it is the responsibility of the application to deallocate its memory
-/// later. The memory SHALL NOT be deallocated UNTIL this function is invoked (use `udpardTxFree` helper).
-/// The function returns the same pointer that it is given except that it becomes mutable.
+/// STATELESS
///
-/// If any of the arguments are NULL, the function has no effect and returns NULL.
+/// Only single-frame transfers are accepted (where the entire payload fits into a single datagram,
+/// or the extent does not exceed the MTU). No attempt to enforce ordering or remove duplicates is made.
+/// The return path is only discovered for the one interface that delivered the transfer.
+/// A transfer arriving over N redundant interfaces is delivered N times (once per interface).
///
-/// The time complexity is logarithmic of the queue size. This function does not invoke the dynamic memory manager.
-struct UdpardTxItem* udpardTxPop(struct UdpardTx* const self, struct UdpardTxItem* const item);
+/// The stateless mode allocates only a fragment header per accepted frame and does not contain any
+/// variable-complexity processing logic, enabling great scalability for topics with a very large number of
+/// publishers where unordered and duplicated messages are acceptable, such as the heartbeat topic.
-/// This is a simple helper that frees the memory allocated for the item and its payload,
-/// using the correct sizes and memory resources.
-/// If the item argument is NULL, the function has no effect. The time complexity is constant.
-/// If the item frame payload is NULL then it is assumed that the payload buffer was already freed,
-/// or moved to a different owner (f.e. to media layer).
-void udpardTxFree(const struct UdpardTxMemoryResources memory, struct UdpardTxItem* const item);
+/// The application will have a single RX instance to manage all subscriptions and P2P ports.
+typedef struct udpard_rx_t
+{
+ udpard_list_t list_session_by_animation; ///< Oldest at the tail.
+ udpard_tree_t* index_session_by_reordering; ///< Earliest reordering window closure on the left.
-// =====================================================================================================================
-// ================================================= RX PIPELINE =================================================
-// =====================================================================================================================
+ uint64_t errors_oom; ///< A frame could not be processed (transfer possibly dropped) due to OOM.
+ uint64_t errors_frame_malformed; ///< A received frame was malformed and thus dropped.
+ uint64_t errors_transfer_malformed; ///< A transfer could not be reassembled correctly.
-/// This type represents an open input port, such as a subscription to a subject (topic), a service server port
-/// that accepts RPC-service requests, or a service client port that accepts RPC-service responses.
-///
-/// The library performs transfer reassembly, deduplication, and integrity checks, along with the management of
-/// redundant network interfaces.
-struct UdpardRxPort
+ /// Incremented when an ack cannot be enqueued (including when tx is NULL).
+ /// If tx is available, inspect its error counters for details.
+ uint64_t errors_ack_tx;
+
+ /// The transmission pipeline is needed to manage ack transmission and removal of acknowledged transfers.
+ /// If the application wants to only listen, the pointer may be NULL (no acks will be sent).
+ /// When initializing the library, the TX instance needs to be created first.
+ udpard_tx_t* tx;
+
+ void* user; ///< Opaque pointer for the application use only. Not accessed by the library.
+} udpard_rx_t;
+
+/// These are used to serve the memory needs of the library to keep state while reassembling incoming transfers.
+/// Several memory resources are provided to enable fine control over the allocated memory if necessary; however,
+/// simple applications may choose to use the same memory resource implemented via malloc()/free() for all of them.
+typedef struct udpard_rx_mem_resources_t
+{
+ /// Provides memory for rx_session_t described below.
+ /// Each instance is fixed-size, so a trivial zero-fragmentation block allocator is sufficient.
+ udpard_mem_t session;
+
+ /// The udpard_fragment_t handles are allocated per payload fragment; each contains a pointer to its fragment.
+ /// Each instance is of a very small fixed size, so a trivial zero-fragmentation block allocator is sufficient.
+ udpard_mem_t fragment;
+} udpard_rx_mem_resources_t;
+
+typedef struct udpard_rx_port_t udpard_rx_port_t;
+typedef struct udpard_rx_transfer_t udpard_rx_transfer_t;
+
+/// RX port mode for transfer reassembly behavior.
+typedef enum udpard_rx_mode_t
+{
+ udpard_rx_unordered = 0,
+ udpard_rx_ordered = 1,
+ udpard_rx_stateless = 2,
+} udpard_rx_mode_t;
+
+/// Provided by the application per port instance to specify the callbacks to be invoked on certain events.
+/// This design allows distinct callbacks per port, which is especially useful for the P2P port.
+typedef struct udpard_rx_port_vtable_t
{
- /// The maximum payload size that can be accepted at this port.
- /// The rest will be truncated away following the implicit truncation rule defined in the Cyphal specification.
- /// READ-ONLY
+ /// A new message is received on a port. The handler takes ownership of the payload; it must free it after use.
+ void (*on_message)(udpard_rx_t*, udpard_rx_port_t*, udpard_rx_transfer_t);
+
+ /// A topic hash collision is detected on a port.
+ /// On P2P ports, this indicates that the destination UID doesn't match the local UID (misaddressed message);
+ /// safe to ignore.
+ /// May be NULL if the application is not interested.
+ void (*on_collision)(udpard_rx_t*, udpard_rx_port_t*, udpard_remote_t);
+} udpard_rx_port_vtable_t;
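+///
+/// An illustrative message handler. Per udpard_rx_transfer_t below, the handler owns the payload and must free it;
+/// my_consume() is hypothetical, and the udpard_fragment_free_all() argument list is an assumption based on its
+/// mention near udpard_rx_transfer_t:
+///
+///     static void my_on_message(udpard_rx_t* rx, udpard_rx_port_t* port, udpard_rx_transfer_t tr)
+///     {
+///         (void) rx;
+///         my_consume(tr.payload, tr.payload_size_stored);
+///         udpard_fragment_free_all(tr.payload, port->memory.fragment);  // Assumed signature.
+///     }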
+
+/// This type represents an open input port, such as a subscription to a topic.
+struct udpard_rx_port_t
+{
+ /// Transfers whose topic hash does not match this value are filtered out and the collision callback is invoked.
+ /// For P2P ports, this is the destination node's UID (i.e., the local node's UID).
+ uint64_t topic_hash;
+
+ /// Transfer payloads exceeding this extent may be truncated.
+ /// The total size of the received payload may still exceed this extent setting by some small margin.
size_t extent;
- /// Refer to the Cyphal specification for the description of the transfer-ID timeout.
- /// By default, this is set to UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC and it can be changed by the user.
- /// This field can be adjusted at runtime arbitrarily; e.g., this is useful to implement adaptive timeouts.
- UdpardMicrosecond transfer_id_timeout_usec;
+ /// The behavior is undefined if the reassembly mode or the reordering window is changed on a live port.
+ udpard_rx_mode_t mode;
+ udpard_us_t reordering_window;
- /// Libudpard creates a new session instance per remote node-ID that emits transfers matching this port.
+ udpard_rx_mem_resources_t memory;
+
+ /// Libudpard creates a new session instance per remote UID that emits transfers matching this port.
/// For example, if the local node is subscribed to a certain subject and there are X nodes publishing
/// transfers on that subject, then there will be X sessions created for that subject.
- /// Same applies to RPC-services as well.
- ///
- /// Once a session is created, it is never freed again until the port that owns it (this structure) is destroyed.
- /// This is in line with the assumption that the network configuration is usually mostly static, and that
- /// once a node has started emitting data on a certain port, it is likely to continue doing so.
- /// Applications where this is not the case may consider cycling their ports periodically
- /// by destroying and re-creating them immediately.
///
- /// Each session instance takes sizeof(UdpardInternalRxSession) bytes of dynamic memory for itself,
- /// which is at most 512 bytes on wide-word platforms (on small word size platforms it is usually much smaller).
+ /// Each session instance takes sizeof(rx_session_t) bytes of dynamic memory for itself.
/// On top of that, each session instance holds memory for the transfer payload fragments and small fixed-size
- /// metadata objects called "fragment handles" (at most 128 bytes large, usually much smaller,
- /// depending on the pointer width and the word size), one handle per fragment.
+ /// metadata objects of type udpard_fragment_t, one handle per fragment.
///
/// The transfer payload memory is not allocated by the library but rather moved from the application
- /// when the corresponding UDP datagram is received. If the library chooses to keep the frame payload
- /// (which is the case if the frame is not a duplicate, the frame sequence is valid, and the received payload
- /// does not exceed the extent configured for the port), a new fragment handle is allocated and it takes ownership
- /// of the entire datagram payload (including all overheads such as the Cyphal/UDP frame header and possible
- /// data that spills over the configured extent value for this port).
+ /// when the corresponding UDP datagram is received. If the library chooses to keep the frame payload,
+ /// a new fragment handle is allocated and it takes ownership of the entire datagram payload.
/// If the library does not need the datagram to reassemble the transfer, its payload buffer is freed immediately.
/// There is a 1-to-1 correspondence between the fragment handles and the payload fragments.
/// Remote nodes that emit highly fragmented transfers cause a higher memory utilization in the local node
/// because of the increased number of fragment handles and per-datagram overheads.
///
- /// In the worst case, the library may keep up to two full transfer payloads in memory at the same time
- /// (two transfer states are kept to allow acceptance of interleaved frames).
- ///
/// Ultimately, the worst-case memory consumption is dependent on the configured extent and the transmitting
/// side's MTU, as these parameters affect the number of payload buffers retained in memory.
///
/// The maximum memory consumption is when there is a large number of nodes emitting data such that each node
- /// begins a multi-frame transfer while never completing it.
- ///
- /// Everything stated above holds for service transfers as well.
+ /// begins a multi-frame transfer while never completing it. The library mitigates this by pruning stale
+ /// transfers and removing sessions that have been inactive for a long time.
///
/// If the dynamic memory pool(s) is(are) sized correctly, and all transmitting nodes are known to avoid excessive
- /// fragmentation of egress transfers (which can be ensured by not using MTU values smaller than the default),
+ /// fragmentation of egress transfers (which can be ensured by avoiding small MTU values),
/// the application is guaranteed to never encounter an out-of-memory (OOM) error at runtime.
/// High-integrity applications can optionally police ingress traffic for MTU violations and filter it before
/// passing it to the library; alternatively, applications could limit memory consumption per port,
/// which is easy to implement since each port gets a dedicated set of memory resources.
- ///
- /// READ-ONLY
- struct UdpardInternalRxSession* sessions;
-};
-
-/// The set of memory resources is used per an RX pipeline instance such as subscription or a service dispatcher.
-/// These are used to serve the memory needs of the library to keep state while reassembling incoming transfers.
-/// Several memory resources are provided to enable fine control over the allocated memory;
-/// simple applications may choose to use the same memory resource implemented via malloc()/free() for all of them.
-struct UdpardRxMemoryResources
-{
- /// The session memory resource is used to provide memory for the session instances described above.
- /// Each instance is fixed-size, so a trivial zero-fragmentation block allocator is sufficient.
- struct UdpardMemoryResource session;
+ udpard_tree_t* index_session_by_remote_uid;
- /// The fragment handles are allocated per payload fragment; each handle contains a pointer to its fragment.
- /// Each instance is of a very small fixed size, so a trivial zero-fragmentation block allocator is sufficient.
- struct UdpardMemoryResource fragment;
+ const udpard_rx_port_vtable_t* vtable;
+ const struct udpard_rx_port_vtable_private_t* vtable_private;
- /// The library never allocates payload buffers itself, as they are handed over by the application via
- /// udpardRx*Receive. Once a buffer is handed over, the library may choose to keep it if it is deemed to be
- /// necessary to complete a transfer reassembly, or to discard it if it is deemed to be unnecessary.
- /// Discarded payload buffers are freed using this object.
- struct UdpardMemoryDeleter payload;
+ /// Opaque pointer for the application use only. Not accessed by the library.
+ void* user;
};
/// Represents a received Cyphal transfer.
-/// The payload is owned by this instance, so the application must free it after use; see udpardRxTransferFree.
-struct UdpardRxTransfer
+/// The payload is owned by this instance, so the application must free it after use using udpard_fragment_free_all()
+/// together with the port's fragment memory resource.
+struct udpard_rx_transfer_t
{
- UdpardMicrosecond timestamp_usec;
- enum UdpardPriority priority;
- UdpardNodeID source_node_id;
- UdpardTransferID transfer_id;
+ udpard_us_t timestamp;
+ udpard_prio_t priority;
+ uint64_t transfer_id;
+ udpard_remote_t remote;
/// The total size of the payload available to the application, in bytes, is provided for convenience;
/// it is the sum of the sizes of all its fragments. For example, if the sender emitted a transfer of 2000
/// bytes split into two frames, 1408 bytes in the first frame and 592 bytes in the second frame,
- /// then the payload_size will be 2000 and the payload buffer will contain two fragments of 1408 and 592 bytes.
- /// The transfer CRC is not included here. If the received payload exceeds the configured extent,
- /// the excess payload will be discarded and the payload_size will be set to the extent.
+ /// then the payload_size_stored will be 2000 and the payload buffer will contain two fragments of 1408 and
+ /// 592 bytes. If the received payload exceeds the configured extent, fragments starting past the extent are
+ /// dropped but fragments crossing it are kept, so payload_size_stored may exceed the extent.
///
/// The application is given ownership of the payload buffer, so it is required to free it after use;
/// this requires freeing both the handles and the payload buffers they point to.
@@ -791,324 +806,101 @@ struct UdpardRxTransfer
/// the application is responsible for freeing them using the correct memory resource.
///
/// If the payload is empty, the corresponding buffer pointers may be NULL.
- size_t payload_size;
- struct UdpardFragment payload;
-};
-
-/// This is, essentially, a helper that frees the memory allocated for the payload and its fragment headers
-/// using the correct memory resources. The application can do the same thing manually if it has access to the
-/// required context to compute the size, or if the memory resource implementation does not require deallocation size.
-///
-/// The head of the fragment list is passed by value so it is not freed. This is in line with the UdpardRxTransfer
-/// design, where the head is stored by value to reduce indirection in small transfers. We call it Scott's Head.
-///
-/// If any of the arguments are NULL, the function has no effect.
-void udpardRxFragmentFree(const struct UdpardFragment head,
- const struct UdpardMemoryResource memory_fragment,
- const struct UdpardMemoryDeleter memory_payload);
+ size_t payload_size_stored;
-// --------------------------------------------- SUBJECTS ---------------------------------------------
+ /// The original size of the transfer payload before extent-based dropping, in bytes.
+ /// This may exceed the stored payload if fragments beyond the extent were skipped. Cannot be less than
+ /// payload_size_stored.
+ size_t payload_size_wire;
-/// This is a specialization of a port for subject (topic) subscriptions.
-///
-/// In Cyphal/UDP, each subject (topic) has a specific IP multicast group address associated with it.
-/// This address is contained in the field named "udp_ip_endpoint".
-/// The application is expected to open a separate socket bound to that endpoint per redundant interface,
-/// and then feed the UDP datagrams received from these sockets into udpardRxSubscriptionReceive,
-/// collecting UdpardRxTransfer instances at the output.
-///
-/// Observe that the subscription pipeline is entirely independent of the node-ID of the local node.
-/// This is by design, allowing nodes to listen to subjects without having to be present online.
-struct UdpardRxSubscription
-{
- /// See UdpardRxPort.
- /// Use this to change the transfer-ID timeout value for this subscription.
- struct UdpardRxPort port;
+ /// The payload is stored in a tree of fragments ordered by their offset within the payload.
+ /// See udpard_fragment_t and its helper functions for managing the fragment tree.
+ udpard_fragment_t* payload;
+};
- /// The IP multicast group address and the UDP port number where UDP/IP datagrams matching this Cyphal
- /// subject will be sent by the publishers (remote nodes).
- /// READ-ONLY
- struct UdpardUDPIPEndpoint udp_ip_endpoint;
+/// The RX instance holds no resources and can be destroyed at any time by simply freeing all its ports first
+/// using udpard_rx_port_free(), then discarding the instance itself. The self pointer must not be NULL.
+/// The TX instance must be initialized beforehand, unless the application wants to only listen,
+/// in which case it may be NULL.
+void udpard_rx_new(udpard_rx_t* const self, udpard_tx_t* const tx);
- /// Refer to UdpardRxMemoryResources.
- struct UdpardRxMemoryResources memory;
-};
+/// Must be invoked at least every few milliseconds (more often is fine) to purge timed-out sessions and eject
+/// received transfers when the reordering window expires. If this is invoked in the same processing cycle as
+/// datagram reception, it should ideally run after the reception handling.
+/// The time complexity is logarithmic in the number of living sessions.
+void udpard_rx_poll(udpard_rx_t* const self, const udpard_us_t now);
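+///
+/// An illustrative event-loop skeleton tying the RX and TX pipelines together (all my_* entities are hypothetical):
+///
+///     for (;;)
+///     {
+///         my_wait_for_io();
+///         const udpard_us_t now = my_now();
+///         // ...read datagrams from the sockets and feed them to udpard_rx_port_push() here...
+///         udpard_rx_poll(&rx, now);  // After the reception handling, as recommended above.
+///         udpard_tx_poll(&tx, now, my_writable_ifaces());
+///     }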
/// To subscribe to a subject, the application should do this:
+/// 1. Create a new udpard_rx_port_t instance using udpard_rx_port_new().
+/// 2. Per redundant network interface:
+/// - Create a new RX socket bound to the IP multicast group address and UDP port number returned by
+/// udpard_make_subject_endpoint() for the desired subject-ID.
+/// For P2P transfer ports use ordinary unicast sockets.
+/// 3. Read data from the sockets continuously and forward each datagram to udpard_rx_port_push(),
+/// along with the index of the redundant interface the datagram was received on.
///
-/// 1. Create a new UdpardRxSubscription instance.
-///
-/// 2. Initialize it by calling udpardRxSubscriptionInit. The subject-ID and port-ID are synonymous here.
-///
-/// 3. Per redundant network interface:
-/// - Create a new socket bound to the IP multicast group address and UDP port number specified in the
-/// udp_ip_endpoint field of the initialized subscription instance. The library will determine the
-/// endpoint to use based on the subject-ID.
-///
-/// 4. Read data from the sockets continuously and forward each received UDP datagram to
-/// udpardRxSubscriptionReceive, along with the index of the redundant interface the datagram was received on.
+/// For P2P ports, the procedure is identical, except that the topic hash is set to the local node's UID.
+/// There must be exactly one P2P port per node. The P2P port is also used for acks.
///
/// The extent defines the maximum possible size of received objects, considering also possible future data type
-/// versions with new fields. It is safe to pick larger values.
-/// Note well that the extent is not the same thing as the maximum size of the object, it is usually larger!
-/// Transfers that carry payloads that exceed the specified extent will be accepted anyway but the excess payload
-/// will be truncated away, as mandated by the Specification. The transfer CRC is always validated regardless of
-/// whether its payload is truncated.
+/// versions with new fields. It is safe to pick larger values. Note well that the extent is not the same thing as
+/// the maximum size of the object; it is usually larger! Transfers whose payloads exceed the specified extent are
+/// still accepted, but only the fragments that start before the extent are kept, so the delivered payload may
+/// slightly exceed the extent; fragments starting past the limit are dropped. For example, with an extent of 64
+/// bytes and 48-byte fragments, the fragments at offsets 0 and 48 are kept (96 bytes delivered) while the one at
+/// offset 96 is dropped.
///
-/// By default, the transfer-ID timeout value is set to UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC.
-/// It can be changed by the user at any time by modifying the corresponding field in the subscription instance.
+/// The topic hash is needed to detect and ignore transfers that use different topics on the same subject-ID.
+/// The collision callback is invoked if a topic hash collision is detected.
///
-/// The return value is 0 on success.
-/// The return value is a negated UDPARD_ERROR_ARGUMENT if any of the input arguments are invalid.
+/// If unsure which reassembly mode to choose, use `udpard_rx_unordered` as the default.
+/// For ordering-sensitive use cases, such as state estimators and control loops, use `udpard_rx_ordered` with a short
+/// window.
///
-/// The time complexity is constant. This function does not invoke the dynamic memory manager.
-int_fast8_t udpardRxSubscriptionInit(struct UdpardRxSubscription* const self,
- const UdpardPortID subject_id,
- const size_t extent,
- const struct UdpardRxMemoryResources memory);
-
-/// Frees all memory held by the subscription instance.
-/// After invoking this function, the instance is no longer usable.
-/// The function has no effect if the instance is NULL.
-/// Do not forget to close the sockets that were opened for this subscription.
-void udpardRxSubscriptionFree(struct UdpardRxSubscription* const self);
-
-/// Datagrams received from the sockets of this subscription are fed into this function.
+/// The pointed-to vtable instance must outlive the port instance.
///
-/// The timestamp value indicates the arrival time of the datagram; the arrival time of the earliest datagram of
-/// a transfer becomes the transfer timestamp upon successful reassembly.
-/// This value is also used for the transfer-ID timeout management.
-/// Usually, naive software timestamping is adequate for these purposes, but some applications may require
+/// The return value is true on success, false if any of the arguments are invalid.
+/// The time complexity is constant. This function does not invoke the dynamic memory manager.
+bool udpard_rx_port_new(udpard_rx_port_t* const self,
+ const uint64_t topic_hash, // For P2P ports, this is the local node's UID.
+ const size_t extent,
+ const udpard_rx_mode_t mode,
+ const udpard_us_t reordering_window,
+ const udpard_rx_mem_resources_t memory,
+ const udpard_rx_port_vtable_t* const vtable);
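+
+/// As an illustrative sketch, a subscription port with a 1 KiB extent could be created as follows; the topic
+/// hash, memory resources, and vtable instance are application-provided placeholders:
+///
+///     udpard_rx_port_t port;
+///     const bool ok = udpard_rx_port_new(&port,
+///                                        my_topic_hash,        // E.g., derived from the topic name.
+///                                        1024U,                // Extent in bytes.
+///                                        udpard_rx_unordered,  // Safe default reassembly mode.
+///                                        0,                    // Reordering window; used by udpard_rx_ordered.
+///                                        my_rx_memory,         // A udpard_rx_mem_resources_t instance.
+///                                        &my_port_vtable);     // Must outlive the port.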
+
+/// Frees all memory allocated for the sessions, slots, fragments, etc. of the given port.
+/// Does not free the port itself since it is allocated by the application rather than the library,
+/// and does not alter the RX instance aside from unlinking the port from it.
+/// It is safe to invoke this at any time, but the port instance shall not be used again unless re-initialized.
+/// The function has no effect if any of the arguments are NULL.
+void udpard_rx_port_free(udpard_rx_t* const rx, udpard_rx_port_t* const port);
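+
+/// E.g., to tear down the subscription from the sketch above, close the application-side sockets and then:
+///
+///     udpard_rx_port_free(&rx, &port);  // The port object itself may then be reused or discarded.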
+
+/// The timestamp value indicates the arrival time of the datagram and shall be non-negative.
+/// Often, naive software timestamping is adequate for these purposes, but some applications may require
/// a greater accuracy (e.g., for time synchronization).
///
-/// The redundant interface index shall not exceed UDPARD_NETWORK_INTERFACE_COUNT_MAX.
-///
/// The function takes ownership of the passed datagram payload buffer. The library will either store it as a
/// fragment of the reassembled transfer payload or free it using the corresponding memory resource
-/// (see UdpardRxMemoryResources) if the datagram is not needed for reassembly. Because of the ownership transfer,
-/// the datagram payload buffer has to be mutable (non-const).
-/// One exception is that if the "self" pointer is invalid, the library will be unable to process or free the datagram,
-/// which may lead to a memory leak in the application; hence, the caller should always check that the "self" pointer
-/// is always valid.
-///
-/// The accepted datagram may either be invalid, carry a non-final part of a multi-frame transfer,
-/// carry a final part of a valid multi-frame transfer, or carry a valid single-frame transfer.
-/// The last two cases are said to complete a transfer.
-///
-/// If the datagram completes a transfer, the out_transfer argument is filled with the transfer details
-/// and the return value is one.
-/// The caller is assigned ownership of the transfer payload buffer memory; it has to be freed after use as described
-/// in the documentation for UdpardRxTransfer.
-/// The memory pointed to by out_transfer may be mutated arbitrarily if no transfer is completed.
-///
-/// If the datagram does not complete a transfer or is malformed, the function returns zero and the out_transfer
-/// is not modified. Observe that malformed frames are not treated as errors, as the local application is not
-/// responsible for the behavior of external agents producing the datagrams.
-///
-/// The function invokes the dynamic memory manager in the following cases only (refer to UdpardRxPort for details):
-///
-/// 1. A new session state instance is allocated when a new session is initiated.
-///
-/// 2. A new transfer fragment handle is allocated when a new transfer fragment is accepted.
-///
-/// 3. Allocated objects may occasionally be deallocated at the discretion of the library.
-/// This behavior does not increase the worst case execution time and does not improve the worst case memory
-/// consumption, so a deterministic application need not consider this behavior in its resource analysis.
-/// This behavior is implemented for the benefit of applications where rigorous characterization is unnecessary.
-///
-/// The time complexity is O(log n) where n is the number of remote notes publishing on this subject (topic).
-/// No data copy takes place. Malformed frames are discarded in constant time.
-/// Linear time is spent on the CRC verification of the transfer payload when the transfer is complete.
-///
-/// UDPARD_ERROR_MEMORY is returned if the function fails to allocate memory.
-/// UDPARD_ERROR_ARGUMENT is returned if any of the input arguments are invalid.
-int_fast8_t udpardRxSubscriptionReceive(struct UdpardRxSubscription* const self,
- const UdpardMicrosecond timestamp_usec,
- const struct UdpardMutablePayload datagram_payload,
- const uint_fast8_t redundant_iface_index,
- struct UdpardRxTransfer* const out_transfer);
-
-// --------------------------------------------- RPC-SERVICES ---------------------------------------------
-
-/// An RPC-service RX port models the interest of the application in receiving RPC-service transfers of
-/// a particular kind (request or response) and a particular service-ID.
-struct UdpardRxRPCPort
-{
- /// READ-ONLY
- struct UdpardTreeNode base;
-
- /// READ-ONLY
- UdpardPortID service_id;
-
- /// See UdpardRxPort.
- /// Use this to change the transfer-ID timeout value for this RPC-service port.
- struct UdpardRxPort port;
-
- /// This field can be arbitrarily mutated by the user. It is never accessed by the library.
- /// Its purpose is to simplify integration with OOP interfaces.
- void* user_reference;
-};
-
-/// A service dispatcher is a collection of RPC-service RX ports.
-/// Anonymous nodes (nodes without a node-ID of their own) cannot use RPC-services.
-struct UdpardRxRPCDispatcher
-{
- /// The local node-ID has to be stored to facilitate correctness checking of incoming transfers.
- /// This value shall not be modified.
- /// READ-ONLY
- UdpardNodeID local_node_id;
-
- /// Refer to UdpardRxMemoryResources.
- struct UdpardRxMemoryResources memory;
-
- /// READ-ONLY
- struct UdpardTreeNode* request_ports;
- struct UdpardTreeNode* response_ports;
-};
-
-/// Represents a received Cyphal RPC-service transfer -- either request or response.
-struct UdpardRxRPCTransfer
-{
- struct UdpardRxTransfer base;
- UdpardPortID service_id;
- bool is_request;
-};
-
-/// To begin receiving RPC-service requests and/or responses, the application should do this:
-///
-/// 1. Create a new UdpardRxRPCDispatcher instance and initialize it by calling udpardRxRPCDispatcherInit.
-///
-/// 2. Announce its interest in specific RPC-services (requests and/or responses) by calling
-/// udpardRxRPCDispatcherListen per each. This can be done at any later point as well.
-///
-/// 3. When the local node-ID is known, invoke udpardRxRPCDispatcherStart to inform the library of the
-/// node-ID value of the local node, and at the same time obtain the address of the UDP/IP multicast group
-/// to bind the socket(s) to. This step can be taken before or after the RPC-service port registration.
-/// If the application has to perform a plug-and-play node-ID allocation, it has to complete that beforehand
-/// (the dispatcher is not needed for PnP node-ID allocation).
-///
-/// 4. Having obtained the UDP/IP endpoint in the previous step, do per redundant network interface:
-/// - Create a new socket bound to the IP multicast group address and UDP port number obtained earlier.
-/// The multicast group address depends on the local node-ID.
-///
-/// 5. Read data from the sockets continuously and forward each received UDP datagram to
-/// udpardRxRPCDispatcherReceive, along with the index of the redundant interface
-/// the datagram was received on. Only those services that were announced in step 3 will be processed.
-///
-/// The reason the local node-ID has to be specified via a separate call is to allow the application to set up the
-/// RPC ports early, without having to be aware of its own node-ID. This is useful for applications that perform
-/// plug-and-play node-ID allocation. Applications where PnP is not needed will simply call both functions
-/// at the same time during early initialization.
-///
-/// There is no resource deallocation function ("free") for the RPC dispatcher. This is because the dispatcher
-/// does not own any resources. To dispose of a dispatcher safely, the application shall invoke
-/// udpardRxRPCDispatcherCancel for each RPC-service port on that dispatcher.
-///
-/// The return value is 0 on success.
-/// The return value is a negated UDPARD_ERROR_ARGUMENT if any of the input arguments are invalid.
-///
-/// The time complexity is constant. This function does not invoke the dynamic memory manager.
-int_fast8_t udpardRxRPCDispatcherInit(struct UdpardRxRPCDispatcher* const self,
- const struct UdpardRxMemoryResources memory);
-
-/// This function must be called exactly once to complete the initialization of the RPC dispatcher.
-/// It takes the node-ID of the local node, which is used to derive the UDP/IP multicast group address
-/// to bind the sockets to, which is returned via the out parameter.
-///
-/// In Cyphal/UDP, each node has a specific IP multicast group address where RPC-service transfers destined to that
-/// node are sent to. This is similar to subject (topic) multicast group addressed except that the node-ID takes
-/// the place of the subject-ID. The IP multicast group address is derived from the local node-ID.
-///
-/// The application is expected to open a separate socket bound to that endpoint per redundant interface,
-/// and then feed the UDP datagrams received from these sockets into udpardRxRPCDispatcherReceive,
-/// collecting UdpardRxRPCTransfer instances at the output.
-///
-/// This function shall not be called more than once per dispatcher. If the local node needs to change its node-ID,
-/// this dispatcher instance must be destroyed and a new one created instead.
-///
-/// The return value is 0 on success.
-/// The return value is a negated UDPARD_ERROR_ARGUMENT if any of the input arguments are invalid.
-///
-/// The time complexity is constant. This function does not invoke the dynamic memory manager.
-int_fast8_t udpardRxRPCDispatcherStart(struct UdpardRxRPCDispatcher* const self,
- const UdpardNodeID local_node_id,
- struct UdpardUDPIPEndpoint* const out_udp_ip_endpoint);
-
-/// This function lets the application register its interest in a particular service-ID and kind (request/response)
-/// by creating an RPC-service RX port. The port pointer shall retain validity until its unregistration or until
-/// the dispatcher is destroyed. The service instance shall not be moved or destroyed.
-///
-/// If such registration already exists, it will be unregistered first as if udpardRxRPCDispatcherCancel was
-/// invoked by the application, and then re-created anew with the new parameters.
-///
-/// For the meaning of extent, please refer to the documentation of the subscription pipeline.
-///
-/// By default, the transfer-ID timeout value is set to UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC.
-/// It can be changed by the user at any time by modifying the corresponding field in the registration instance.
-///
-/// The return value is 1 if a new registration has been created as requested.
-/// The return value is 0 if such registration existed at the time the function was invoked. In this case,
-/// the existing registration is terminated and then a new one is created in its place. Pending transfers may be lost.
-/// The return value is a negated UDPARD_ERROR_ARGUMENT if any of the input arguments are invalid.
-///
-/// The time complexity is logarithmic from the number of current registrations under the specified transfer kind
-/// (request or response).
-/// This function does not allocate new memory. The function may deallocate memory if such registration already
-/// existed; the deallocation behavior is specified in the documentation for udpardRxRPCDispatcherCancel.
-int_fast8_t udpardRxRPCDispatcherListen(struct UdpardRxRPCDispatcher* const self,
- struct UdpardRxRPCPort* const port,
- const UdpardPortID service_id,
- const bool is_request,
- const size_t extent);
-
-/// This function reverses the effect of udpardRxRPCDispatcherListen.
-/// If the registration is found, all its memory is de-allocated (session states and payload buffers).
-/// Please refer to the UdpardRxPort session description for detailed information on the amount of memory freed.
-///
-/// The return value is 1 if such registration existed (and, therefore, it was removed).
-/// The return value is 0 if such registration does not exist. In this case, the function has no effect.
-/// The return value is a negated UDPARD_ERROR_ARGUMENT if any of the input arguments are invalid.
-///
-/// The time complexity is logarithmic from the number of current registration under the specified transfer kind.
-/// This function does not allocate new memory.
-int_fast8_t udpardRxRPCDispatcherCancel(struct UdpardRxRPCDispatcher* const self,
- const UdpardPortID service_id,
- const bool is_request);
-
-/// Datagrams received from the sockets of this RPC service dispatcher are fed into this function.
-/// It is the analog of udpardRxSubscriptionReceive for RPC-service transfers.
-/// Please refer to the documentation of udpardRxSubscriptionReceive for the usage information.
-///
-/// Frames (datagrams) that belong to transfers for which there is no active RX RPC port are ignored.
-///
-/// The "out_port" pointer-to-pointer can be used to retrieve the specific UdpardRxRPCPort instance that was used to
-/// process the received transfer. Remember that each UdpardRxRPCPort instance has a user reference field,
-/// which in combination with this feature can be used to construct OOP interfaces on top of the library.
-/// If this is not needed, the pointer-to-pointer can be NULL.
-///
-/// The memory pointed to by out_transfer may be mutated arbitrarily if no transfer is completed.
-int_fast8_t udpardRxRPCDispatcherReceive(struct UdpardRxRPCDispatcher* const self,
- const UdpardMicrosecond timestamp_usec,
- const struct UdpardMutablePayload datagram_payload,
- const uint_fast8_t redundant_iface_index,
- struct UdpardRxRPCPort** const out_port,
- struct UdpardRxRPCTransfer* const out_transfer);
-
-// =====================================================================================================================
-// ==================================================== MISC =====================================================
-// =====================================================================================================================
-
-/// This helper function takes the head of a fragmented buffer list and copies the data into the contiguous buffer
-/// provided by the user. If the total size of all fragments combined exceeds the size of the user-provided buffer,
-/// copying will stop early after the buffer is filled, thus truncating the fragmented data short.
-///
-/// The source list is not modified. Do not forget to free its memory afterward if it was dynamically allocated.
-///
-/// The function has no effect and returns zero if the destination buffer is NULL.
-/// The data pointers in the fragment list shall be valid, otherwise the behavior is undefined.
-///
-/// Returns the number of bytes copied into the contiguous destination buffer.
-size_t udpardGather(const struct UdpardFragment head, const size_t destination_size_bytes, void* const destination);
+/// (see udpard_rx_mem_resources_t) if the datagram is not needed for reassembly. Because of the ownership transfer,
+/// the datagram payload buffer has to be mutable (non-const). The ownership transfer does not take place if
+/// any of the arguments are invalid; the function returns false in that case and the caller must clean up.
+///
+/// The function invokes the dynamic memory manager in the following cases only (refer to udpard_rx_port_t):
+/// 1. A new session state instance is allocated when a new session is initiated.
+/// 2. A new transfer fragment handle is allocated when a new transfer fragment is accepted.
+/// 3. Allocated objects may occasionally be deallocated to clean up stale transfers and sessions.
+///
+/// The time complexity is O(log n + log k) where n is the number of remote nodes publishing on this subject,
+/// and k is the number of fragments retained in memory for the corresponding in-progress transfer.
+/// No data copying takes place.
+///
+/// Returns false if any of the arguments are invalid.
+bool udpard_rx_port_push(udpard_rx_t* const rx,
+ udpard_rx_port_t* const port,
+ const udpard_us_t timestamp,
+ const udpard_udpip_ep_t source_ep,
+ const udpard_bytes_mut_t datagram_payload,
+ const udpard_deleter_t payload_deleter,
+ const uint_fast8_t iface_index);
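+
+/// As a sketch, a per-interface socket read loop could feed the library like this; the socket helpers are
+/// hypothetical placeholders and the udpard_bytes_mut_t initializer assumes size/data fields:
+///
+///     uint8_t* const buf = app_alloc(RX_DATAGRAM_SIZE_MAX);              // Owned by the library on success.
+///     const size_t len = app_socket_recv(iface_index, buf, &source_ep);  // E.g., a recvfrom() wrapper.
+///     const udpard_bytes_mut_t dgram = { .size = len, .data = buf };
+///     if (!udpard_rx_port_push(&rx, &port, app_now(), source_ep, dgram, my_deleter, iface_index)) {
+///         app_free(buf, RX_DATAGRAM_SIZE_MAX);  // Ownership was not taken; free to avoid a leak.
+///     }
+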
#ifdef __cplusplus
}
diff --git a/tests/.clang-tidy b/tests/.clang-tidy
index 657d99d..942b2b5 100644
--- a/tests/.clang-tidy
+++ b/tests/.clang-tidy
@@ -40,12 +40,22 @@ Checks: >-
-*-no-malloc,
-cert-msc30-c,
-cert-msc50-cpp,
- -modernize-macro-to-enum,
+ -*-macro-to-enum,
-modernize-use-trailing-return-type,
+ -*-macro-usage,
+ -*-enum-size,
+ -*-use-using,
-cppcoreguidelines-owning-memory,
-misc-include-cleaner,
-performance-avoid-endl,
-cppcoreguidelines-avoid-do-while,
+ -*DeprecatedOrUnsafeBufferHandling,
+ -*-prefer-static-over-anonymous-namespace,
+ -*-pro-bounds-avoid-unchecked-container-access,
+ -*-array*decay,
+ -*-avoid-c-arrays,
+ -*-casting-through-void,
+ -*-named-parameter,
WarningsAsErrors: '*'
HeaderFilterRegex: '.*\.hpp'
FormatStyle: file
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index bc0f063..0e8f0b4 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -15,10 +15,13 @@ enable_testing()
set(CTEST_OUTPUT_ON_FAILURE ON)
set(NO_STATIC_ANALYSIS OFF CACHE BOOL "disable udpard static analysis")
+set(ENABLE_COVERAGE OFF CACHE BOOL "enable code coverage measurement")
set(library_dir "${CMAKE_SOURCE_DIR}/libudpard")
set(unity_root "${CMAKE_SOURCE_DIR}/submodules/unity")
+include_directories(SYSTEM ${CMAKE_SOURCE_DIR}/lib/cavl)
+
# Use -DNO_STATIC_ANALYSIS=1 to suppress static analysis.
# If not suppressed, the tools used here shall be available, otherwise the build will fail.
if (NOT NO_STATIC_ANALYSIS)
@@ -48,6 +51,14 @@ function(gen_test name files compile_definitions compile_flags link_flags c_stan
target_include_directories(${name} PUBLIC ${library_dir})
target_compile_definitions(${name} PUBLIC ${compile_definitions})
target_link_libraries(${name} "${name}_unity")
+
+ # Apply coverage flags if coverage is enabled
+ if (ENABLE_COVERAGE)
+ target_compile_options(${name} PRIVATE --coverage -fprofile-arcs -ftest-coverage)
+ target_link_options(${name} PRIVATE --coverage -fprofile-arcs)
+ target_compile_definitions(${name} PRIVATE NDEBUG=1) # Remove assertion checks as they interfere with coverage
+ endif()
+
set_target_properties(
${name}
PROPERTIES
@@ -69,15 +80,61 @@ function(gen_test_matrix name files)
gen_test("${name}_x32_c11" "${files}" "" "-m32" "-m32" "11")
endfunction()
+function(gen_test_single name files) # When the full matrix is not needed, to keep pipelines fast.
+ gen_test("${name}" "${files}" "" "-m32" "-m32" "11")
+endfunction()
+
# Add the test targets.
# Those that are written in C may #include <udpard.c> to reach its internals; they are called "intrusive".
# The public interface tests may be written in C++ for convenience.
gen_test_matrix(test_helpers "src/test_helpers.c")
-gen_test_matrix(test_cavl "src/test_cavl.cpp")
-gen_test_matrix(test_tx "${library_dir}/udpard.c;src/test_tx.cpp")
-gen_test_matrix(test_rx "${library_dir}/udpard.c;src/test_rx.cpp")
-gen_test_matrix(test_e2e "${library_dir}/udpard.c;src/test_e2e.cpp")
-gen_test_matrix(test_misc "${library_dir}/udpard.c;src/test_misc.cpp")
-gen_test_matrix(test_intrusive_crc "src/test_intrusive_crc.c")
+gen_test_matrix(test_intrusive_header "src/test_intrusive_header.c")
+gen_test_matrix(test_intrusive_misc "src/test_intrusive_misc.c")
gen_test_matrix(test_intrusive_tx "src/test_intrusive_tx.c")
gen_test_matrix(test_intrusive_rx "src/test_intrusive_rx.c")
+gen_test_matrix(test_intrusive_guards "src/test_intrusive_guards.c")
+gen_test_matrix(test_fragment "src/test_fragment.cpp;${library_dir}/udpard.c")
+gen_test_single(test_e2e_random "src/test_e2e_random.cpp;${library_dir}/udpard.c")
+gen_test_single(test_e2e_edge "src/test_e2e_edge.cpp;${library_dir}/udpard.c")
+gen_test_single(test_e2e_api "src/test_e2e_api.cpp;${library_dir}/udpard.c")
+gen_test_single(test_e2e_responses "src/test_e2e_responses.cpp;${library_dir}/udpard.c")
+gen_test_single(test_e2e_reliable_ordered "src/test_e2e_reliable_ordered.cpp;${library_dir}/udpard.c")
+gen_test_single(test_integration_sockets "src/test_integration_sockets.cpp;${library_dir}/udpard.c")
+
+# Coverage targets. Usage:
+# cmake -DENABLE_COVERAGE=ON ..
+# make -j16 && make test && make coverage
+# xdg-open coverage-html/index.html
+if (ENABLE_COVERAGE)
+ find_program(LCOV_PATH lcov REQUIRED)
+ find_program(GENHTML_PATH genhtml REQUIRED)
+
+ # Target to reset coverage counters
+ add_custom_target(coverage-reset
+ COMMAND ${LCOV_PATH} --zerocounters --directory .
+ COMMAND ${LCOV_PATH} --capture --initial --directory . --output-file coverage-base.info
+ --rc lcov_branch_coverage=1
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
+ COMMENT "Resetting coverage counters"
+ )
+
+ # Target to generate coverage report
+ add_custom_target(coverage
+ COMMAND ${LCOV_PATH} --capture --directory . --output-file coverage-total.info --rc lcov_branch_coverage=1
+ COMMAND ${LCOV_PATH} --extract coverage-total.info '*/libudpard/udpard.c' --output-file coverage-udpard.info
+ --rc lcov_branch_coverage=1
+
+ COMMAND ${CMAKE_COMMAND} -E echo ""
+ COMMAND ${CMAKE_COMMAND} -E echo "=== 🔬 COVERAGE SUMMARY BEGIN 📐 ==="
+ COMMAND ${LCOV_PATH} --list coverage-udpard.info --rc lcov_branch_coverage=1
+ COMMAND ${CMAKE_COMMAND} -E echo "==== ⬆️ COVERAGE SUMMARY END ⬆️ ===="
+ COMMAND ${CMAKE_COMMAND} -E echo ""
+
+ COMMAND ${GENHTML_PATH} coverage-udpard.info --output-directory coverage-html --title "libudpard coverage"
+ --legend --demangle-cpp --branch-coverage
+ COMMAND ${CMAKE_COMMAND} -E echo "Coverage report: file://${CMAKE_BINARY_DIR}/coverage-html/index.html"
+
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
+ COMMENT "Generating coverage HTML report"
+ )
+endif()
diff --git a/tests/src/helpers.h b/tests/src/helpers.h
index f4e035f..60ee34a 100644
--- a/tests/src/helpers.h
+++ b/tests/src/helpers.h
@@ -2,62 +2,82 @@
// Copyright (c) 2016 Cyphal Development Team.
/// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// ReSharper disable CppRedundantInlineSpecifier
+// NOLINTBEGIN(*-unchecked-string-to-number-conversion,*-deprecated-headers,*-designated-initializers,*-loop-convert)
+// NOLINTBEGIN(*DeprecatedOrUnsafeBufferHandling,*err34-c,*-vararg,*-use-auto,*-use-nullptr,*-redundant-void-arg)
+// NOLINTBEGIN(*-cstyle-cast)
#pragma once
-#include <udpard.h>  // Shall always be included first.
+#include <udpard.h> // Shall always be included first.
#include <stdio.h>
#include <stdlib.h>
-#include <string.h>
#include <time.h>
#if !(defined(UDPARD_VERSION_MAJOR) && defined(UDPARD_VERSION_MINOR))
-# error "Library version not defined"
+#error "Library version not defined"
#endif
-#if !(defined(UDPARD_CYPHAL_SPECIFICATION_VERSION_MAJOR) && defined(UDPARD_CYPHAL_SPECIFICATION_VERSION_MINOR))
-# error "Cyphal specification version not defined"
+#if !(defined(UDPARD_CYPHAL_VERSION_MAJOR) && defined(UDPARD_CYPHAL_VERSION_MINOR))
+#error "Cyphal specification version not defined"
#endif
// This is only needed to tell static analyzers that the code that follows is not C++.
#ifdef __cplusplus
-extern "C" {
+extern "C"
+{
#endif
-#define TEST_PANIC(message) \
- do \
- { \
- (void) fprintf(stderr, "%s:%u: PANIC: %s\n", __FILE__, (unsigned) __LINE__, message); \
- (void) fflush(stderr); \
- abort(); \
+#define TEST_PANIC(message) \
+ do { \
+ (void)fprintf(stderr, "%s:%u: PANIC: %s\n", __FILE__, (unsigned)__LINE__, message); \
+ (void)fflush(stderr); \
+ abort(); \
} while (0)
#define TEST_PANIC_UNLESS(condition) \
- do \
- { \
- if (!(condition)) \
- { \
+ do { \
+ if (!(condition)) { \
TEST_PANIC(#condition); \
} \
} while (0)
-static inline void* dummyAllocatorAllocate(void* const user_reference, const size_t size)
+static inline void* dummy_alloc(void* const user, const size_t size)
{
- (void) user_reference;
- (void) size;
+ (void)user;
+ (void)size;
return NULL;
}
-static inline void dummyAllocatorDeallocate(void* const user_reference, const size_t size, void* const pointer)
+static inline void dummy_free(void* const user, const size_t size, void* const pointer)
{
- (void) user_reference;
- (void) size;
+ (void)user;
+ (void)size;
TEST_PANIC_UNLESS(pointer == NULL);
}
+// Single-fragment scatter helper.
+static inline udpard_bytes_scattered_t make_scattered(const void* const data, const size_t size)
+{
+ udpard_bytes_scattered_t out;
+ out.bytes.size = size;
+ out.bytes.data = data;
+ out.next = NULL;
+ return out;
+}
+
+// Wraps an application pointer for user context plumbing.
+static inline udpard_user_context_t make_user_context(void* const obj)
+{
+ udpard_user_context_t out = UDPARD_USER_CONTEXT_NULL;
+ out.ptr[0] = obj;
+ return out;
+}
+
/// The instrumented allocator tracks memory consumption, checks for heap corruption, and can be configured to fail
/// allocations above a certain threshold.
#define INSTRUMENTED_ALLOCATOR_CANARY_SIZE 1024U
typedef struct
{
+ /// Each allocator has its own canary, to catch an attempt to free memory allocated by a different allocator.
uint_least8_t canary[INSTRUMENTED_ALLOCATOR_CANARY_SIZE];
/// The limit can be changed at any moment to control the maximum amount of memory that can be allocated.
/// It may be set to a value less than the currently allocated amount.
@@ -66,31 +86,34 @@ typedef struct
/// The current state of the allocator.
size_t allocated_fragments;
size_t allocated_bytes;
-} InstrumentedAllocator;
+ /// Event counters.
+ uint64_t count_alloc;
+ uint64_t count_free;
+} instrumented_allocator_t;
-static inline void* instrumentedAllocatorAllocate(void* const user_reference, const size_t size)
+static inline void* instrumented_allocator_alloc(void* const user_reference, const size_t size)
{
- InstrumentedAllocator* const self = (InstrumentedAllocator*) user_reference;
- void* result = NULL;
- if ((size > 0U) && //
- ((self->allocated_bytes + size) <= self->limit_bytes) && //
- ((self->allocated_fragments + 1U) <= self->limit_fragments))
- {
- const size_t size_with_canaries = size + ((size_t) INSTRUMENTED_ALLOCATOR_CANARY_SIZE * 2U);
+ instrumented_allocator_t* const self = (instrumented_allocator_t*)user_reference;
+ void* result = NULL; // NOLINT(*-const-correctness)
+ self->count_alloc++;
+ if ((size > 0U) && //
+ ((self->allocated_bytes + size) <= self->limit_bytes) && //
+ ((self->allocated_fragments + 1U) <= self->limit_fragments)) {
+ const size_t size_with_canaries = size + ((size_t)INSTRUMENTED_ALLOCATOR_CANARY_SIZE * 2U);
void* origin = malloc(size_with_canaries);
TEST_PANIC_UNLESS(origin != NULL);
- *((size_t*) origin) = size;
- uint_least8_t* p = ((uint_least8_t*) origin) + sizeof(size_t);
- result = ((uint_least8_t*) origin) + INSTRUMENTED_ALLOCATOR_CANARY_SIZE;
- for (size_t i = sizeof(size_t); i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) // Fill the front canary.
+ *((size_t*)origin) = size;
+ uint_least8_t* p = ((uint_least8_t*)origin) + sizeof(size_t); // NOLINT(*-const-correctness)
+ result = ((uint_least8_t*)origin) + INSTRUMENTED_ALLOCATOR_CANARY_SIZE;
+ for (size_t i = sizeof(size_t); i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) // Fill the front canary.
{
*p++ = self->canary[i];
}
- for (size_t i = 0; i < size; i++) // Randomize the allocated fragment.
+ for (size_t i = 0; i < size; i++) // Randomize the allocated fragment.
{
- *p++ = (uint_least8_t) (rand() % (UINT_LEAST8_MAX + 1));
+ *p++ = (uint_least8_t)(rand() % (UINT_LEAST8_MAX + 1));
}
- for (size_t i = 0; i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) // Fill the back canary.
+ for (size_t i = 0; i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) // Fill the back canary.
{
*p++ = self->canary[i];
}
@@ -100,25 +123,25 @@ static inline void* instrumentedAllocatorAllocate(void* const user_reference, co
return result;
}
-static inline void instrumentedAllocatorDeallocate(void* const user_reference, const size_t size, void* const pointer)
+static inline void instrumented_allocator_free(void* const user_reference, const size_t size, void* const pointer)
{
- InstrumentedAllocator* const self = (InstrumentedAllocator*) user_reference;
- if (pointer != NULL)
- {
- uint_least8_t* p = ((uint_least8_t*) pointer) - INSTRUMENTED_ALLOCATOR_CANARY_SIZE;
+ instrumented_allocator_t* const self = (instrumented_allocator_t*)user_reference;
+ self->count_free++;
+ if (pointer != NULL) { // NOLINTNEXTLINE(*-const-correctness)
+ uint_least8_t* p = ((uint_least8_t*)pointer) - INSTRUMENTED_ALLOCATOR_CANARY_SIZE;
void* const origin = p;
- const size_t true_size = *((const size_t*) origin);
+ const size_t true_size = *((const size_t*)origin);
TEST_PANIC_UNLESS(size == true_size);
p += sizeof(size_t);
- for (size_t i = sizeof(size_t); i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) // Check the front canary.
+ for (size_t i = sizeof(size_t); i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) // Check the front canary.
{
TEST_PANIC_UNLESS(*p++ == self->canary[i]);
}
- for (size_t i = 0; i < size; i++) // Destroy the returned memory to prevent use-after-free.
+ for (size_t i = 0; i < size; i++) // Destroy the returned memory to prevent use-after-free.
{
- *p++ = (uint_least8_t) (rand() % (UINT_LEAST8_MAX + 1));
+ *p++ = (uint_least8_t)(rand() % (UINT_LEAST8_MAX + 1));
}
- for (size_t i = 0; i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) // Check the back canary.
+ for (size_t i = 0; i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) // Check the back canary.
{
TEST_PANIC_UNLESS(*p++ == self->canary[i]);
}
@@ -131,46 +154,77 @@ static inline void instrumentedAllocatorDeallocate(void* const user_reference, c
}
/// By default, the limit is unrestricted (set to the maximum possible value).
-static inline void instrumentedAllocatorNew(InstrumentedAllocator* const self)
+static inline void instrumented_allocator_new(instrumented_allocator_t* const self)
{
- for (size_t i = 0; i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++)
- {
- self->canary[i] = (uint_least8_t) (rand() % (UINT_LEAST8_MAX + 1));
+ for (size_t i = 0; i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) {
+ self->canary[i] = (uint_least8_t)(rand() % (UINT_LEAST8_MAX + 1));
}
self->limit_fragments = SIZE_MAX;
self->limit_bytes = SIZE_MAX;
self->allocated_fragments = 0U;
self->allocated_bytes = 0U;
+ self->count_alloc = 0U;
+ self->count_free = 0U;
}
-static inline struct UdpardMemoryResource instrumentedAllocatorMakeMemoryResource(
- const InstrumentedAllocator* const self)
+/// Resets the counters and generates a new canary.
+/// Will crash if there are outstanding allocations.
+static inline void instrumented_allocator_reset(instrumented_allocator_t* const self)
{
- const struct UdpardMemoryResource out = {.user_reference = (void*) self,
- .deallocate = &instrumentedAllocatorDeallocate,
- .allocate = &instrumentedAllocatorAllocate};
- return out;
+ TEST_PANIC_UNLESS(self->allocated_fragments == 0U);
+ TEST_PANIC_UNLESS(self->allocated_bytes == 0U);
+ instrumented_allocator_new(self);
}
-static inline struct UdpardMemoryDeleter instrumentedAllocatorMakeMemoryDeleter(const InstrumentedAllocator* const self)
+// Shared vtable for instrumented allocators.
+static const udpard_mem_vtable_t instrumented_allocator_vtable = {
+ .base = { .free = instrumented_allocator_free },
+ .alloc = instrumented_allocator_alloc,
+};
+
+static inline udpard_mem_t instrumented_allocator_make_resource(const instrumented_allocator_t* const self)
{
- const struct UdpardMemoryDeleter out = {.user_reference = (void*) self,
- .deallocate = &instrumentedAllocatorDeallocate};
- return out;
+ const udpard_mem_t result = { .vtable = &instrumented_allocator_vtable, .context = (void*)self };
+ return result;
}
-static inline void seedRandomNumberGenerator(void)
+static inline udpard_deleter_t instrumented_allocator_make_deleter(const instrumented_allocator_t* const self)
{
- unsigned seed = (unsigned) time(NULL);
+ const udpard_deleter_t result = { .vtable = &instrumented_allocator_vtable.base, .context = (void*)self };
+ return result;
+}
+
+// Shortcuts for vtable-based memory access.
+static inline void* mem_res_alloc(const udpard_mem_t mem, const size_t size)
+{
+ return mem.vtable->alloc(mem.context, size);
+}
+
+static inline void mem_res_free(const udpard_mem_t mem, const size_t size, void* const ptr)
+{
+ mem.vtable->base.free(mem.context, size, ptr);
+}
+
+static inline void mem_del_free(const udpard_deleter_t del, const size_t size, void* const ptr)
+{
+ del.vtable->free(del.context, size, ptr);
+}
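+
+// Example (typical test-body usage of the instrumented allocator defined above):
+//
+//   instrumented_allocator_t al;
+//   instrumented_allocator_new(&al);
+//   const udpard_mem_t mem = instrumented_allocator_make_resource(&al);
+//   void* const p = mem_res_alloc(mem, 64U);
+//   TEST_PANIC_UNLESS((al.allocated_fragments == 1U) && (al.allocated_bytes == 64U));
+//   mem_res_free(mem, 64U, p);
+//   TEST_PANIC_UNLESS(al.allocated_fragments == 0U);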
+
+static inline void seed_prng(void)
+{
+ unsigned seed = (unsigned)time(NULL);
const char* const env_var = getenv("RANDOM_SEED");
- if (env_var != NULL)
- {
- seed = (unsigned) atoll(env_var); // Conversion errors are possible but ignored.
+ if (env_var != NULL) {
+ seed = (unsigned)atoll(env_var); // Conversion errors are possible but ignored.
}
srand(seed);
- (void) fprintf(stderr, "RANDOM_SEED=%u\n", seed);
+ (void)fprintf(stderr, "export RANDOM_SEED=%u\n", seed);
}
#ifdef __cplusplus
}
#endif
+
+// NOLINTEND(*-cstyle-cast)
+// NOLINTEND(*DeprecatedOrUnsafeBufferHandling,*err34-c,*-vararg,*-use-auto,*-use-nullptr,*-redundant-void-arg)
+// NOLINTEND(*-unchecked-string-to-number-conversion,*-deprecated-headers,*-designated-initializers,*-loop-convert)
diff --git a/tests/src/hexdump.hpp b/tests/src/hexdump.hpp
deleted file mode 100644
index d27bc83..0000000
--- a/tests/src/hexdump.hpp
+++ /dev/null
@@ -1,83 +0,0 @@
-/// This software is distributed under the terms of the MIT License.
-/// Copyright (C) OpenCyphal Development Team
-/// Copyright Amazon.com Inc. or its affiliates.
-/// SPDX-License-Identifier: MIT
-/// Author: Pavel Kirienko
-
-#include <cstdint>
-#include <iomanip>
-#include <sstream>
-#include <string>
-
-namespace hexdump
-{
-using Byte = std::uint_least8_t;
-
-template <Byte BytesPerRow = 16, typename InputIterator>
-[[nodiscard]] std::string hexdump(InputIterator begin, const InputIterator end)
-{
- static_assert(BytesPerRow > 0);
- static constexpr std::pair<Byte, Byte> PrintableASCIIRange{32, 126};
- std::uint32_t offset = 0;
- std::ostringstream output;
- bool first = true;
- output << std::hex << std::setfill('0');
- do
- {
- if (first)
- {
- first = false;
- }
- else
- {
- output << "\n";
- }
- output << std::setw(8) << offset << " ";
- offset += BytesPerRow;
- auto it = begin;
- for (Byte i = 0; i < BytesPerRow; ++i)
- {
- if (i == 8)
- {
- output << ' ';
- }
- if (it != end)
- {
- output << std::setw(2) << static_cast<std::uint16_t>(*it) << ' ';
- ++it;
- }
- else
- {
- output << " ";
- }
- }
- output << " ";
- for (Byte i = 0; i < BytesPerRow; ++i)
- {
- if (begin != end)
- {
- output << (((static_cast<Byte>(*begin) >= PrintableASCIIRange.first) &&
- (static_cast<Byte>(*begin) <= PrintableASCIIRange.second))
- ? static_cast<char>(*begin) // NOSONAR intentional conversion to plain char
- : '.');
- ++begin;
- }
- else
- {
- output << ' ';
- }
- }
- } while (begin != end);
- return output.str();
-}
-
-[[nodiscard]] auto hexdump(const auto& cont)
-{
- return hexdump(std::begin(cont), std::end(cont));
-}
-
-[[nodiscard]] inline auto hexdump(const void* const data, const std::size_t size)
-{
- return hexdump(static_cast<const Byte*>(data), static_cast<const Byte*>(data) + size);
-}
-} // namespace hexdump
diff --git a/tests/src/test_cavl.cpp b/tests/src/test_cavl.cpp
deleted file mode 100644
index 8d816b6..0000000
--- a/tests/src/test_cavl.cpp
+++ /dev/null
@@ -1,1404 +0,0 @@
-// This software is distributed under the terms of the MIT License.
-// Copyright (c) 2016-2020 OpenCyphal Development Team.
-// These tests have been adapted from the Cavl test suite that you can find at https://github.com/pavel-kirienko/cavl
-
-#include <_udpard_cavl.h>
-#include <unity.h>
-#include <algorithm>
-#include <array>
-#include <cstddef>
-#include <cstdint>
-#include <cstdio>
-#include <cstdlib>
-#include <iostream>
-#include <optional>
-
-namespace
-{
-/// These aliases are introduced to keep things nicely aligned in test cases.
-constexpr auto Zz = nullptr;
-constexpr auto Zzzzz = nullptr;
-constexpr auto Zzzzzz = nullptr;
-
-template <typename T>
-struct Node final : Cavl
-{
- explicit Node(const T val) : Cavl{Cavl{}}, value(val) {}
-
- Node(const Cavl& cv, const T val) : Cavl{cv}, value(val) {}
-
- Node() : Cavl{Cavl{}} {}
-
- T value{};
-
- auto checkLinkageUpLeftRightBF(const Cavl* const check_up,
- const Cavl* const check_le,
- const Cavl* const check_ri,
- const std::int_fast8_t check_bf) const -> bool
- {
- return (up == check_up) && //
- (lr[0] == check_le) && (lr[1] == check_ri) && //
- (bf == check_bf) && //
- ((check_up == nullptr) || (check_up->lr[0] == this) || (check_up->lr[1] == this)) && //
- ((check_le == nullptr) || (check_le->up == this)) && //
- ((check_ri == nullptr) || (check_ri->up == this));
- }
-
- auto min() -> Node* { return static_cast<Node*>(cavlFindExtremum(this, false)); }
-
- auto max() -> Node* { return static_cast<Node*>(cavlFindExtremum(this, true)); }
-
- auto operator=(const Cavl& cv) -> Node&
- {
- static_cast<Cavl&>(*this) = cv;
- return *this;
- }
-};
-
-/// Wrapper over cavlSearch() that supports closures.
-template <typename T, typename Predicate, typename Factory>
-auto search(Node<T>** const root, const Predicate& predicate, const Factory& factory) -> Node<T>*
-{
- struct Refs
- {
- Predicate predicate;
- Factory factory;
-
- static auto callPredicate(void* const user_reference, const Cavl* const node) -> std::int_fast8_t
- {
- const auto ret = static_cast<Refs*>(user_reference)->predicate(static_cast<const Node<T>&>(*node));
- if (ret > 0)
- {
- return 1;
- }
- if (ret < 0)
- {
- return -1;
- }
- return 0;
- }
-
- static auto callFactory(void* const user_reference) -> Cavl*
- {
- return static_cast<Refs*>(user_reference)->factory();
- }
- } refs{predicate, factory};
- Cavl* const out = cavlSearch(reinterpret_cast<Cavl**>(root), &refs, &Refs::callPredicate, &Refs::callFactory);
- return static_cast<Node<T>*>(out);
-}
-
-template <typename T, typename Predicate>
-auto search(Node<T>** const root, const Predicate& predicate) -> Node<T>*
-{
- return search(root, predicate, []() { return nullptr; });
-}
-
-/// Wrapper over cavlRemove().
-template <typename T>
-void remove(Node<T>** const root, const Node<T>* const n)
-{
- cavlRemove(reinterpret_cast<Cavl**>(root), n);
-}
-
-template <typename T>
-auto getHeight(const Node<T>* const n) -> std::uint_fast8_t // NOLINT recursion
-{
- return (n != nullptr) ? static_cast<std::uint_fast8_t>(1U + std::max(getHeight(static_cast<const Node<T>*>(n->lr[0])),
- getHeight(static_cast<const Node<T>*>(n->lr[1]))))
- : 0;
-}
-
-template <typename T>
-void print(const Node<T>* const nd, const std::uint_fast8_t depth = 0, const char marker = 'T') // NOLINT recursion
-{
- TEST_ASSERT(10 > getHeight(nd)); // Fail early for malformed cyclic trees, do not overwhelm stdout.
- if (nd != nullptr)
- {
- print(static_cast<const Node<T>*>(nd->lr[0]), static_cast<std::uint_fast8_t>(depth + 1U), 'L');
- for (std::uint16_t i = 1U; i < depth; i++)
- {
- std::cout << " ";
- }
- if (marker == 'L')
- {
- std::cout << " .............";
- }
- else if (marker == 'R')
- {
- std::cout << " `````````````";
- }
- else
- {
- (void) 0;
- }
- std::cout << marker << "=" << static_cast<std::int64_t>(nd->value) //
- << " [" << static_cast<std::int16_t>(nd->bf) << "]" << std::endl;
- print(static_cast<const Node<T>*>(nd->lr[1]), static_cast<std::uint_fast8_t>(depth + 1U), 'R');
- }
-}
-
-template <typename Node, bool Ascending, typename Visitor>
-void traverse(Node* const root, const Visitor& visitor) // NOLINT recursion needed for testing
-{
- if (root != nullptr)
- {
- traverse<Node, Ascending>(static_cast<Node*>(root->lr[!Ascending]), visitor);
- visitor(root);
- traverse<Node, Ascending>(static_cast<Node*>(root->lr[Ascending]), visitor);
- }
-}
-
-template <typename T>
-auto checkAscension(const Node<T>* const root) -> std::optional<std::size_t>
-{
- const Node<T>* prev = nullptr;
- bool valid = true;
- std::size_t size = 0;
- traverse<const Node<T>, true>(root, [&](const Node<T>* const nd) {
- if (prev != nullptr)
- {
- valid = valid && (prev->value < nd->value);
- }
- prev = nd;
- size++;
- });
- return valid ? std::optional<std::size_t>(size) : std::optional<std::size_t>{};
-}
-
-template <typename T>
-auto findBrokenAncestry(const Node<T>* const n, const Cavl* const parent = nullptr) // NOLINT recursion
- -> const Node<T>*
-{
- if ((n != nullptr) && (n->up == parent))
- {
- for (auto* ch : n->lr) // NOLINT array decay due to C API
- {
- if (const Node<T>* p = findBrokenAncestry(static_cast<const Node<T>*>(ch), n))
- {
- return p;
- }
- }
- return nullptr;
- }
- return n;
-}
-
-template <typename T>
-auto findBrokenBalanceFactor(const Node<T>* const n) -> const Cavl* // NOLINT recursion
-{
- if (n != nullptr)
- {
- if (std::abs(n->bf) > 1)
- {
- return n;
- }
- const std::int16_t hl = getHeight(static_cast<const Node<T>*>(n->lr[0]));
- const std::int16_t hr = getHeight(static_cast<const Node<T>*>(n->lr[1]));
- if (n->bf != (hr - hl))
- {
- return n;
- }
- for (auto* ch : n->lr) // NOLINT array decay due to C API
- {
- if (const Cavl* p = findBrokenBalanceFactor(static_cast<const Node<T>*>(ch)))
- {
- return p;
- }
- }
- }
- return nullptr;
-}
-
-void testCheckAscension()
-{
- using N = Node<std::uint8_t>;
- N t{2};
- N l{1};
- N r{3};
- N rr{4};
- // Correctly arranged tree -- smaller items on the left.
- t.lr[0] = &l;
- t.lr[1] = &r;
- r.lr[1] = &rr;
- TEST_ASSERT(4 == checkAscension(&t));
- TEST_ASSERT(3 == getHeight(&t));
- // Break the arrangement and make sure the breakage is detected.
- t.lr[1] = &l;
- t.lr[0] = &r;
- TEST_ASSERT(4 != checkAscension(&t));
- TEST_ASSERT(3 == getHeight(&t));
- TEST_ASSERT(&t == findBrokenBalanceFactor(&t)); // All zeros, incorrect.
- r.lr[1] = nullptr;
- std::cout << __LINE__ << ": " << static_cast<unsigned>(getHeight(&t)) << std::endl;
- print(&t);
- std::cout << __LINE__ << ": " << static_cast<unsigned>(getHeight(&t)) << std::endl;
- TEST_ASSERT_EQUAL_size_t(2, getHeight(&t));
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(&t)); // Balanced now as we removed one node.
-}
-
-void testRotation()
-{
- using N = Node<std::uint8_t>;
- // Original state:
- // x.left = a
- // x.right = z
- // z.left = b
- // z.right = c
- // After left rotation of X:
- // x.left = a
- // x.right = b
- // z.left = x
- // z.right = c
- N c{{Zz, {Zz, Zz}, 0}, 3};
- N b{{Zz, {Zz, Zz}, 0}, 2};
- N a{{Zz, {Zz, Zz}, 0}, 1};
- N z{{Zz, {&b, &c}, 0}, 8};
- N x{{Zz, {&a, &z}, 1}, 9};
- z.up = &x;
- c.up = &z;
- b.up = &z;
- a.up = &x;
-
- std::cout << "Before rotation:\n";
- TEST_ASSERT(nullptr == findBrokenAncestry(&x));
- print(&x);
-
- std::cout << "After left rotation:\n";
- cavlPrivateRotate(&x, false); // z is now the root
- TEST_ASSERT(nullptr == findBrokenAncestry(&z));
- print(&z);
- TEST_ASSERT(&a == x.lr[0]);
- TEST_ASSERT(&b == x.lr[1]);
- TEST_ASSERT(&x == z.lr[0]);
- TEST_ASSERT(&c == z.lr[1]);
-
- std::cout << "After right rotation, back into the original configuration:\n";
- cavlPrivateRotate(&z, true); // x is now the root
- TEST_ASSERT(nullptr == findBrokenAncestry(&x));
- print(&x);
- TEST_ASSERT(&a == x.lr[0]);
- TEST_ASSERT(&z == x.lr[1]);
- TEST_ASSERT(&b == z.lr[0]);
- TEST_ASSERT(&c == z.lr[1]);
-}
-
-void testBalancingA()
-{
- using N = Node<std::uint8_t>;
- // Double left-right rotation.
- // X X Y
- // / ` / ` / `
- // Z C => Y C => Z X
- // / ` / ` / ` / `
- // D Y Z G D F G C
- // / ` / `
- // F G D F
- N x{{Zz, {Zz, Zz}, 0}, 1}; // bf = -2
- N z{{&x, {Zz, Zz}, 0}, 2}; // bf = +1
- N c{{&x, {Zz, Zz}, 0}, 3};
- N d{{&z, {Zz, Zz}, 0}, 4};
- N y{{&z, {Zz, Zz}, 0}, 5};
- N f{{&y, {Zz, Zz}, 0}, 6};
- N g{{&y, {Zz, Zz}, 0}, 7};
- x.lr[0] = &z;
- x.lr[1] = &c;
- z.lr[0] = &d;
- z.lr[1] = &y;
- y.lr[0] = &f;
- y.lr[1] = &g;
- print(&x);
- TEST_ASSERT(nullptr == findBrokenAncestry(&x));
- TEST_ASSERT(&x == cavlPrivateAdjustBalance(&x, false)); // bf = -1, same topology
- TEST_ASSERT(-1 == x.bf);
- TEST_ASSERT(&z == cavlPrivateAdjustBalance(&z, true)); // bf = +1, same topology
- TEST_ASSERT(+1 == z.bf);
- TEST_ASSERT(&y == cavlPrivateAdjustBalance(&x, false)); // bf = -2, rotation needed
- print(&y);
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(&y)); // Should be balanced now.
- TEST_ASSERT(nullptr == findBrokenAncestry(&y));
- TEST_ASSERT(&z == y.lr[0]);
- TEST_ASSERT(&x == y.lr[1]);
- TEST_ASSERT(&d == z.lr[0]);
- TEST_ASSERT(&f == z.lr[1]);
- TEST_ASSERT(&g == x.lr[0]);
- TEST_ASSERT(&c == x.lr[1]);
- TEST_ASSERT(Zz == d.lr[0]);
- TEST_ASSERT(Zz == d.lr[1]);
- TEST_ASSERT(Zz == f.lr[0]);
- TEST_ASSERT(Zz == f.lr[1]);
- TEST_ASSERT(Zz == g.lr[0]);
- TEST_ASSERT(Zz == g.lr[1]);
- TEST_ASSERT(Zz == c.lr[0]);
- TEST_ASSERT(Zz == c.lr[1]);
-}
-
-void testBalancingB()
-{
- using N = Node<std::uint8_t>;
- // Without F the handling of Z and Y is more complex; Z flips the sign of its balance factor:
- // X X Y
- // / ` / ` / `
- // Z C => Y C => Z X
- // / ` / ` / / `
- // D Y Z G D G C
- // ` /
- // G D
- N x{};
- N z{};
- N c{};
- N d{};
- N y{};
- N g{};
- x = {{Zz, {&z, &c}, 0}, 1}; // bf = -2
- z = {{&x, {&d, &y}, 0}, 2}; // bf = +1
- c = {{&x, {Zz, Zz}, 0}, 3};
- d = {{&z, {Zz, Zz}, 0}, 4};
- y = {{&z, {Zz, &g}, 0}, 5}; // bf = +1
- g = {{&y, {Zz, Zz}, 0}, 7};
- print(&x);
- TEST_ASSERT(nullptr == findBrokenAncestry(&x));
- TEST_ASSERT(&x == cavlPrivateAdjustBalance(&x, false)); // bf = -1, same topology
- TEST_ASSERT(-1 == x.bf);
- TEST_ASSERT(&z == cavlPrivateAdjustBalance(&z, true)); // bf = +1, same topology
- TEST_ASSERT(+1 == z.bf);
- TEST_ASSERT(&y == cavlPrivateAdjustBalance(&y, true)); // bf = +1, same topology
- TEST_ASSERT(+1 == y.bf);
- TEST_ASSERT(&y == cavlPrivateAdjustBalance(&x, false)); // bf = -2, rotation needed
- print(&y);
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(&y)); // Should be balanced now.
- TEST_ASSERT(nullptr == findBrokenAncestry(&y));
- TEST_ASSERT(&z == y.lr[0]);
- TEST_ASSERT(&x == y.lr[1]);
- TEST_ASSERT(&d == z.lr[0]);
- TEST_ASSERT(Zz == z.lr[1]);
- TEST_ASSERT(&g == x.lr[0]);
- TEST_ASSERT(&c == x.lr[1]);
- TEST_ASSERT(Zz == d.lr[0]);
- TEST_ASSERT(Zz == d.lr[1]);
- TEST_ASSERT(Zz == g.lr[0]);
- TEST_ASSERT(Zz == g.lr[1]);
- TEST_ASSERT(Zz == c.lr[0]);
- TEST_ASSERT(Zz == c.lr[1]);
-}
-
-void testBalancingC()
-{
- using N = Node<std::uint8_t>;
- // Both X and Z are heavy on the same side.
- // X Z
- // / ` / `
- // Z C => D X
- // / ` / ` / `
- // D Y F G Y C
- // / `
- // F G
- N x{};
- N z{};
- N c{};
- N d{};
- N y{};
- N f{};
- N g{};
- x = {{Zz, {&z, &c}, 0}, 1}; // bf = -2
- z = {{&x, {&d, &y}, 0}, 2}; // bf = -1
- c = {{&x, {Zz, Zz}, 0}, 3};
- d = {{&z, {&f, &g}, 0}, 4};
- y = {{&z, {Zz, Zz}, 0}, 5};
- f = {{&d, {Zz, Zz}, 0}, 6};
- g = {{&d, {Zz, Zz}, 0}, 7};
- print(&x);
- TEST_ASSERT(nullptr == findBrokenAncestry(&x));
- TEST_ASSERT(&x == cavlPrivateAdjustBalance(&x, false)); // bf = -1, same topology
- TEST_ASSERT(-1 == x.bf);
- TEST_ASSERT(&z == cavlPrivateAdjustBalance(&z, false)); // bf = -1, same topology
- TEST_ASSERT(-1 == z.bf);
- TEST_ASSERT(&z == cavlPrivateAdjustBalance(&x, false));
- print(&z);
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(&z));
- TEST_ASSERT(nullptr == findBrokenAncestry(&z));
- TEST_ASSERT(&d == z.lr[0]);
- TEST_ASSERT(&x == z.lr[1]);
- TEST_ASSERT(&f == d.lr[0]);
- TEST_ASSERT(&g == d.lr[1]);
- TEST_ASSERT(&y == x.lr[0]);
- TEST_ASSERT(&c == x.lr[1]);
- TEST_ASSERT(Zz == f.lr[0]);
- TEST_ASSERT(Zz == f.lr[1]);
- TEST_ASSERT(Zz == g.lr[0]);
- TEST_ASSERT(Zz == g.lr[1]);
- TEST_ASSERT(Zz == y.lr[0]);
- TEST_ASSERT(Zz == y.lr[1]);
- TEST_ASSERT(Zz == c.lr[0]);
- TEST_ASSERT(Zz == c.lr[1]);
-}
-
-void testRetracingOnGrowth()
-{
- using N = Node<std::uint8_t>;
- std::array<N, 100> t{};
- for (std::uint_fast8_t i = 0; i < 100; i++)
- {
- t[i].value = i;
- }
- // 50 30
- // / ` / `
- // 30 60? => 20 50
- // / ` / / `
- // 20 40? 10 40? 60?
- // /
- // 10
- t[50] = {Zzzzzz, {&t[30], &t[60]}, -1};
- t[30] = {&t[50], {&t[20], &t[40]}, 00};
- t[60] = {&t[50], {Zzzzzz, Zzzzzz}, 00};
- t[20] = {&t[30], {&t[10], Zzzzzz}, 00};
- t[40] = {&t[30], {Zzzzzz, Zzzzzz}, 00};
- t[10] = {&t[20], {Zzzzzz, Zzzzzz}, 00};
- print(&t[50]); // The tree is imbalanced because we just added 1 and are about to retrace it.
- TEST_ASSERT(nullptr == findBrokenAncestry(&t[50]));
- TEST_ASSERT(6 == checkAscension(&t[50]));
- TEST_ASSERT(&t[30] == cavlPrivateRetraceOnGrowth(&t[10]));
- std::puts("ADD 10:");
- print(&t[30]); // This is the new root.
- TEST_ASSERT(&t[20] == t[30].lr[0]);
- TEST_ASSERT(&t[50] == t[30].lr[1]);
- TEST_ASSERT(&t[10] == t[20].lr[0]);
- TEST_ASSERT(Zzzzzz == t[20].lr[1]);
- TEST_ASSERT(&t[40] == t[50].lr[0]);
- TEST_ASSERT(&t[60] == t[50].lr[1]);
- TEST_ASSERT(Zzzzzz == t[10].lr[0]);
- TEST_ASSERT(Zzzzzz == t[10].lr[1]);
- TEST_ASSERT(Zzzzzz == t[40].lr[0]);
- TEST_ASSERT(Zzzzzz == t[40].lr[1]);
- TEST_ASSERT(Zzzzzz == t[60].lr[0]);
- TEST_ASSERT(Zzzzzz == t[60].lr[1]);
- TEST_ASSERT(-1 == t[20].bf);
- TEST_ASSERT(+0 == t[30].bf);
- TEST_ASSERT(nullptr == findBrokenAncestry(&t[30]));
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(&t[30]));
- TEST_ASSERT(6 == checkAscension(&t[30]));
- // Add a new child under 20 and ensure that retracing stops at 20 because it becomes perfectly balanced:
- // 30
- // / `
- // 20 50
- // / ` / `
- // 10 21 40 60
- TEST_ASSERT(nullptr == findBrokenAncestry(&t[30]));
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(&t[30]));
- t[21] = {&t[20], {Zzzzzz, Zzzzzz}, 0};
- t[20].lr[1] = &t[21];
- TEST_ASSERT(nullptr == cavlPrivateRetraceOnGrowth(&t[21])); // Root not reached, NULL returned.
- std::puts("ADD 21:");
- print(&t[30]);
- TEST_ASSERT(0 == t[20].bf);
- TEST_ASSERT(0 == t[30].bf);
- TEST_ASSERT(nullptr == findBrokenAncestry(&t[30]));
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(&t[30]));
- TEST_ASSERT(7 == checkAscension(&t[30]));
- // 30
- // / `
- // 20 50
- // / ` / `
- // 10 21 40 60
- // `
- // 15 <== first we add this, no balancing needed
- // `
- // 17 <== then we add this, forcing left rotation at 10
- //
- // After the left rotation of 10, we get:
- //
- // 30
- // / `
- // 20 50
- // / ` / `
- // 15 21 40 60
- // / `
- // 10 17
- //
- // When we add one extra item after 17, we force a double rotation (15 left, 20 right). Before the rotation:
- //
- // 30
- // / `
- // 20 50
- // / ` / `
- // 15 21 40 60
- // / `
- // 10 17
- // `
- // 18 <== new item causes imbalance
- //
- // After left rotation of 15:
- //
- // 30
- // / `
- // 20 50
- // / ` / `
- // 17 21 40 60
- // / `
- // 15 18
- // /
- // 10
- //
- // After right rotation of 20, this is the final state:
- //
- // 30
- // / `
- // 17 50
- // / ` / `
- // 15 20 40 60
- // / / `
- // 10 18 21
- std::puts("ADD 15:");
- TEST_ASSERT(nullptr == findBrokenAncestry(&t[30]));
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(&t[30]));
- TEST_ASSERT(7 == checkAscension(&t[30]));
- t[15] = {&t[10], {Zzzzzz, Zzzzzz}, 0};
- t[10].lr[1] = &t[15];
- TEST_ASSERT(&t[30] == cavlPrivateRetraceOnGrowth(&t[15])); // Same root, its balance becomes -1.
- print(&t[30]);
- TEST_ASSERT(+1 == t[10].bf);
- TEST_ASSERT(-1 == t[20].bf);
- TEST_ASSERT(-1 == t[30].bf);
- TEST_ASSERT(nullptr == findBrokenAncestry(&t[30]));
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(&t[30]));
- TEST_ASSERT(8 == checkAscension(&t[30]));
-
- std::puts("ADD 17:");
- t[17] = {&t[15], {Zzzzzz, Zzzzzz}, 0};
- t[15].lr[1] = &t[17];
- TEST_ASSERT(nullptr == cavlPrivateRetraceOnGrowth(&t[17])); // Same root, same balance, 10 rotated left.
- print(&t[30]);
- // Check 10
- TEST_ASSERT(&t[15] == t[10].up);
- TEST_ASSERT(0 == t[10].bf);
- TEST_ASSERT(nullptr == t[10].lr[0]);
- TEST_ASSERT(nullptr == t[10].lr[1]);
- // Check 17
- TEST_ASSERT(&t[15] == t[17].up);
- TEST_ASSERT(0 == t[17].bf);
- TEST_ASSERT(nullptr == t[17].lr[0]);
- TEST_ASSERT(nullptr == t[17].lr[1]);
- // Check 15
- TEST_ASSERT(&t[20] == t[15].up);
- TEST_ASSERT(0 == t[15].bf);
- TEST_ASSERT(&t[10] == t[15].lr[0]);
- TEST_ASSERT(&t[17] == t[15].lr[1]);
- // Check 20 -- leaning left
- TEST_ASSERT(&t[30] == t[20].up);
- TEST_ASSERT(-1 == t[20].bf);
- TEST_ASSERT(&t[15] == t[20].lr[0]);
- TEST_ASSERT(&t[21] == t[20].lr[1]);
- // Check the root -- still leaning left by one.
- TEST_ASSERT(nullptr == t[30].up);
- TEST_ASSERT(-1 == t[30].bf);
- TEST_ASSERT(&t[20] == t[30].lr[0]);
- TEST_ASSERT(&t[50] == t[30].lr[1]);
- // Check hard invariants.
- TEST_ASSERT(nullptr == findBrokenAncestry(&t[30]));
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(&t[30]));
- TEST_ASSERT(9 == checkAscension(&t[30]));
-
- std::puts("ADD 18:");
- t[18] = {&t[17], {Zzzzzz, Zzzzzz}, 0};
- t[17].lr[1] = &t[18];
- TEST_ASSERT(nullptr == cavlPrivateRetraceOnGrowth(&t[18])); // Same root, 15 went left, 20 went right.
- print(&t[30]);
- // Check 17
- TEST_ASSERT(&t[30] == t[17].up);
- TEST_ASSERT(0 == t[17].bf);
- TEST_ASSERT(&t[15] == t[17].lr[0]);
- TEST_ASSERT(&t[20] == t[17].lr[1]);
- // Check 15
- TEST_ASSERT(&t[17] == t[15].up);
- TEST_ASSERT(-1 == t[15].bf);
- TEST_ASSERT(&t[10] == t[15].lr[0]);
- TEST_ASSERT(nullptr == t[15].lr[1]);
- // Check 20
- TEST_ASSERT(&t[17] == t[20].up);
- TEST_ASSERT(0 == t[20].bf);
- TEST_ASSERT(&t[18] == t[20].lr[0]);
- TEST_ASSERT(&t[21] == t[20].lr[1]);
- // Check 10
- TEST_ASSERT(&t[15] == t[10].up);
- TEST_ASSERT(0 == t[10].bf);
- TEST_ASSERT(nullptr == t[10].lr[0]);
- TEST_ASSERT(nullptr == t[10].lr[1]);
- // Check 18
- TEST_ASSERT(&t[20] == t[18].up);
- TEST_ASSERT(0 == t[18].bf);
- TEST_ASSERT(nullptr == t[18].lr[0]);
- TEST_ASSERT(nullptr == t[18].lr[1]);
- // Check 21
- TEST_ASSERT(&t[20] == t[21].up);
- TEST_ASSERT(0 == t[21].bf);
- TEST_ASSERT(nullptr == t[21].lr[0]);
- TEST_ASSERT(nullptr == t[21].lr[1]);
- // Check hard invariants.
- TEST_ASSERT(nullptr == findBrokenAncestry(&t[30]));
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(&t[30]));
- TEST_ASSERT(10 == checkAscension(&t[30]));
-}
-
-void testSearchTrivial()
-{
- using N = Node<std::uint8_t>;
- // A
- // B C
- // D E F G
- N a{4};
- N b{2};
- N c{6};
- N d{1};
- N e{3};
- N f{5};
- N g{7};
- N q{9};
- a = {Zz, {&b, &c}, 0};
- b = {&a, {&d, &e}, 0};
- c = {&a, {&f, &g}, 0};
- d = {&b, {Zz, Zz}, 0};
- e = {&b, {Zz, Zz}, 0};
- f = {&c, {Zz, Zz}, 0};
- g = {&c, {Zz, Zz}, 0};
- q = {Zz, {Zz, Zz}, 0};
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(&a));
- TEST_ASSERT(nullptr == findBrokenAncestry(&a));
- TEST_ASSERT(7 == checkAscension(&a));
- N* root = &a;
- TEST_ASSERT(nullptr == cavlSearch(reinterpret_cast<Cavl**>(&root), nullptr, nullptr, nullptr)); // Bad arguments.
- TEST_ASSERT(&a == root);
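- // The predicate is a three-way comparator of the target key against the visited node:
- // negative descends left, zero is a match, positive descends right.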
- TEST_ASSERT(nullptr == search(&root, [&](const N& v) { return q.value - v.value; }));
- TEST_ASSERT(&a == root);
- TEST_ASSERT(&e == search(&root, [&](const N& v) { return e.value - v.value; }));
- TEST_ASSERT(&b == search(&root, [&](const N& v) { return b.value - v.value; }));
- TEST_ASSERT(&a == root);
- print(&a);
- TEST_ASSERT(nullptr == cavlFindExtremum(nullptr, true));
- TEST_ASSERT(nullptr == cavlFindExtremum(nullptr, false));
- TEST_ASSERT(&g == a.max());
- TEST_ASSERT(&d == a.min());
- TEST_ASSERT(&g == g.max());
- TEST_ASSERT(&g == g.min());
- TEST_ASSERT(&d == d.max());
- TEST_ASSERT(&d == d.min());
-}
-
-void testRemovalA()
-{
- using N = Node;
- // 4
- // / `
- // 2 6
- // / ` / `
- // 1 3 5 8
- // / `
- // 7 9
- std::array<N, 10> t{};
- for (std::uint_fast8_t i = 0; i < 10; i++)
- {
- t[i].value = i;
- }
- t[1] = {&t[2], {Zzzzz, Zzzzz}, 00};
- t[2] = {&t[4], {&t[1], &t[3]}, 00};
- t[3] = {&t[2], {Zzzzz, Zzzzz}, 00};
- t[4] = {Zzzzz, {&t[2], &t[6]}, +1};
- t[5] = {&t[6], {Zzzzz, Zzzzz}, 00};
- t[6] = {&t[4], {&t[5], &t[8]}, +1};
- t[7] = {&t[8], {Zzzzz, Zzzzz}, 00};
- t[8] = {&t[6], {&t[7], &t[9]}, 00};
- t[9] = {&t[8], {Zzzzz, Zzzzz}, 00};
- N* root = &t[4];
- print(root);
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(9 == checkAscension(root));
-
- // Remove 9, the easiest case. The rest of the tree remains unchanged.
- // 4
- // / `
- // 2 6
- // / ` / `
- // 1 3 5 8
- // /
- // 7
- std::puts("REMOVE 9:");
- remove(&root, &t[9]);
- TEST_ASSERT(&t[4] == root);
- print(root);
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(8 == checkAscension(root));
- // 1
- TEST_ASSERT(&t[2] == t[1].up);
- TEST_ASSERT(Zzzzz == t[1].lr[0]);
- TEST_ASSERT(Zzzzz == t[1].lr[1]);
- TEST_ASSERT(00 == t[1].bf);
- // 2
- TEST_ASSERT(&t[4] == t[2].up);
- TEST_ASSERT(&t[1] == t[2].lr[0]);
- TEST_ASSERT(&t[3] == t[2].lr[1]);
- TEST_ASSERT(00 == t[2].bf);
- // 3
- TEST_ASSERT(&t[2] == t[3].up);
- TEST_ASSERT(Zzzzz == t[3].lr[0]);
- TEST_ASSERT(Zzzzz == t[3].lr[1]);
- TEST_ASSERT(00 == t[3].bf);
- // 4
- TEST_ASSERT(Zzzzz == t[4].up); // Nihil Supernum
- TEST_ASSERT(&t[2] == t[4].lr[0]);
- TEST_ASSERT(&t[6] == t[4].lr[1]);
- TEST_ASSERT(+1 == t[4].bf);
- // 5
- TEST_ASSERT(&t[6] == t[5].up);
- TEST_ASSERT(Zzzzz == t[5].lr[0]);
- TEST_ASSERT(Zzzzz == t[5].lr[1]);
- TEST_ASSERT(00 == t[5].bf);
- // 6
- TEST_ASSERT(&t[4] == t[6].up);
- TEST_ASSERT(&t[5] == t[6].lr[0]);
- TEST_ASSERT(&t[8] == t[6].lr[1]);
- TEST_ASSERT(+1 == t[6].bf);
- // 7
- TEST_ASSERT(&t[8] == t[7].up);
- TEST_ASSERT(Zzzzz == t[7].lr[0]);
- TEST_ASSERT(Zzzzz == t[7].lr[1]);
- TEST_ASSERT(00 == t[7].bf);
- // 8
- TEST_ASSERT(&t[6] == t[8].up);
- TEST_ASSERT(&t[7] == t[8].lr[0]);
- TEST_ASSERT(Zzzzz == t[8].lr[1]);
- TEST_ASSERT(-1 == t[8].bf);
-
- // Remove 8; 7 takes its place (the one-child case). The rest of the tree remains unchanged.
- // 4
- // / `
- // 2 6
- // / ` / `
- // 1 3 5 7
- std::puts("REMOVE 8:");
- remove(&root, &t[8]);
- TEST_ASSERT(&t[4] == root);
- print(root);
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(7 == checkAscension(root));
- // 1
- TEST_ASSERT(&t[2] == t[1].up);
- TEST_ASSERT(Zzzzz == t[1].lr[0]);
- TEST_ASSERT(Zzzzz == t[1].lr[1]);
- TEST_ASSERT(00 == t[1].bf);
- // 2
- TEST_ASSERT(&t[4] == t[2].up);
- TEST_ASSERT(&t[1] == t[2].lr[0]);
- TEST_ASSERT(&t[3] == t[2].lr[1]);
- TEST_ASSERT(00 == t[2].bf);
- // 3
- TEST_ASSERT(&t[2] == t[3].up);
- TEST_ASSERT(Zzzzz == t[3].lr[0]);
- TEST_ASSERT(Zzzzz == t[3].lr[1]);
- TEST_ASSERT(00 == t[3].bf);
- // 4
- TEST_ASSERT(Zzzzz == t[4].up); // Nihil Supernum
- TEST_ASSERT(&t[2] == t[4].lr[0]);
- TEST_ASSERT(&t[6] == t[4].lr[1]);
- TEST_ASSERT(00 == t[4].bf);
- // 5
- TEST_ASSERT(&t[6] == t[5].up);
- TEST_ASSERT(Zzzzz == t[5].lr[0]);
- TEST_ASSERT(Zzzzz == t[5].lr[1]);
- TEST_ASSERT(00 == t[5].bf);
- // 6
- TEST_ASSERT(&t[4] == t[6].up);
- TEST_ASSERT(&t[5] == t[6].lr[0]);
- TEST_ASSERT(&t[7] == t[6].lr[1]);
- TEST_ASSERT(00 == t[6].bf);
- // 7
- TEST_ASSERT(&t[6] == t[7].up);
- TEST_ASSERT(Zzzzz == t[7].lr[0]);
- TEST_ASSERT(Zzzzz == t[7].lr[1]);
- TEST_ASSERT(00 == t[7].bf);
-
- // Remove the root node 4; 5 takes its place. The overall structure remains unchanged except that 5 is now the root.
- // 5
- // / `
- // 2 6
- // / ` `
- // 1 3 7
- std::puts("REMOVE 4:");
- remove(&root, &t[4]);
- print(root);
- TEST_ASSERT(&t[5] == root);
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(6 == checkAscension(root));
- // 1
- TEST_ASSERT(&t[2] == t[1].up);
- TEST_ASSERT(Zzzzz == t[1].lr[0]);
- TEST_ASSERT(Zzzzz == t[1].lr[1]);
- TEST_ASSERT(00 == t[1].bf);
- // 2
- TEST_ASSERT(&t[5] == t[2].up);
- TEST_ASSERT(&t[1] == t[2].lr[0]);
- TEST_ASSERT(&t[3] == t[2].lr[1]);
- TEST_ASSERT(00 == t[2].bf);
- // 3
- TEST_ASSERT(&t[2] == t[3].up);
- TEST_ASSERT(Zzzzz == t[3].lr[0]);
- TEST_ASSERT(Zzzzz == t[3].lr[1]);
- TEST_ASSERT(00 == t[3].bf);
- // 5
- TEST_ASSERT(Zzzzz == t[5].up); // Nihil Supernum
- TEST_ASSERT(&t[2] == t[5].lr[0]);
- TEST_ASSERT(&t[6] == t[5].lr[1]);
- TEST_ASSERT(00 == t[5].bf);
- // 6
- TEST_ASSERT(&t[5] == t[6].up);
- TEST_ASSERT(Zzzzz == t[6].lr[0]);
- TEST_ASSERT(&t[7] == t[6].lr[1]);
- TEST_ASSERT(+1 == t[6].bf);
- // 7
- TEST_ASSERT(&t[6] == t[7].up);
- TEST_ASSERT(Zzzzz == t[7].lr[0]);
- TEST_ASSERT(Zzzzz == t[7].lr[1]);
- TEST_ASSERT(00 == t[7].bf);
-
- // Remove the root node 5; 6 takes its place.
- // 6
- // / `
- // 2 7
- // / `
- // 1 3
- std::puts("REMOVE 5:");
- remove(&root, &t[5]);
- TEST_ASSERT(&t[6] == root);
- print(root);
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(5 == checkAscension(root));
- // 1
- TEST_ASSERT(&t[2] == t[1].up);
- TEST_ASSERT(Zzzzz == t[1].lr[0]);
- TEST_ASSERT(Zzzzz == t[1].lr[1]);
- TEST_ASSERT(00 == t[1].bf);
- // 2
- TEST_ASSERT(&t[6] == t[2].up);
- TEST_ASSERT(&t[1] == t[2].lr[0]);
- TEST_ASSERT(&t[3] == t[2].lr[1]);
- TEST_ASSERT(00 == t[2].bf);
- // 3
- TEST_ASSERT(&t[2] == t[3].up);
- TEST_ASSERT(Zzzzz == t[3].lr[0]);
- TEST_ASSERT(Zzzzz == t[3].lr[1]);
- TEST_ASSERT(00 == t[3].bf);
- // 6
- TEST_ASSERT(Zzzzz == t[6].up); // Nihil Supernum
- TEST_ASSERT(&t[2] == t[6].lr[0]);
- TEST_ASSERT(&t[7] == t[6].lr[1]);
- TEST_ASSERT(-1 == t[6].bf);
- // 7
- TEST_ASSERT(&t[6] == t[7].up);
- TEST_ASSERT(Zzzzz == t[7].lr[0]);
- TEST_ASSERT(Zzzzz == t[7].lr[1]);
- TEST_ASSERT(00 == t[7].bf);
-
- // Remove the root node 6; 7 takes its place, then a right rotation restores balance, making 2 the new root.
- // 2
- // / `
- // 1 7
- // /
- // 3
- std::puts("REMOVE 6:");
- remove(&root, &t[6]);
- TEST_ASSERT(&t[2] == root);
- print(root);
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(4 == checkAscension(root));
- // 1
- TEST_ASSERT(&t[2] == t[1].up);
- TEST_ASSERT(Zzzzz == t[1].lr[0]);
- TEST_ASSERT(Zzzzz == t[1].lr[1]);
- TEST_ASSERT(00 == t[1].bf);
- // 2
- TEST_ASSERT(Zzzzz == t[2].up); // Nihil Supernum
- TEST_ASSERT(&t[1] == t[2].lr[0]);
- TEST_ASSERT(&t[7] == t[2].lr[1]);
- TEST_ASSERT(+1 == t[2].bf);
- // 3
- TEST_ASSERT(&t[7] == t[3].up);
- TEST_ASSERT(Zzzzz == t[3].lr[0]);
- TEST_ASSERT(Zzzzz == t[3].lr[1]);
- TEST_ASSERT(00 == t[3].bf);
- // 7
- TEST_ASSERT(&t[2] == t[7].up);
- TEST_ASSERT(&t[3] == t[7].lr[0]);
- TEST_ASSERT(Zzzzz == t[7].lr[1]);
- TEST_ASSERT(-1 == t[7].bf);
-
- // Remove 1, then balancing makes 3 the new root node.
- // 3
- // / `
- // 2 7
- std::puts("REMOVE 1:");
- remove(&root, &t[1]);
- TEST_ASSERT(&t[3] == root);
- print(root);
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(3 == checkAscension(root));
- // 2
- TEST_ASSERT(&t[3] == t[2].up);
- TEST_ASSERT(Zzzzz == t[2].lr[0]);
- TEST_ASSERT(Zzzzz == t[2].lr[1]);
- TEST_ASSERT(0 == t[2].bf);
- // 3
- TEST_ASSERT(Zzzzz == t[3].up); // Nihil Supernum
- TEST_ASSERT(&t[2] == t[3].lr[0]);
- TEST_ASSERT(&t[7] == t[3].lr[1]);
- TEST_ASSERT(00 == t[3].bf);
- // 7
- TEST_ASSERT(&t[3] == t[7].up);
- TEST_ASSERT(Zzzzz == t[7].lr[0]);
- TEST_ASSERT(Zzzzz == t[7].lr[1]);
- TEST_ASSERT(00 == t[7].bf);
-
- // Remove 7.
- // 3
- // /
- // 2
- std::puts("REMOVE 7:");
- remove(&root, &t[7]);
- TEST_ASSERT(&t[3] == root);
- print(root);
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(2 == checkAscension(root));
- // 2
- TEST_ASSERT(&t[3] == t[2].up);
- TEST_ASSERT(Zzzzz == t[2].lr[0]);
- TEST_ASSERT(Zzzzz == t[2].lr[1]);
- TEST_ASSERT(0 == t[2].bf);
- // 3
- TEST_ASSERT(Zzzzz == t[3].up); // Nihil Supernum
- TEST_ASSERT(&t[2] == t[3].lr[0]);
- TEST_ASSERT(Zzzzz == t[3].lr[1]);
- TEST_ASSERT(-1 == t[3].bf);
-
- // Remove 3. Only 2 is left, which is now obviously the root.
- std::puts("REMOVE 3:");
- remove(&root, &t[3]);
- TEST_ASSERT(&t[2] == root);
- print(root);
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(1 == checkAscension(root));
- // 2
- TEST_ASSERT(Zzzzz == t[2].up);
- TEST_ASSERT(Zzzzz == t[2].lr[0]);
- TEST_ASSERT(Zzzzz == t[2].lr[1]);
- TEST_ASSERT(0 == t[2].bf);
-
- // Remove 2. The tree is now empty; make sure the root pointer is updated accordingly.
- std::puts("REMOVE 2:");
- remove(&root, &t[2]);
- TEST_ASSERT(nullptr == root);
-}
-
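-// Builds the perfect 31-node tree by successive insertion, then removes hand-picked nodes,
-// checking the exact linkage and balance factor of the affected nodes after every step.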
-void testMutationManual()
-{
- using N = Node;
- // Build a tree with 31 elements from 1 to 31 inclusive by adding new elements successively:
- // 16
- // / `
- // 8 24
- // / ` / `
- // 4 12 20 28
- // / ` / ` / ` / `
- // 2 6 10 14 18 22 26 30
- // / ` / ` / ` / ` / ` / ` / ` / `
- // 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31
- std::array<N, 32> t{};
- for (std::uint_fast8_t i = 0; i < 32; i++)
- {
- t[i].value = i;
- }
- // Build the actual tree.
- N* root = nullptr;
- for (std::uint_fast8_t i = 1; i < 32; i++)
- {
- const auto pred = [&](const N& v) { return t.at(i).value - v.value; };
- TEST_ASSERT(nullptr == search(&root, pred));
- TEST_ASSERT(&t[i] == search(&root, pred, [&]() { return &t.at(i); }));
- TEST_ASSERT(&t[i] == search(&root, pred));
- // Validate the tree after every mutation.
- TEST_ASSERT(nullptr != root);
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(i == checkAscension(root));
- }
- print(root);
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(31 == checkAscension(root));
- // Check composition -- ensure that every element is in the tree and it is there exactly once.
- {
- std::array<bool, 32> seen{};
- traverse(root, [&](const N* const n) {
- TEST_ASSERT(!seen.at(n->value));
- seen[n->value] = true;
- });
- TEST_ASSERT(std::all_of(seen.begin() + 1, seen.end(), [](bool x) { return x; }));
- }
-
- // REMOVE 24
- // 16
- // / `
- // 8 25
- // / ` / `
- // 4 12 20 28
- // / ` / ` / ` / `
- // 2 6 10 14 18 22 26 30
- // / ` / ` / ` / ` / ` / ` ` / `
- // 1 3 5 7 9 11 13 15 17 19 21 23 27 29 31
- std::puts("REMOVE 24:");
- TEST_ASSERT(t[24].checkLinkageUpLeftRightBF(&t[16], &t[20], &t[28], 00));
- remove(&root, &t[24]);
- TEST_ASSERT(&t[16] == root);
- print(root);
- TEST_ASSERT(t[25].checkLinkageUpLeftRightBF(&t[16], &t[20], &t[28], 00));
- TEST_ASSERT(t[26].checkLinkageUpLeftRightBF(&t[28], Zzzzzz, &t[27], +1));
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(30 == checkAscension(root));
-
- // REMOVE 25
- // 16
- // / `
- // 8 26
- // / ` / `
- // 4 12 20 28
- // / ` / ` / ` / `
- // 2 6 10 14 18 22 27 30
- // / ` / ` / ` / ` / ` / ` / `
- // 1 3 5 7 9 11 13 15 17 19 21 23 29 31
- std::puts("REMOVE 25:");
- TEST_ASSERT(t[25].checkLinkageUpLeftRightBF(&t[16], &t[20], &t[28], 00));
- remove(&root, &t[25]);
- TEST_ASSERT(&t[16] == root);
- print(root);
- TEST_ASSERT(t[26].checkLinkageUpLeftRightBF(&t[16], &t[20], &t[28], 00));
- TEST_ASSERT(t[28].checkLinkageUpLeftRightBF(&t[26], &t[27], &t[30], +1));
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(29 == checkAscension(root));
-
- // REMOVE 26
- // 16
- // / `
- // 8 27
- // / ` / `
- // 4 12 20 30
- // / ` / ` / ` / `
- // 2 6 10 14 18 22 28 31
- // / ` / ` / ` / ` / ` / ` `
- // 1 3 5 7 9 11 13 15 17 19 21 23 29
- std::puts("REMOVE 26:");
- TEST_ASSERT(t[26].checkLinkageUpLeftRightBF(&t[16], &t[20], &t[28], 00));
- remove(&root, &t[26]);
- TEST_ASSERT(&t[16] == root);
- print(root);
- TEST_ASSERT(t[27].checkLinkageUpLeftRightBF(&t[16], &t[20], &t[30], 00));
- TEST_ASSERT(t[30].checkLinkageUpLeftRightBF(&t[27], &t[28], &t[31], -1));
- TEST_ASSERT(t[28].checkLinkageUpLeftRightBF(&t[30], Zzzzzz, &t[29], +1));
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(28 == checkAscension(root));
-
- // REMOVE 20
- // 16
- // / `
- // 8 27
- // / ` / `
- // 4 12 21 30
- // / ` / ` / ` / `
- // 2 6 10 14 18 22 28 31
- // / ` / ` / ` / ` / ` ` `
- // 1 3 5 7 9 11 13 15 17 19 23 29
- std::puts("REMOVE 20:");
- TEST_ASSERT(t[20].checkLinkageUpLeftRightBF(&t[27], &t[18], &t[22], 00));
- remove(&root, &t[20]);
- TEST_ASSERT(&t[16] == root);
- print(root);
- TEST_ASSERT(t[21].checkLinkageUpLeftRightBF(&t[27], &t[18], &t[22], 00));
- TEST_ASSERT(t[22].checkLinkageUpLeftRightBF(&t[21], Zzzzzz, &t[23], +1));
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(27 == checkAscension(root));
-
- // REMOVE 27
- // 16
- // / `
- // 8 28
- // / ` / `
- // 4 12 21 30
- // / ` / ` / ` / `
- // 2 6 10 14 18 22 29 31
- // / ` / ` / ` / ` / ` `
- // 1 3 5 7 9 11 13 15 17 19 23
- std::puts("REMOVE 27:");
- TEST_ASSERT(t[27].checkLinkageUpLeftRightBF(&t[16], &t[21], &t[30], 00));
- remove(&root, &t[27]);
- TEST_ASSERT(&t[16] == root);
- print(root);
- TEST_ASSERT(t[28].checkLinkageUpLeftRightBF(&t[16], &t[21], &t[30], -1));
- TEST_ASSERT(t[30].checkLinkageUpLeftRightBF(&t[28], &t[29], &t[31], 00));
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(26 == checkAscension(root));
-
- // REMOVE 28
- // 16
- // / `
- // 8 29
- // / ` / `
- // 4 12 21 30
- // / ` / ` / ` `
- // 2 6 10 14 18 22 31
- // / ` / ` / ` / ` / ` `
- // 1 3 5 7 9 11 13 15 17 19 23
- std::puts("REMOVE 28:");
- TEST_ASSERT(t[28].checkLinkageUpLeftRightBF(&t[16], &t[21], &t[30], -1));
- remove(&root, &t[28]);
- TEST_ASSERT(&t[16] == root);
- print(root);
- TEST_ASSERT(t[29].checkLinkageUpLeftRightBF(&t[16], &t[21], &t[30], -1));
- TEST_ASSERT(t[30].checkLinkageUpLeftRightBF(&t[29], Zzzzzz, &t[31], +1));
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(25 == checkAscension(root));
-
- // REMOVE 29; UNBALANCED TREE BEFORE ROTATION:
- // 16
- // / `
- // 8 30
- // / ` / `
- // 4 12 21 31
- // / ` / ` / `
- // 2 6 10 14 18 22
- // / ` / ` / ` / ` / ` `
- // 1 3 5 7 9 11 13 15 17 19 23
- //
- // FINAL STATE AFTER ROTATION:
- // 16
- // / `
- // 8 21
- // / ` / `
- // 4 12 18 30
- // / ` / ` / ` / `
- // 2 6 10 14 17 19 22 31
- // / ` / ` / ` / ` `
- // 1 3 5 7 9 11 13 15 23
- std::puts("REMOVE 29:");
- TEST_ASSERT(t[29].checkLinkageUpLeftRightBF(&t[16], &t[21], &t[30], -1));
- remove(&root, &t[29]);
- TEST_ASSERT(&t[16] == root);
- print(root);
- TEST_ASSERT(t[21].checkLinkageUpLeftRightBF(&t[16], &t[18], &t[30], +1));
- TEST_ASSERT(t[18].checkLinkageUpLeftRightBF(&t[21], &t[17], &t[19], 00));
- TEST_ASSERT(t[30].checkLinkageUpLeftRightBF(&t[21], &t[22], &t[31], -1));
- TEST_ASSERT(t[22].checkLinkageUpLeftRightBF(&t[30], Zzzzzz, &t[23], +1));
- TEST_ASSERT(t[16].checkLinkageUpLeftRightBF(Zzzzzz, &t[8], &t[21], 00));
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(24 == checkAscension(root));
-
- // REMOVE 8
- // 16
- // / `
- // 9 21
- // / ` / `
- // 4 12 18 30
- // / ` / ` / ` / `
- // 2 6 10 14 17 19 22 31
- // / ` / ` ` / ` `
- // 1 3 5 7 11 13 15 23
- std::puts("REMOVE 8:");
- TEST_ASSERT(t[8].checkLinkageUpLeftRightBF(&t[16], &t[4], &t[12], 00));
- remove(&root, &t[8]);
- TEST_ASSERT(&t[16] == root);
- print(root);
- TEST_ASSERT(t[9].checkLinkageUpLeftRightBF(&t[16], &t[4], &t[12], 00));
- TEST_ASSERT(t[10].checkLinkageUpLeftRightBF(&t[12], Zzzzz, &t[11], +1));
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(23 == checkAscension(root));
-
- // REMOVE 9
- // 16
- // / `
- // 10 21
- // / ` / `
- // 4 12 18 30
- // / ` / ` / ` / `
- // 2 6 11 14 17 19 22 31
- // / ` / ` / ` `
- // 1 3 5 7 13 15 23
- std::puts("REMOVE 9:");
- TEST_ASSERT(t[9].checkLinkageUpLeftRightBF(&t[16], &t[4], &t[12], 00));
- remove(&root, &t[9]);
- TEST_ASSERT(&t[16] == root);
- print(root);
- TEST_ASSERT(t[10].checkLinkageUpLeftRightBF(&t[16], &t[4], &t[12], 00));
- TEST_ASSERT(t[12].checkLinkageUpLeftRightBF(&t[10], &t[11], &t[14], +1));
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(22 == checkAscension(root));
-
- // REMOVE 1
- // 16
- // / `
- // 10 21
- // / ` / `
- // 4 12 18 30
- // / ` / ` / ` / `
- // 2 6 11 14 17 19 22 31
- // ` / ` / ` `
- // 3 5 7 13 15 23
- std::puts("REMOVE 1:");
- TEST_ASSERT(t[1].checkLinkageUpLeftRightBF(&t[2], Zzzzz, Zzzzz, 00));
- remove(&root, &t[1]);
- TEST_ASSERT(&t[16] == root);
- print(root);
- TEST_ASSERT(t[2].checkLinkageUpLeftRightBF(&t[4], Zzzzz, &t[3], +1));
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(21 == checkAscension(root));
-}
-
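-// Maps std::rand() onto the full [0, 255] range; the multiplication is done as long long
-// to avoid overflow before dividing by RAND_MAX.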
-auto getRandomByte()
-{
- return static_cast<std::uint8_t>((0xFFLL * std::rand()) / RAND_MAX);
-}
-
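-// Differential fuzz test: a 256-entry membership mask serves as the reference model,
-// and the tree is validated against it after every mutation.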
-void testMutationRandomized()
-{
- using N = Node;
- std::array<N, 256> t{};
- for (auto i = 0U; i < 256U; i++)
- {
- t.at(i).value = static_cast<std::uint8_t>(i);
- }
- std::array<bool, 256> mask{};
- std::size_t size = 0;
- N* root = nullptr;
-
- std::uint64_t cnt_addition = 0;
- std::uint64_t cnt_removal = 0;
-
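- // Checks the hard AVL invariants and verifies that the tree contents exactly match the reference mask.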
- const auto validate = [&] {
- TEST_ASSERT(size == std::accumulate(mask.begin(), mask.end(), 0U, [](const std::size_t a, const std::size_t b) {
- return a + b;
- }));
- TEST_ASSERT(nullptr == findBrokenBalanceFactor(root));
- TEST_ASSERT(nullptr == findBrokenAncestry(root));
- TEST_ASSERT(size == checkAscension(root));
- std::array<bool, 256> new_mask{};
- traverse(root, [&](const N* node) { new_mask.at(node->value) = true; });
- TEST_ASSERT(mask == new_mask); // Otherwise, the contents of the tree do not match our expectations.
- };
- validate();
-
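- // Inserts x through the search-with-factory API; the factory must be invoked only when the key is absent.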
- const auto add = [&](const std::uint_fast8_t x) {
- const auto predicate = [&](const N& v) { return x - v.value; };
- if (N* const existing = search(&root, predicate))
- {
- TEST_ASSERT(mask.at(x));
- TEST_ASSERT(x == existing->value);
- TEST_ASSERT(x == search(&root, predicate, []() -> N* {
- throw std::logic_error("Attempted to create a new node when there is one already");
- })->value);
- }
- else
- {
- TEST_ASSERT(!mask.at(x));
- bool factory_called = false;
- TEST_ASSERT(x == search(&root, predicate, [&]() -> N* {
- factory_called = true; // NOLINT(bugprone-assignment-in-if-condition)
- return &t.at(x);
- })->value);
- TEST_ASSERT(factory_called);
- size++;
- cnt_addition++;
- mask.at(x) = true;
- }
- };
-
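- // Removes x if present; attempting to remove a missing key must leave the tree unchanged.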
- const auto drop = [&](const std::uint_fast8_t x) {
- const auto predicate = [&](const N& v) { return x - v.value; };
- if (N* const existing = search(&root, predicate))
- {
- TEST_ASSERT(mask.at(x));
- TEST_ASSERT(x == existing->value);
- remove(&root, existing);
- size--;
- cnt_removal++;
- mask.at(x) = false;
- TEST_ASSERT(nullptr == search(&root, predicate));
- }
- else
- {
- TEST_ASSERT(!mask.at(x));
- }
- };
-
- std::puts("Running the randomized test...");
- for (std::uint32_t iteration = 0U; iteration < 10'000U; iteration++)
- {
- if ((getRandomByte() % 2U) != 0)
- {
- add(getRandomByte());
- }
- else
- {
- drop(getRandomByte());
- }
- validate();
- }
-
- std::cout << "Randomized test finished. Final state:\n" //
- << "\tsize: " << size //
- << "\tcnt_addition: " << cnt_addition //
- << "\tcnt_removal: " << cnt_removal //
- << std::endl;
- if (root != nullptr)
- {
- std::cout << "\tmin/max: " << static_cast(root->min()->value) //
- << "/" << static_cast(root->max()->value) //
- << std::endl;
- }
- validate();
-}
-
-} // namespace
-
-void setUp() {}
-
-void tearDown() {}
-
-int main(const int argc, const char* const argv[])
-{
- const auto seed = static_cast<unsigned>((argc > 1) ? std::atoll(argv[1]) : std::time(nullptr)); // NOLINT
- std::cout << "Randomness seed: " << seed << std::endl;
- std::srand(seed);
- UNITY_BEGIN();
- RUN_TEST(testCheckAscension);
- RUN_TEST(testRotation);
- RUN_TEST(testBalancingA);
- RUN_TEST(testBalancingB);
- RUN_TEST(testBalancingC);
- RUN_TEST(testRetracingOnGrowth);
- RUN_TEST(testSearchTrivial);
- RUN_TEST(testRemovalA);
- RUN_TEST(testMutationManual);
- RUN_TEST(testMutationRandomized);
- return UNITY_END();
-}
diff --git a/tests/src/test_e2e.cpp b/tests/src/test_e2e.cpp
deleted file mode 100644
index 8ea7800..0000000
--- a/tests/src/test_e2e.cpp
+++ /dev/null
@@ -1,623 +0,0 @@
-/// This software is distributed under the terms of the MIT License.
-/// Copyright (C) OpenCyphal Development Team
-/// Copyright Amazon.com Inc. or its affiliates.
-/// SPDX-License-Identifier: MIT
-
-#include <udpard.h>
-#include "helpers.h"
-#include <unity.h>
-#include <array>
-#include <cstdint>
-#include <cstring>
-#include <string_view>
-
-namespace
-{
-
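-/// Wraps a string view into an UdpardPayload without copying; the string must outlive the returned view.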
-UdpardPayload makePayload(const std::string_view& payload)
-{
- return {.size = payload.size(), .data = payload.data()};
-}
-
-/// A wrapper over udpardRxSubscriptionReceive() that copies the datagram payload into a newly allocated buffer.
-[[nodiscard]] int_fast8_t rxSubscriptionReceive(UdpardRxSubscription* const self,
- InstrumentedAllocator& payload_memory,
- const UdpardMicrosecond timestamp_usec,
- const UdpardMutablePayload datagram_payload,
- const uint_fast8_t redundant_iface_index,
- UdpardRxTransfer* const out_transfer)
-{
- return udpardRxSubscriptionReceive(self,
- timestamp_usec,
- {
- .size = datagram_payload.size,
- .data = std::memmove(instrumentedAllocatorAllocate(&payload_memory,
- datagram_payload.size),
- datagram_payload.data,
- datagram_payload.size),
- },
- redundant_iface_index,
- out_transfer);
-}
-
-/// A wrapper over udpardRxRPCDispatcherReceive() that copies the datagram payload into a newly allocated buffer.
-[[nodiscard]] int_fast8_t rxRPCDispatcherReceive(UdpardRxRPCDispatcher* const self,
- InstrumentedAllocator& payload_memory,
- const UdpardMicrosecond timestamp_usec,
- const UdpardMutablePayload datagram_payload,
- const uint_fast8_t redundant_iface_index,
- UdpardRxRPCPort** const out_port,
- UdpardRxRPCTransfer* const out_transfer)
-{
- return udpardRxRPCDispatcherReceive(self,
- timestamp_usec,
- {
- .size = datagram_payload.size,
- .data = std::memmove(instrumentedAllocatorAllocate(&payload_memory,
- datagram_payload.size),
- datagram_payload.data,
- datagram_payload.size),
- },
- redundant_iface_index,
- out_port,
- out_transfer);
-}
-
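-/// End-to-end pub/sub exercise: single-frame, multi-frame, and anonymous transfers,
-/// duplicate rejection on redundant ifaces, and exact memory accounting.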
-void testPubSub()
-{
- InstrumentedAllocator alloc_tx;
- InstrumentedAllocator alloc_rx_session;
- InstrumentedAllocator alloc_rx_fragment;
- InstrumentedAllocator alloc_rx_payload;
- instrumentedAllocatorNew(&alloc_tx);
- instrumentedAllocatorNew(&alloc_rx_session);
- instrumentedAllocatorNew(&alloc_rx_fragment);
- instrumentedAllocatorNew(&alloc_rx_payload);
- const UdpardTxMemoryResources mem_tx{
- .fragment = instrumentedAllocatorMakeMemoryResource(&alloc_tx),
- .payload = instrumentedAllocatorMakeMemoryResource(&alloc_tx),
- };
- const UdpardRxMemoryResources mem_rx{
- .session = instrumentedAllocatorMakeMemoryResource(&alloc_rx_session),
- .fragment = instrumentedAllocatorMakeMemoryResource(&alloc_rx_fragment),
- .payload = instrumentedAllocatorMakeMemoryDeleter(&alloc_rx_payload),
- };
- // Initialize the TX pipeline. Set the MTU to a low value to ensure that we test multi-frame transfers.
- UdpardTx tx{};
- UdpardNodeID node_id = UDPARD_NODE_ID_UNSET;
- TEST_ASSERT_EQUAL(0, udpardTxInit(&tx, &node_id, 7, mem_tx));
- tx.mtu = 100;
- for (auto i = 0U; i <= UDPARD_PRIORITY_MAX; i++)
- {
- tx.dscp_value_per_priority[i] = static_cast<std::uint_least8_t>(0xA0U + i);
- }
- // Initialize the subscriptions.
- std::array<UdpardRxSubscription, 3> sub{};
- TEST_ASSERT_EQUAL(0, udpardRxSubscriptionInit(&sub.at(0), 5000, 300, mem_rx));
- TEST_ASSERT_EQUAL(0, udpardRxSubscriptionInit(&sub.at(1), 5001, 200, mem_rx));
- TEST_ASSERT_EQUAL(0, udpardRxSubscriptionInit(&sub.at(2), 5002, 100, mem_rx));
-
- // Publish something on subject 5000.
- std::array<UdpardTransferID, 3> transfer_id{};
- TEST_ASSERT_EQUAL(1, // Single-frame anonymous = success.
- udpardTxPublish(&tx,
- 10'000'000,
- UdpardPrioritySlow,
- 5000,
- transfer_id.at(0)++,
- makePayload("Last night, I had a dream."),
- nullptr));
- const std::string_view Eden =
- "After speaking with Scott, Lan Xi halted his busy work amid chaotic feelings, and stopped to think, as the "
- "colonel had advised. Faster than he had imagined, Eden's cold, slippery vipers crawled into his "
- "consciousness. He found the fruit of knowledge and ate it, and the last rays of sunshine in his soul "
- "disappeared forever as everything plunged into darkness.";
- TEST_ASSERT_EQUAL(-UDPARD_ERROR_ANONYMOUS,
- udpardTxPublish(&tx,
- 10'001'000,
- UdpardPriorityNominal,
- 5000,
- transfer_id.at(0),
- makePayload(Eden),
- nullptr));
- node_id = 42; // Change the node-ID to allow multi-frame transfers, then try again.
- TEST_ASSERT_EQUAL(4,
- udpardTxPublish(&tx,
- 10'002'000,
- UdpardPriorityOptional,
- 5000,
- transfer_id.at(0)++,
- makePayload(Eden),
- nullptr));
- TEST_ASSERT_EQUAL(5, tx.queue_size);
-
- // Publish something on subject 5001. The priority here is higher so it should be delivered earlier.
- node_id = 43; // Change the node-ID.
- const std::string_view Later = "Two days later, the captain of Ultimate Law committed suicide.";
- TEST_ASSERT_EQUAL(1,
- udpardTxPublish(&tx,
- 10'003'000,
- UdpardPriorityNominal,
- 5001,
- transfer_id.at(1)++,
- makePayload(Later),
- nullptr));
- TEST_ASSERT_EQUAL(6, tx.queue_size);
-
- // Publish something on subject 5002. The priority here is the same.
- const std::string_view Dark = "'Dark. It's so fucking dark,' the captain murmured, and then shot himself.";
- TEST_ASSERT_EQUAL(1,
- udpardTxPublish(&tx,
- 10'004'000,
- UdpardPriorityNominal,
- 5002,
- transfer_id.at(2)++,
- makePayload(Dark),
- nullptr));
- TEST_ASSERT_EQUAL(7, tx.queue_size);
- TEST_ASSERT_EQUAL(7 * 2ULL, alloc_tx.allocated_fragments);
-
- // Transmit the enqueued frames by pushing them into the subscribers.
- // Here we pop the frames one by one ensuring that they come out in the correct order.
- UdpardRxTransfer transfer{};
- // First transfer.
- TEST_ASSERT_EQUAL(0, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments);
- UdpardTxItem* tx_item = udpardTxPeek(&tx);
- TEST_ASSERT_NOT_NULL(tx_item);
- TEST_ASSERT_EQUAL(sub.at(1).udp_ip_endpoint.ip_address, tx_item->destination.ip_address);
- TEST_ASSERT_NULL(tx_item->next_in_transfer);
- TEST_ASSERT_EQUAL(10'003'000, tx_item->deadline_usec);
- TEST_ASSERT_EQUAL(0xA4, tx_item->dscp);
- TEST_ASSERT_EQUAL(1,
- rxSubscriptionReceive(&sub.at(1),
- alloc_rx_payload,
- 10'005'000,
- tx_item->datagram_payload,
- 0,
- &transfer));
- TEST_ASSERT_EQUAL(1, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(1, alloc_rx_payload.allocated_fragments);
- // Check the received transfer.
- TEST_ASSERT_EQUAL(10'005'000, transfer.timestamp_usec);
- TEST_ASSERT_EQUAL(UdpardPriorityNominal, transfer.priority);
- TEST_ASSERT_EQUAL(43, transfer.source_node_id);
- TEST_ASSERT_EQUAL(0, transfer.transfer_id);
- TEST_ASSERT_EQUAL(Later.size(), transfer.payload_size);
- TEST_ASSERT_EQUAL(Later.size(), transfer.payload.view.size);
- TEST_ASSERT_EQUAL_MEMORY(Later.data(), transfer.payload.view.data, transfer.payload.view.size);
- TEST_ASSERT_NULL(transfer.payload.next);
- // Free the transfer payload.
- udpardRxFragmentFree(transfer.payload, mem_rx.fragment, mem_rx.payload);
- TEST_ASSERT_EQUAL(1, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments);
- // Send duplicates.
- TEST_ASSERT_EQUAL(0, // Duplicate on same iface.
- rxSubscriptionReceive(&sub.at(1),
- alloc_rx_payload,
- 10'005'100,
- tx_item->datagram_payload,
- 0,
- &transfer));
- TEST_ASSERT_EQUAL(0, // Duplicate on another iface.
- rxSubscriptionReceive(&sub.at(1),
- alloc_rx_payload,
- 10'005'200,
- tx_item->datagram_payload,
- 1,
- &transfer));
- // Ensure the duplicates do not alter memory usage.
- TEST_ASSERT_EQUAL(1, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments);
- // Free the TX item.
- udpardTxFree(mem_tx, udpardTxPop(&tx, tx_item));
- TEST_ASSERT_EQUAL(6 * 2ULL, alloc_tx.allocated_fragments);
-
- // Second transfer.
- tx_item = udpardTxPeek(&tx);
- TEST_ASSERT_NOT_NULL(tx_item);
- TEST_ASSERT_EQUAL(sub.at(2).udp_ip_endpoint.ip_address, tx_item->destination.ip_address);
- TEST_ASSERT_NULL(tx_item->next_in_transfer);
- TEST_ASSERT_EQUAL(10'004'000, tx_item->deadline_usec);
- TEST_ASSERT_EQUAL(0xA4, tx_item->dscp);
- TEST_ASSERT_EQUAL(1,
- rxSubscriptionReceive(&sub.at(2),
- alloc_rx_payload,
- 10'006'000,
- tx_item->datagram_payload,
- 1,
- &transfer));
- TEST_ASSERT_EQUAL(2, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(1, alloc_rx_payload.allocated_fragments);
- // Check the received transfer.
- TEST_ASSERT_EQUAL(10'006'000, transfer.timestamp_usec);
- TEST_ASSERT_EQUAL(UdpardPriorityNominal, transfer.priority);
- TEST_ASSERT_EQUAL(43, transfer.source_node_id);
- TEST_ASSERT_EQUAL(0, transfer.transfer_id);
- TEST_ASSERT_EQUAL(Dark.size(), transfer.payload_size);
- TEST_ASSERT_EQUAL(Dark.size(), transfer.payload.view.size);
- TEST_ASSERT_EQUAL_MEMORY(Dark.data(), transfer.payload.view.data, transfer.payload.view.size);
- TEST_ASSERT_NULL(transfer.payload.next);
- // Free the transfer payload.
- udpardRxFragmentFree(transfer.payload, mem_rx.fragment, mem_rx.payload);
- TEST_ASSERT_EQUAL(2, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments);
- // Free the TX item.
- udpardTxFree(mem_tx, udpardTxPop(&tx, tx_item));
- TEST_ASSERT_EQUAL(5 * 2ULL, alloc_tx.allocated_fragments);
-
- // Third transfer. This one is anonymous.
- tx_item = udpardTxPeek(&tx);
- TEST_ASSERT_NOT_NULL(tx_item);
- TEST_ASSERT_EQUAL(sub.at(0).udp_ip_endpoint.ip_address, tx_item->destination.ip_address);
- TEST_ASSERT_NULL(tx_item->next_in_transfer);
- TEST_ASSERT_EQUAL(10'000'000, tx_item->deadline_usec);
- TEST_ASSERT_EQUAL(0xA6, tx_item->dscp);
- TEST_ASSERT_EQUAL(1,
- rxSubscriptionReceive(&sub.at(0),
- alloc_rx_payload,
- 10'007'000,
- tx_item->datagram_payload,
- 2,
- &transfer));
- TEST_ASSERT_EQUAL(2, alloc_rx_session.allocated_fragments); // No increment, anonymous transfers are stateless.
- TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(1, alloc_rx_payload.allocated_fragments);
- // Check the received transfer.
- TEST_ASSERT_EQUAL(10'007'000, transfer.timestamp_usec);
- TEST_ASSERT_EQUAL(UdpardPrioritySlow, transfer.priority);
- TEST_ASSERT_EQUAL(UDPARD_NODE_ID_UNSET, transfer.source_node_id);
- TEST_ASSERT_EQUAL(0, transfer.transfer_id);
- TEST_ASSERT_EQUAL(26, transfer.payload_size);
- TEST_ASSERT_EQUAL(26, transfer.payload.view.size);
- TEST_ASSERT_EQUAL_MEMORY("Last night, I had a dream.", transfer.payload.view.data, transfer.payload.view.size);
- TEST_ASSERT_NULL(transfer.payload.next);
- // Free the transfer payload.
- udpardRxFragmentFree(transfer.payload, mem_rx.fragment, mem_rx.payload);
- TEST_ASSERT_EQUAL(2, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments);
- // Free the TX item.
- udpardTxFree(mem_tx, udpardTxPop(&tx, tx_item));
- TEST_ASSERT_EQUAL(4 * 2ULL, alloc_tx.allocated_fragments);
-
- // Fourth transfer. This one contains multiple frames. We process them one-by-one.
- // Frame #0.
- tx_item = udpardTxPeek(&tx);
- TEST_ASSERT_NOT_NULL(tx_item);
- const UdpardTxItem* prev_next = tx_item->next_in_transfer;
- TEST_ASSERT_NOT_NULL(prev_next);
- TEST_ASSERT_EQUAL(sub.at(0).udp_ip_endpoint.ip_address, tx_item->destination.ip_address);
- TEST_ASSERT_EQUAL(10'002'000, tx_item->deadline_usec);
- TEST_ASSERT_EQUAL(0xA7, tx_item->dscp);
- TEST_ASSERT_EQUAL(0,
- rxSubscriptionReceive(&sub.at(0),
- alloc_rx_payload,
- 10'008'000,
- tx_item->datagram_payload,
- 0,
- &transfer));
- TEST_ASSERT_EQUAL(3, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(1, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(1, alloc_rx_payload.allocated_fragments);
- // Free the TX item.
- udpardTxFree(mem_tx, udpardTxPop(&tx, tx_item));
- TEST_ASSERT_EQUAL(3 * 2ULL, alloc_tx.allocated_fragments);
- // Frame #1.
- tx_item = udpardTxPeek(&tx);
- TEST_ASSERT_NOT_NULL(tx_item);
- TEST_ASSERT_EQUAL_PTR(prev_next, tx_item);
- prev_next = tx_item->next_in_transfer;
- TEST_ASSERT_NOT_NULL(prev_next);
- TEST_ASSERT_EQUAL(sub.at(0).udp_ip_endpoint.ip_address, tx_item->destination.ip_address);
- TEST_ASSERT_EQUAL(10'002'000, tx_item->deadline_usec);
- TEST_ASSERT_EQUAL(0xA7, tx_item->dscp);
- TEST_ASSERT_EQUAL(0,
- rxSubscriptionReceive(&sub.at(0),
- alloc_rx_payload,
- 10'008'001,
- tx_item->datagram_payload,
- 0,
- &transfer));
- TEST_ASSERT_EQUAL(3, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(2, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(2, alloc_rx_payload.allocated_fragments);
- // Free the TX item.
- udpardTxFree(mem_tx, udpardTxPop(&tx, tx_item));
- TEST_ASSERT_EQUAL(2 * 2ULL, alloc_tx.allocated_fragments);
- // Frame #2.
- tx_item = udpardTxPeek(&tx);
- TEST_ASSERT_NOT_NULL(tx_item);
- TEST_ASSERT_EQUAL_PTR(prev_next, tx_item);
- prev_next = tx_item->next_in_transfer;
- TEST_ASSERT_NOT_NULL(prev_next);
- TEST_ASSERT_EQUAL(sub.at(0).udp_ip_endpoint.ip_address, tx_item->destination.ip_address);
- TEST_ASSERT_EQUAL(10'002'000, tx_item->deadline_usec);
- TEST_ASSERT_EQUAL(0xA7, tx_item->dscp);
- TEST_ASSERT_EQUAL(0,
- rxSubscriptionReceive(&sub.at(0),
- alloc_rx_payload,
- 10'008'002,
- tx_item->datagram_payload,
- 0,
- &transfer));
- TEST_ASSERT_EQUAL(3, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(3, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(3, alloc_rx_payload.allocated_fragments);
- // Free the TX item.
- udpardTxFree(mem_tx, udpardTxPop(&tx, tx_item));
- TEST_ASSERT_EQUAL(1 * 2ULL, alloc_tx.allocated_fragments);
- // Frame #3. This is the last frame of the transfer. The payload is truncated, see the extent.
- tx_item = udpardTxPeek(&tx);
- TEST_ASSERT_NOT_NULL(tx_item);
- TEST_ASSERT_EQUAL_PTR(prev_next, tx_item);
- prev_next = tx_item->next_in_transfer;
- TEST_ASSERT_NULL(prev_next);
- TEST_ASSERT_EQUAL(sub.at(0).udp_ip_endpoint.ip_address, tx_item->destination.ip_address);
- TEST_ASSERT_EQUAL(10'002'000, tx_item->deadline_usec);
- TEST_ASSERT_EQUAL(0xA7, tx_item->dscp);
- TEST_ASSERT_EQUAL(1,
- rxSubscriptionReceive(&sub.at(0),
- alloc_rx_payload,
- 10'008'003,
- tx_item->datagram_payload,
- 0,
- &transfer));
- TEST_ASSERT_EQUAL(3, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(2, alloc_rx_fragment.allocated_fragments); // Extent truncation + head optimization.
- TEST_ASSERT_EQUAL(3, alloc_rx_payload.allocated_fragments); // Extent truncation.
- // Check the received transfer.
- TEST_ASSERT_EQUAL(10'008'000, transfer.timestamp_usec);
- TEST_ASSERT_EQUAL(UdpardPriorityOptional, transfer.priority);
- TEST_ASSERT_EQUAL(42, transfer.source_node_id);
- TEST_ASSERT_EQUAL(1, transfer.transfer_id);
- TEST_ASSERT_EQUAL(300, transfer.payload_size); // Defined by the configured extent setting for this sub.
- TEST_ASSERT_EQUAL(100, transfer.payload.view.size); // Defined by the MTU setting.
- std::array<std::uint8_t, 300> rx_eden{};
- TEST_ASSERT_EQUAL(300, udpardGather(transfer.payload, rx_eden.size(), rx_eden.data()));
- TEST_ASSERT_EQUAL_MEMORY(Eden.data(), rx_eden.data(), 300);
- // Free the transfer payload.
- udpardRxFragmentFree(transfer.payload, mem_rx.fragment, mem_rx.payload);
- TEST_ASSERT_EQUAL(3, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments);
- // Free the TX item.
- udpardTxFree(mem_tx, udpardTxPop(&tx, tx_item));
- TEST_ASSERT_EQUAL(0, alloc_tx.allocated_fragments);
-
- // Close the subscriptions and ensure the memory is freed.
- udpardRxSubscriptionFree(&sub.at(0));
- udpardRxSubscriptionFree(&sub.at(1));
- udpardRxSubscriptionFree(&sub.at(2));
-
- // Final memory check.
- TEST_ASSERT_EQUAL(0, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_tx.allocated_fragments);
-}
-
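-/// End-to-end RPC exercise: a request and a response routed through the dispatcher,
-/// with duplicate rejection on redundant ifaces and exact memory accounting.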
-void testRPC()
-{
- InstrumentedAllocator alloc_tx;
- InstrumentedAllocator alloc_rx_session;
- InstrumentedAllocator alloc_rx_fragment;
- InstrumentedAllocator alloc_rx_payload;
- instrumentedAllocatorNew(&alloc_tx);
- instrumentedAllocatorNew(&alloc_rx_session);
- instrumentedAllocatorNew(&alloc_rx_fragment);
- instrumentedAllocatorNew(&alloc_rx_payload);
- const UdpardTxMemoryResources mem_tx{
- .fragment = instrumentedAllocatorMakeMemoryResource(&alloc_tx),
- .payload = instrumentedAllocatorMakeMemoryResource(&alloc_tx),
- };
- const UdpardRxMemoryResources mem_rx{
- .session = instrumentedAllocatorMakeMemoryResource(&alloc_rx_session),
- .fragment = instrumentedAllocatorMakeMemoryResource(&alloc_rx_fragment),
- .payload = instrumentedAllocatorMakeMemoryDeleter(&alloc_rx_payload),
- };
- // Initialize the TX pipeline.
- UdpardTx tx{};
- const UdpardNodeID tx_node_id = 1234;
- TEST_ASSERT_EQUAL(0, udpardTxInit(&tx, &tx_node_id, 2, mem_tx));
- tx.mtu = 500;
- for (auto i = 0U; i <= UDPARD_PRIORITY_MAX; i++)
- {
- tx.dscp_value_per_priority[i] = static_cast<std::uint_least8_t>(0xA0U + i);
- }
- // Initialize the RPC dispatcher and the RPC services.
- UdpardRxRPCDispatcher dispatcher{};
- TEST_ASSERT_EQUAL(0, udpardRxRPCDispatcherInit(&dispatcher, mem_rx));
- UdpardUDPIPEndpoint udp_ip_endpoint{};
- TEST_ASSERT_EQUAL(0, udpardRxRPCDispatcherStart(&dispatcher, 4321, &udp_ip_endpoint));
- UdpardRxRPCPort port_foo_a{};
- UdpardRxRPCPort port_foo_q{};
- TEST_ASSERT_EQUAL(1, udpardRxRPCDispatcherListen(&dispatcher, &port_foo_a, 200, false, 500));
- TEST_ASSERT_EQUAL(1, udpardRxRPCDispatcherListen(&dispatcher, &port_foo_q, 200, true, 500));
-
- // Send a request.
- UdpardTransferID transfer_id_shared = 0;
- const std::string_view Entry = "But this simple world held a perplexing riddle: The entire galaxy was a vast "
- "empty desert, but a highly intelligent civilization had appeared on the star "
- "nearest to us. In this mystery, his thoughts found an entry point.";
- TEST_ASSERT_EQUAL_INT32(1,
- udpardTxRequest(&tx,
- 10'000'000,
- UdpardPriorityFast,
- 200,
- 4321,
- transfer_id_shared++,
- makePayload(Entry),
- nullptr));
- TEST_ASSERT_EQUAL(1, tx.queue_size);
- TEST_ASSERT_EQUAL(1, transfer_id_shared);
-
- // Send a response.
- const std::string_view Forest = "In the dead, lonely, cold blackness, he saw the truth of the universe.";
- TEST_ASSERT_EQUAL_INT32(1,
- udpardTxRespond(&tx,
- 10'001'000,
- UdpardPriorityImmediate,
- 200,
- 4321,
- transfer_id_shared,
- makePayload(Forest),
- nullptr));
- TEST_ASSERT_EQUAL(2, tx.queue_size);
-
- // Transmit the enqueued frames by pushing them into the RPC dispatcher.
- UdpardRxRPCTransfer transfer{};
- UdpardRxRPCPort* active_port = nullptr;
- // First transfer.
- TEST_ASSERT_EQUAL(0, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments);
- UdpardTxItem* tx_item = udpardTxPeek(&tx);
- TEST_ASSERT_NOT_NULL(tx_item);
- TEST_ASSERT_EQUAL(udp_ip_endpoint.ip_address, tx_item->destination.ip_address);
- TEST_ASSERT_NULL(tx_item->next_in_transfer);
- TEST_ASSERT_EQUAL(10'001'000, tx_item->deadline_usec);
- TEST_ASSERT_EQUAL(0xA1, tx_item->dscp);
- TEST_ASSERT_EQUAL(1,
- rxRPCDispatcherReceive(&dispatcher,
- alloc_rx_payload,
- 10'000'000,
- tx_item->datagram_payload,
- 0,
- &active_port,
- &transfer));
- TEST_ASSERT_EQUAL(1, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(1, alloc_rx_payload.allocated_fragments);
- // Check the received transfer.
- TEST_ASSERT_EQUAL(&port_foo_a, active_port);
- TEST_ASSERT_EQUAL(200, transfer.service_id);
- TEST_ASSERT_EQUAL(false, transfer.is_request);
- TEST_ASSERT_EQUAL(10'000'000, transfer.base.timestamp_usec);
- TEST_ASSERT_EQUAL(UdpardPriorityImmediate, transfer.base.priority);
- TEST_ASSERT_EQUAL(1234, transfer.base.source_node_id);
- TEST_ASSERT_EQUAL(1, transfer.base.transfer_id);
- TEST_ASSERT_EQUAL(Forest.size(), transfer.base.payload_size);
- TEST_ASSERT_EQUAL(Forest.size(), transfer.base.payload.view.size);
- TEST_ASSERT_EQUAL_MEMORY(Forest.data(), transfer.base.payload.view.data, transfer.base.payload.view.size);
- TEST_ASSERT_NULL(transfer.base.payload.next);
- // Free the transfer payload.
- udpardRxFragmentFree(transfer.base.payload, mem_rx.fragment, mem_rx.payload);
- TEST_ASSERT_EQUAL(1, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments);
- // Send duplicates.
- TEST_ASSERT_EQUAL(0, // Duplicate on the same iface.
- rxRPCDispatcherReceive(&dispatcher,
- alloc_rx_payload,
- 10'000'100,
- tx_item->datagram_payload,
- 0,
- &active_port,
- &transfer));
- TEST_ASSERT_EQUAL(0, // Duplicate on another iface.
- rxRPCDispatcherReceive(&dispatcher,
- alloc_rx_payload,
- 10'000'200,
- tx_item->datagram_payload,
- 2,
- &active_port,
- &transfer));
- // Ensure the duplicates do not alter memory usage.
- TEST_ASSERT_EQUAL(1, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments);
- // Free the TX item.
- udpardTxFree(mem_tx, udpardTxPop(&tx, tx_item));
- TEST_ASSERT_EQUAL(1 * 2ULL, alloc_tx.allocated_fragments);
-
- // Second transfer.
- tx_item = udpardTxPeek(&tx);
- TEST_ASSERT_NOT_NULL(tx_item);
- TEST_ASSERT_EQUAL(udp_ip_endpoint.ip_address, tx_item->destination.ip_address);
- TEST_ASSERT_NULL(tx_item->next_in_transfer);
- TEST_ASSERT_EQUAL(10'000'000, tx_item->deadline_usec);
- TEST_ASSERT_EQUAL(0xA2, tx_item->dscp);
- TEST_ASSERT_EQUAL(1,
- rxRPCDispatcherReceive(&dispatcher,
- alloc_rx_payload,
- 10'001'000,
- tx_item->datagram_payload,
- 1,
- &active_port,
- &transfer));
- TEST_ASSERT_EQUAL(2, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(1, alloc_rx_payload.allocated_fragments);
- // Check the received transfer.
- TEST_ASSERT_EQUAL(&port_foo_q, active_port);
- TEST_ASSERT_EQUAL(200, transfer.service_id);
- TEST_ASSERT_EQUAL(true, transfer.is_request);
- TEST_ASSERT_EQUAL(10'001'000, transfer.base.timestamp_usec);
- TEST_ASSERT_EQUAL(UdpardPriorityFast, transfer.base.priority);
- TEST_ASSERT_EQUAL(1234, transfer.base.source_node_id);
- TEST_ASSERT_EQUAL(0, transfer.base.transfer_id);
- TEST_ASSERT_EQUAL(Entry.size(), transfer.base.payload_size);
- TEST_ASSERT_EQUAL(Entry.size(), transfer.base.payload.view.size);
- TEST_ASSERT_EQUAL_MEMORY(Entry.data(), transfer.base.payload.view.data, transfer.base.payload.view.size);
- TEST_ASSERT_NULL(transfer.base.payload.next);
- // Free the transfer payload.
- udpardRxFragmentFree(transfer.base.payload, mem_rx.fragment, mem_rx.payload);
- TEST_ASSERT_EQUAL(2, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments);
- // Send duplicates.
- TEST_ASSERT_EQUAL(0, // Duplicate on the same iface.
- rxRPCDispatcherReceive(&dispatcher,
- alloc_rx_payload,
- 10'001'100,
- tx_item->datagram_payload,
- 0,
- &active_port,
- &transfer));
- TEST_ASSERT_EQUAL(0, // Duplicate on another iface.
- rxRPCDispatcherReceive(&dispatcher,
- alloc_rx_payload,
- 10'001'200,
- tx_item->datagram_payload,
- 2,
- &active_port,
- &transfer));
- // Ensure the duplicates do not alter memory usage.
- TEST_ASSERT_EQUAL(2, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments);
- // Free the TX item.
- udpardTxFree(mem_tx, udpardTxPop(&tx, tx_item));
- TEST_ASSERT_EQUAL(0, alloc_tx.allocated_fragments);
-
- // Destroy the ports.
- udpardRxRPCDispatcherCancel(&dispatcher, 200, false);
- udpardRxRPCDispatcherCancel(&dispatcher, 200, true);
-
- // Final memory check.
- TEST_ASSERT_EQUAL(0, alloc_rx_session.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments);
- TEST_ASSERT_EQUAL(0, alloc_tx.allocated_fragments);
-}
-
-} // namespace
-
-void setUp() {}
-
-void tearDown() {}
-
-int main()
-{
- UNITY_BEGIN();
- RUN_TEST(testPubSub);
- RUN_TEST(testRPC);
- return UNITY_END();
-}
diff --git a/tests/src/test_e2e_api.cpp b/tests/src/test_e2e_api.cpp
new file mode 100644
index 0000000..520cb4e
--- /dev/null
+++ b/tests/src/test_e2e_api.cpp
@@ -0,0 +1,450 @@
+/// This software is distributed under the terms of the MIT License.
+/// Copyright (C) OpenCyphal Development Team
+/// Copyright Amazon.com Inc. or its affiliates.
+/// SPDX-License-Identifier: MIT
+
+// ReSharper disable CppPassValueParameterByConstReference
+
+#include <udpard.h>
+#include "helpers.h"
+#include <unity.h>
+#include <array>
+#include <cstdlib>
+#include <vector>
+
+namespace {
+
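+// A TX datagram captured by the ejection callbacks, paired with the redundant interface index it was emitted on.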
+struct CapturedFrame
+{
+ udpard_bytes_mut_t datagram;
+ uint_fast8_t iface_index;
+};
+
+struct FeedbackState
+{
+ size_t count = 0;
+ uint16_t acknowledgements = 0;
+};
+
+struct RxContext
+{
+ std::vector<uint8_t> expected;
+ std::array<udpard_udpip_ep_t, UDPARD_IFACE_COUNT_MAX> sources{};
+ uint64_t remote_uid = 0;
+ size_t received = 0;
+ size_t collisions = 0;
+};
+
+// Refcount helpers keep captured datagrams alive.
+void tx_refcount_free(void* const user, const size_t size, void* const payload)
+{
+ (void)user;
+ udpard_tx_refcount_dec(udpard_bytes_t{ .size = size, .data = payload });
+}
+
+// Shared deleter for captured TX frames.
+constexpr udpard_deleter_vtable_t tx_refcount_deleter_vt{ .free = &tx_refcount_free };
+
+bool capture_tx_frame_impl(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection)
+{
+ auto* frames = static_cast*>(tx->user);
+ if (frames == nullptr) {
+ return false;
+ }
+ udpard_tx_refcount_inc(ejection->datagram);
+ void* const data = const_cast<void*>(ejection->datagram.data); // NOLINT(cppcoreguidelines-pro-type-const-cast)
+ frames->push_back(CapturedFrame{ .datagram = { .size = ejection->datagram.size, .data = data },
+ .iface_index = ejection->iface_index });
+ return true;
+}
+
+bool capture_tx_frame_subject(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection)
+{
+ return capture_tx_frame_impl(tx, ejection);
+}
+
+bool capture_tx_frame_p2p(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t /*dest*/)
+{
+ return capture_tx_frame_impl(tx, ejection);
+}
+
+void drop_frame(const CapturedFrame& frame)
+{
+ udpard_tx_refcount_dec(udpard_bytes_t{ .size = frame.datagram.size, .data = frame.datagram.data });
+}
+
+void fill_random(std::vector<uint8_t>& data)
+{
+ for (auto& byte : data) {
+ byte = static_cast<uint8_t>(rand()) & 0xFFU;
+ }
+}
+
+constexpr udpard_tx_vtable_t tx_vtable{ .eject_subject = &capture_tx_frame_subject,
+ .eject_p2p = &capture_tx_frame_p2p };
+
+// Feedback callback records completion.
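+// The FeedbackState pointer is recovered from user.ptr[0], where make_user_context() placed it at push time.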
+void record_feedback(udpard_tx_t*, const udpard_tx_feedback_t fb)
+{
+ auto* st = static_cast<FeedbackState*>(fb.user.ptr[0]);
+ if (st != nullptr) {
+ st->count++;
+ st->acknowledgements = fb.acknowledgements;
+ }
+}
+
+// RX callbacks validate payload and sender.
+void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer)
+{
+ auto* ctx = static_cast<RxContext*>(rx->user);
+ TEST_ASSERT_EQUAL_UINT64(ctx->remote_uid, transfer.remote.uid);
+ for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+ if ((transfer.remote.endpoints[i].ip != 0U) || (transfer.remote.endpoints[i].port != 0U)) {
+ TEST_ASSERT_EQUAL_UINT32(ctx->sources[i].ip, transfer.remote.endpoints[i].ip);
+ TEST_ASSERT_EQUAL_UINT16(ctx->sources[i].port, transfer.remote.endpoints[i].port);
+ }
+ }
+ std::vector<uint8_t> assembled(transfer.payload_size_stored);
+ const udpard_fragment_t* cursor = transfer.payload;
+ const size_t gathered = udpard_fragment_gather(&cursor, 0, transfer.payload_size_stored, assembled.data());
+ TEST_ASSERT_EQUAL_size_t(transfer.payload_size_stored, gathered);
+ TEST_ASSERT_EQUAL_size_t(ctx->expected.size(), transfer.payload_size_wire);
+ if (!ctx->expected.empty()) {
+ TEST_ASSERT_EQUAL_MEMORY(ctx->expected.data(), assembled.data(), transfer.payload_size_stored);
+ }
+ udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment));
+ ctx->received++;
+}
+
+void on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const /*port*/, const udpard_remote_t /*remote*/)
+{
+ auto* ctx = static_cast<RxContext*>(rx->user);
+ ctx->collisions++;
+}
+constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message, .on_collision = &on_collision };
+
+// Ack port frees responses.
+void on_ack_response(udpard_rx_t*, udpard_rx_port_t* port, const udpard_rx_transfer_t tr)
+{
+ udpard_fragment_free_all(tr.payload, udpard_make_deleter(port->memory.fragment));
+}
+constexpr udpard_rx_port_vtable_t ack_callbacks{ .on_message = &on_ack_response, .on_collision = &on_collision };
+
+// Reliable delivery must survive data and ack loss.
+// Each node uses exactly one TX and one RX instance as per the library design.
+void test_reliable_delivery_under_losses()
+{
+ seed_prng();
+
+ // Allocators - one TX and one RX per node.
+ // Publisher node allocators.
+ instrumented_allocator_t pub_tx_alloc_transfer{};
+ instrumented_allocator_t pub_tx_alloc_payload{};
+ instrumented_allocator_t pub_rx_alloc_frag{};
+ instrumented_allocator_t pub_rx_alloc_session{};
+ instrumented_allocator_new(&pub_tx_alloc_transfer);
+ instrumented_allocator_new(&pub_tx_alloc_payload);
+ instrumented_allocator_new(&pub_rx_alloc_frag);
+ instrumented_allocator_new(&pub_rx_alloc_session);
+
+ // Subscriber node allocators.
+ instrumented_allocator_t sub_tx_alloc_transfer{};
+ instrumented_allocator_t sub_tx_alloc_payload{};
+ instrumented_allocator_t sub_rx_alloc_frag{};
+ instrumented_allocator_t sub_rx_alloc_session{};
+ instrumented_allocator_new(&sub_tx_alloc_transfer);
+ instrumented_allocator_new(&sub_tx_alloc_payload);
+ instrumented_allocator_new(&sub_rx_alloc_frag);
+ instrumented_allocator_new(&sub_rx_alloc_session);
+
+ // Memory resources.
+ udpard_tx_mem_resources_t pub_tx_mem{};
+ pub_tx_mem.transfer = instrumented_allocator_make_resource(&pub_tx_alloc_transfer);
+ for (auto& res : pub_tx_mem.payload) {
+ res = instrumented_allocator_make_resource(&pub_tx_alloc_payload);
+ }
+ const udpard_rx_mem_resources_t pub_rx_mem{ .session = instrumented_allocator_make_resource(&pub_rx_alloc_session),
+ .fragment = instrumented_allocator_make_resource(&pub_rx_alloc_frag) };
+
+ udpard_tx_mem_resources_t sub_tx_mem{};
+ sub_tx_mem.transfer = instrumented_allocator_make_resource(&sub_tx_alloc_transfer);
+ for (auto& res : sub_tx_mem.payload) {
+ res = instrumented_allocator_make_resource(&sub_tx_alloc_payload);
+ }
+ const udpard_rx_mem_resources_t sub_rx_mem{ .session = instrumented_allocator_make_resource(&sub_rx_alloc_session),
+ .fragment = instrumented_allocator_make_resource(&sub_rx_alloc_frag) };
+
+ // Publisher node: single TX, single RX (linked to TX for ACK processing).
+ constexpr uint64_t pub_uid = 0x1111222233334444ULL;
+ udpard_tx_t pub_tx{};
+ std::vector<CapturedFrame> pub_frames;
+ TEST_ASSERT_TRUE(udpard_tx_new(&pub_tx, pub_uid, 10U, 64, pub_tx_mem, &tx_vtable));
+ pub_tx.user = &pub_frames;
+ pub_tx.ack_baseline_timeout = 8000;
+
+ udpard_rx_t pub_rx{};
+ udpard_rx_new(&pub_rx, &pub_tx);
+ udpard_rx_port_t pub_p2p_port{};
+ TEST_ASSERT_TRUE(
+ udpard_rx_port_new(&pub_p2p_port, pub_uid, 16, udpard_rx_unordered, 0, pub_rx_mem, &ack_callbacks));
+
+ // Subscriber node: single TX, single RX (linked to TX for sending ACKs).
+ constexpr uint64_t sub_uid = 0xABCDEF0012345678ULL;
+ udpard_tx_t sub_tx{};
+ std::vector sub_frames;
+ TEST_ASSERT_TRUE(udpard_tx_new(&sub_tx, sub_uid, 77U, 8, sub_tx_mem, &tx_vtable));
+ sub_tx.user = &sub_frames;
+
+ udpard_rx_t sub_rx{};
+ udpard_rx_new(&sub_rx, &sub_tx);
+ udpard_rx_port_t sub_port{};
+ const uint64_t topic_hash = 0x0123456789ABCDEFULL;
+ TEST_ASSERT_TRUE(udpard_rx_port_new(&sub_port, topic_hash, 6000, udpard_rx_unordered, 0, sub_rx_mem, &callbacks));
+
+ // Endpoints.
+ const std::array<udpard_udpip_ep_t, UDPARD_IFACE_COUNT_MAX> publisher_sources{
+ udpard_udpip_ep_t{ .ip = 0x0A000001U, .port = 7400U },
+ udpard_udpip_ep_t{ .ip = 0x0A000002U, .port = 7401U },
+ udpard_udpip_ep_t{ .ip = 0x0A000003U, .port = 7402U },
+ };
+ const std::array<udpard_udpip_ep_t, UDPARD_IFACE_COUNT_MAX> subscriber_sources{
+ udpard_udpip_ep_t{ .ip = 0x0A000010U, .port = 7600U },
+ udpard_udpip_ep_t{ .ip = 0x0A000011U, .port = 7601U },
+ udpard_udpip_ep_t{ .ip = 0x0A000012U, .port = 7602U },
+ };
+ // Payload and context.
+ std::vector<uint8_t> payload(4096);
+ fill_random(payload);
+ RxContext ctx{};
+ ctx.expected = payload;
+ ctx.sources = publisher_sources;
+ ctx.remote_uid = pub_uid;
+ sub_rx.user = &ctx;
+
+ // Reliable transfer with staged losses.
+ FeedbackState fb{};
+ const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size());
+ pub_tx.mtu[0] = 600;
+ pub_tx.mtu[1] = 900;
+ pub_tx.mtu[2] = 500;
+ const udpard_us_t start = 0;
+ const udpard_us_t deadline = start + 200000;
+ const uint16_t iface_bitmap_all = UDPARD_IFACE_BITMAP_ALL;
+ const udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr };
+ TEST_ASSERT_TRUE(udpard_tx_push(&pub_tx,
+ start,
+ deadline,
+ iface_bitmap_all,
+ udpard_prio_fast,
+ topic_hash,
+ 1U,
+ payload_view,
+ &record_feedback,
+ make_user_context(&fb)));
+
+ // Send until acked; drop first data frame and first ack.
+ bool first_round = true;
+ udpard_us_t now = start;
+ size_t attempts = 0;
+ const size_t attempt_cap = 6;
+ while ((fb.count == 0) && (attempts < attempt_cap)) {
+ // Publisher transmits topic message.
+ pub_frames.clear();
+ udpard_tx_poll(&pub_tx, now, UDPARD_IFACE_BITMAP_ALL);
+ bool data_loss_done = false;
+ for (const auto& frame : pub_frames) {
+ const bool drop = first_round && !data_loss_done && (frame.iface_index == 1U);
+ if (drop) {
+ drop_frame(frame);
+ data_loss_done = true;
+ continue;
+ }
+ TEST_ASSERT_TRUE(udpard_rx_port_push(&sub_rx,
+ &sub_port,
+ now,
+ publisher_sources[frame.iface_index],
+ frame.datagram,
+ tx_payload_deleter,
+ frame.iface_index));
+ }
+ udpard_rx_poll(&sub_rx, now);
+
+ // Subscriber transmits ACKs (via sub_tx since sub_rx is linked to it).
+ sub_frames.clear();
+ udpard_tx_poll(&sub_tx, now, UDPARD_IFACE_BITMAP_ALL);
+ bool ack_sent = false;
+ for (const auto& ack : sub_frames) {
+ const bool drop_ack = first_round && !ack_sent;
+ if (drop_ack) {
+ drop_frame(ack);
+ continue;
+ }
+ ack_sent = true;
+ TEST_ASSERT_TRUE(udpard_rx_port_push(&pub_rx,
+ &pub_p2p_port,
+ now,
+ subscriber_sources[ack.iface_index],
+ ack.datagram,
+ tx_payload_deleter,
+ ack.iface_index));
+ }
+ udpard_rx_poll(&pub_rx, now);
+ first_round = false;
+ attempts++;
+ now += pub_tx.ack_baseline_timeout + 5000;
+ }
+
+ TEST_ASSERT_EQUAL_size_t(1, fb.count);
+ TEST_ASSERT_EQUAL_UINT32(1, fb.acknowledgements);
+ TEST_ASSERT_EQUAL_size_t(1, ctx.received);
+ TEST_ASSERT_EQUAL_size_t(0, ctx.collisions);
+
+ // Cleanup.
+ udpard_rx_port_free(&sub_rx, &sub_port);
+ udpard_rx_port_free(&pub_rx, &pub_p2p_port);
+ udpard_tx_free(&pub_tx);
+ udpard_tx_free(&sub_tx);
+
+ TEST_ASSERT_EQUAL_size_t(0, pub_tx_alloc_transfer.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, pub_tx_alloc_payload.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, pub_rx_alloc_frag.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, pub_rx_alloc_session.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, sub_tx_alloc_transfer.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, sub_tx_alloc_payload.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, sub_rx_alloc_frag.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, sub_rx_alloc_session.allocated_fragments);
+
+ instrumented_allocator_reset(&pub_tx_alloc_transfer);
+ instrumented_allocator_reset(&pub_tx_alloc_payload);
+ instrumented_allocator_reset(&pub_rx_alloc_frag);
+ instrumented_allocator_reset(&pub_rx_alloc_session);
+ instrumented_allocator_reset(&sub_tx_alloc_transfer);
+ instrumented_allocator_reset(&sub_tx_alloc_payload);
+ instrumented_allocator_reset(&sub_rx_alloc_frag);
+ instrumented_allocator_reset(&sub_rx_alloc_session);
+}
+
+// Counters must reflect expired deliveries and ack failures.
+void test_reliable_stats_and_failures()
+{
+ seed_prng();
+
+ // Expiration path.
+ instrumented_allocator_t exp_alloc_transfer{};
+ instrumented_allocator_t exp_alloc_payload{};
+ instrumented_allocator_new(&exp_alloc_transfer);
+ instrumented_allocator_new(&exp_alloc_payload);
+ udpard_tx_mem_resources_t exp_mem{};
+ exp_mem.transfer = instrumented_allocator_make_resource(&exp_alloc_transfer);
+ for (auto& res : exp_mem.payload) {
+ res = instrumented_allocator_make_resource(&exp_alloc_payload);
+ }
+ udpard_tx_t exp_tx{};
+ std::vector<CapturedFrame> exp_frames;
+ TEST_ASSERT_TRUE(udpard_tx_new(&exp_tx, 0x9999000011112222ULL, 2U, 4, exp_mem, &tx_vtable));
+ exp_tx.user = &exp_frames;
+ FeedbackState fb_fail{};
+ const uint16_t iface_bitmap_1 = (1U << 0U);
+ const udpard_bytes_scattered_t exp_payload = make_scattered("ping", 4);
+ TEST_ASSERT_TRUE(udpard_tx_push(&exp_tx,
+ 0,
+ 10,
+ iface_bitmap_1,
+ udpard_prio_fast,
+ 0xABCULL,
+ 5U,
+ exp_payload,
+ &record_feedback,
+ make_user_context(&fb_fail)));
+ udpard_tx_poll(&exp_tx, 0, UDPARD_IFACE_BITMAP_ALL);
+ for (const auto& f : exp_frames) {
+ drop_frame(f);
+ }
+ exp_frames.clear();
+ udpard_tx_poll(&exp_tx, 20, UDPARD_IFACE_BITMAP_ALL);
+ TEST_ASSERT_EQUAL_size_t(1, fb_fail.count);
+ TEST_ASSERT_EQUAL_UINT32(0, fb_fail.acknowledgements);
+ TEST_ASSERT_GREATER_THAN_UINT64(0, exp_tx.errors_expiration);
+ udpard_tx_free(&exp_tx);
+ TEST_ASSERT_EQUAL_size_t(0, exp_alloc_transfer.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, exp_alloc_payload.allocated_fragments);
+ instrumented_allocator_reset(&exp_alloc_transfer);
+ instrumented_allocator_reset(&exp_alloc_payload);
+
+ // Ack push failure increments counters.
+ instrumented_allocator_t rx_alloc_frag{};
+ instrumented_allocator_t rx_alloc_session{};
+ instrumented_allocator_t src_alloc_transfer{};
+ instrumented_allocator_t src_alloc_payload{};
+ instrumented_allocator_new(&rx_alloc_frag);
+ instrumented_allocator_new(&rx_alloc_session);
+ instrumented_allocator_new(&src_alloc_transfer);
+ instrumented_allocator_new(&src_alloc_payload);
+ const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session),
+ .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) };
+ udpard_tx_mem_resources_t src_mem{};
+ src_mem.transfer = instrumented_allocator_make_resource(&src_alloc_transfer);
+ for (auto& res : src_mem.payload) {
+ res = instrumented_allocator_make_resource(&src_alloc_payload);
+ }
+
+ udpard_tx_t src_tx{};
+ std::vector<CapturedFrame> src_frames;
+ TEST_ASSERT_TRUE(udpard_tx_new(&src_tx, 0x5555AAAABBBBCCCCULL, 3U, 4, src_mem, &tx_vtable));
+ src_tx.user = &src_frames;
+ udpard_rx_t rx{};
+ udpard_rx_port_t port{};
+ RxContext ctx{};
+ ctx.remote_uid = src_tx.local_uid;
+ ctx.sources = { udpard_udpip_ep_t{ .ip = 0x0A000021U, .port = 7700U }, udpard_udpip_ep_t{}, udpard_udpip_ep_t{} };
+ ctx.expected.assign({ 1U, 2U, 3U, 4U });
+ udpard_rx_new(&rx, nullptr);
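+ // This RX deliberately has no linked TX, so the ACK it tries to send for the reliable transfer
+ // below cannot be enqueued; the failure must be counted in errors_ack_tx.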
+ rx.user = &ctx;
+ TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 0x12340000ULL, 64, udpard_rx_unordered, 0, rx_mem, &callbacks));
+
+ const udpard_bytes_scattered_t src_payload = make_scattered(ctx.expected.data(), ctx.expected.size());
+ FeedbackState fb_ignore{};
+ TEST_ASSERT_TRUE(udpard_tx_push(&src_tx,
+ 0,
+ 1000,
+ iface_bitmap_1,
+ udpard_prio_fast,
+ port.topic_hash,
+ 7U,
+ src_payload,
+ &record_feedback,
+ make_user_context(&fb_ignore)));
+ udpard_tx_poll(&src_tx, 0, UDPARD_IFACE_BITMAP_ALL);
+ const udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr };
+ for (const auto& f : src_frames) {
+ TEST_ASSERT_TRUE(udpard_rx_port_push(
+ &rx, &port, 0, ctx.sources[f.iface_index], f.datagram, tx_payload_deleter, f.iface_index));
+ }
+ udpard_rx_poll(&rx, 0);
+ TEST_ASSERT_GREATER_THAN_UINT64(0, rx.errors_ack_tx);
+ TEST_ASSERT_EQUAL_size_t(1, ctx.received);
+
+ udpard_rx_port_free(&rx, &port);
+ udpard_tx_free(&src_tx);
+ TEST_ASSERT_EQUAL_size_t(0, rx_alloc_frag.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, rx_alloc_session.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, src_alloc_transfer.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, src_alloc_payload.allocated_fragments);
+ instrumented_allocator_reset(&rx_alloc_frag);
+ instrumented_allocator_reset(&rx_alloc_session);
+ instrumented_allocator_reset(&src_alloc_transfer);
+ instrumented_allocator_reset(&src_alloc_payload);
+}
+
+} // namespace
+
+extern "C" void setUp() {}
+
+extern "C" void tearDown() {}
+
+int main()
+{
+ UNITY_BEGIN();
+ RUN_TEST(test_reliable_delivery_under_losses);
+ RUN_TEST(test_reliable_stats_and_failures);
+ return UNITY_END();
+}
diff --git a/tests/src/test_e2e_edge.cpp b/tests/src/test_e2e_edge.cpp
new file mode 100644
index 0000000..c161a54
--- /dev/null
+++ b/tests/src/test_e2e_edge.cpp
@@ -0,0 +1,915 @@
+/// This software is distributed under the terms of the MIT License.
+/// Copyright (C) OpenCyphal Development Team
+/// Copyright Amazon.com Inc. or its affiliates.
+/// SPDX-License-Identifier: MIT
+
+// ReSharper disable CppPassValueParameterByConstReference
+
+#include <udpard.h>
+#include "helpers.h"
+#include <unity.h>
+#include <array>
+#include <vector>
+
+namespace {
+
+void on_message(udpard_rx_t* rx, udpard_rx_port_t* port, udpard_rx_transfer_t transfer);
+void on_collision(udpard_rx_t* rx, udpard_rx_port_t* port, udpard_remote_t remote);
+constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message, .on_collision = &on_collision };
+
+struct FbState
+{
+ size_t count = 0;
+ uint16_t acknowledgements = 0;
+};
+
+struct CapturedFrame
+{
+ udpard_bytes_mut_t datagram;
+ uint_fast8_t iface_index;
+};
+
+void tx_refcount_free(void* const user, const size_t size, void* const payload)
+{
+ (void)user;
+ udpard_tx_refcount_dec(udpard_bytes_t{ .size = size, .data = payload });
+}
+
+// Shared deleter for captured TX frames.
+constexpr udpard_deleter_vtable_t tx_refcount_deleter_vt{ .free = &tx_refcount_free };
+
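+// Capturing a frame takes an extra reference on the TX datagram so it outlives the TX queue; the matching
+// decrement happens in release_frames() or via tx_payload_deleter once the RX side frees the payload.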
+bool capture_tx_frame_impl(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection)
+{
+ auto* frames = static_cast<std::vector<CapturedFrame>*>(tx->user);
+ if (frames == nullptr) {
+ return false;
+ }
+ udpard_tx_refcount_inc(ejection->datagram);
+ void* const data = const_cast<void*>(ejection->datagram.data); // NOLINT(cppcoreguidelines-pro-type-const-cast)
+ frames->push_back(CapturedFrame{ .datagram = { .size = ejection->datagram.size, .data = data },
+ .iface_index = ejection->iface_index });
+ return true;
+}
+
+bool capture_tx_frame_subject(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection)
+{
+ return capture_tx_frame_impl(tx, ejection);
+}
+
+bool capture_tx_frame_p2p(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t /*dest*/)
+{
+ return capture_tx_frame_impl(tx, ejection);
+}
+
+constexpr udpard_tx_vtable_t tx_vtable{ .eject_subject = &capture_tx_frame_subject,
+ .eject_p2p = &capture_tx_frame_p2p };
+
+void fb_record(udpard_tx_t*, const udpard_tx_feedback_t fb)
+{
+ auto* st = static_cast<FbState*>(fb.user.ptr[0]);
+ if (st != nullptr) {
+ st->count++;
+ st->acknowledgements = fb.acknowledgements;
+ }
+}
+
+void release_frames(std::vector<CapturedFrame>& frames)
+{
+ for (const auto& [datagram, iface_index] : frames) {
+ udpard_tx_refcount_dec(udpard_bytes_t{ .size = datagram.size, .data = datagram.data });
+ }
+ frames.clear();
+}
+
+struct Context
+{
+ std::vector<uint64_t> ids;
+ size_t collisions = 0;
+ uint64_t expected_uid = 0;
+ udpard_udpip_ep_t source{};
+};
+
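+// The fixture owns one TX/RX pair backed by instrumented allocators; its destructor asserts that every
+// allocation was returned, so each test that uses it doubles as a leak check.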
+struct Fixture
+{
+ instrumented_allocator_t tx_alloc_transfer{};
+ instrumented_allocator_t tx_alloc_payload{};
+ instrumented_allocator_t rx_alloc_frag{};
+ instrumented_allocator_t rx_alloc_session{};
+ udpard_tx_t tx{};
+ udpard_rx_t rx{};
+ udpard_rx_port_t port{};
+ udpard_deleter_t tx_payload_deleter{};
+ std::vector<CapturedFrame> frames;
+ Context ctx{};
+ udpard_udpip_ep_t dest{};
+ udpard_udpip_ep_t source{};
+ uint64_t topic_hash{ 0x90AB12CD34EF5678ULL };
+
+ Fixture(const Fixture&) = delete;
+ Fixture& operator=(const Fixture&) = delete;
+ Fixture(Fixture&&) = delete;
+ Fixture& operator=(Fixture&&) = delete;
+
+ explicit Fixture(const udpard_rx_mode_t mode, const udpard_us_t reordering_window)
+ {
+ instrumented_allocator_new(&tx_alloc_transfer);
+ instrumented_allocator_new(&tx_alloc_payload);
+ instrumented_allocator_new(&rx_alloc_frag);
+ instrumented_allocator_new(&rx_alloc_session);
+ udpard_tx_mem_resources_t tx_mem{};
+ tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer);
+ for (auto& res : tx_mem.payload) {
+ res = instrumented_allocator_make_resource(&tx_alloc_payload);
+ }
+ const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session),
+ .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) };
+ tx_payload_deleter = udpard_deleter_t{ .vtable = &tx_refcount_deleter_vt, .context = nullptr };
+ source = { .ip = 0x0A000001U, .port = 7501U };
+ dest = udpard_make_subject_endpoint(222U);
+
+ TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0A0B0C0D0E0F1011ULL, 42U, 16, tx_mem, &tx_vtable));
+ tx.user = &frames;
+ udpard_rx_new(&rx, nullptr);
+ ctx.expected_uid = tx.local_uid;
+ ctx.source = source;
+ rx.user = &ctx;
+ TEST_ASSERT_TRUE(udpard_rx_port_new(&port, topic_hash, 1024, mode, reordering_window, rx_mem, &callbacks));
+ }
+
+ ~Fixture()
+ {
+ udpard_rx_port_free(&rx, &port);
+ udpard_tx_free(&tx);
+ TEST_ASSERT_EQUAL_size_t(0, rx_alloc_frag.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, rx_alloc_session.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, tx_alloc_transfer.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, tx_alloc_payload.allocated_fragments);
+ instrumented_allocator_reset(&rx_alloc_frag);
+ instrumented_allocator_reset(&rx_alloc_session);
+ instrumented_allocator_reset(&tx_alloc_transfer);
+ instrumented_allocator_reset(&tx_alloc_payload);
+ }
+
+ void push_single(const udpard_us_t ts, const uint64_t transfer_id)
+ {
+ frames.clear();
+ std::array<uint8_t, 8> payload_buf{}; // one byte per octet of the 64-bit transfer-ID (size assumed)
+ for (size_t i = 0; i < payload_buf.size(); i++) {
+ payload_buf[i] = static_cast<uint8_t>(transfer_id >> (i * 8U));
+ }
+ const udpard_bytes_scattered_t payload = make_scattered(payload_buf.data(), payload_buf.size());
+ const udpard_us_t deadline = ts + 1000000;
+ for (auto& mtu_value : tx.mtu) {
+ mtu_value = UDPARD_MTU_DEFAULT;
+ }
+ constexpr uint16_t iface_bitmap_1 = (1U << 0U);
+ TEST_ASSERT_TRUE(udpard_tx_push(&tx,
+ ts,
+ deadline,
+ iface_bitmap_1,
+ udpard_prio_slow,
+ topic_hash,
+ transfer_id,
+ payload,
+ nullptr,
+ UDPARD_USER_CONTEXT_NULL));
+ udpard_tx_poll(&tx, ts, UDPARD_IFACE_BITMAP_ALL);
+ TEST_ASSERT_GREATER_THAN_UINT32(0U, static_cast<uint32_t>(frames.size()));
+ for (const auto& [datagram, iface_index] : frames) {
+ TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &port, ts, source, datagram, tx_payload_deleter, iface_index));
+ }
+ }
+};
+
+/// Callbacks keep the payload memory under control.
+void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer)
+{
+ auto* const ctx = static_cast<Context*>(rx->user);
+ ctx->ids.push_back(transfer.transfer_id);
+ TEST_ASSERT_EQUAL_UINT64(ctx->expected_uid, transfer.remote.uid);
+ TEST_ASSERT_EQUAL_UINT32(ctx->source.ip, transfer.remote.endpoints[0].ip);
+ TEST_ASSERT_EQUAL_UINT16(ctx->source.port, transfer.remote.endpoints[0].port);
+ udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment));
+}
+
+void on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const /*port*/, const udpard_remote_t /*remote*/)
+{
+ auto* const ctx = static_cast<Context*>(rx->user);
+ ctx->collisions++;
+}
+
+/// UNORDERED mode should drop duplicates while keeping arrival order.
+void test_udpard_rx_unordered_duplicates()
+{
+ Fixture fix{ udpard_rx_unordered, 0 };
+ udpard_us_t now = 0;
+
+ constexpr std::array ids{ 100, 20000, 10100, 5000, 20000, 100 };
+ for (const auto id : ids) {
+ fix.push_single(now, id);
+ udpard_rx_poll(&fix.rx, now);
+ now++;
+ }
+ udpard_rx_poll(&fix.rx, now + 100);
+
+ constexpr std::array expected{ 100, 20000, 10100, 5000 };
+ TEST_ASSERT_EQUAL_size_t(expected.size(), fix.ctx.ids.size());
+ for (size_t i = 0; i < expected.size(); i++) {
+ TEST_ASSERT_EQUAL_UINT64(expected[i], fix.ctx.ids[i]);
+ }
+ TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions);
+}
+
+/// ORDERED mode waits for the window, then rejects late arrivals.
+void test_udpard_rx_ordered_out_of_order()
+{
+ Fixture fix{ udpard_rx_ordered, 50 };
+ udpard_us_t now = 0;
+
+ // First batch builds the ordered baseline.
+ fix.push_single(now, 100);
+ udpard_rx_poll(&fix.rx, now);
+ fix.push_single(++now, 300);
+ udpard_rx_poll(&fix.rx, now);
+ fix.push_single(++now, 200);
+ udpard_rx_poll(&fix.rx, now);
+
+ // Let the reordering window close for the early transfers.
+ now = 60;
+ udpard_rx_poll(&fix.rx, now);
+
+ // Queue far-future IDs while keeping the head at 300.
+ fix.push_single(now + 1, 10100);
+ udpard_rx_poll(&fix.rx, now + 1);
+ fix.push_single(now + 2, 10200);
+ udpard_rx_poll(&fix.rx, now + 2);
+
+ // Late arrivals inside the window shall be dropped.
+ fix.push_single(now + 3, 250);
+ udpard_rx_poll(&fix.rx, now + 3);
+ fix.push_single(now + 4, 150);
+ udpard_rx_poll(&fix.rx, now + 4);
+
+ // Allow the window to expire so the remaining interned transfers eject.
+ udpard_rx_poll(&fix.rx, now + 70);
+
+ constexpr std::array expected{ 100, 200, 300, 10100, 10200 };
+ TEST_ASSERT_EQUAL_size_t(expected.size(), fix.ctx.ids.size());
+ for (size_t i = 0; i < expected.size(); i++) {
+ TEST_ASSERT_EQUAL_UINT64(expected[i], fix.ctx.ids[i]);
+ }
+ TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions);
+}
+
+/// ORDERED mode after head advance should reject late IDs arriving after window expiry.
+void test_udpard_rx_ordered_head_advanced_late()
+{
+ Fixture fix{ udpard_rx_ordered, 50 };
+ udpard_us_t now = 0;
+
+ fix.push_single(now, 100);
+ udpard_rx_poll(&fix.rx, now);
+ fix.push_single(++now, 300);
+ udpard_rx_poll(&fix.rx, now);
+ fix.push_single(++now, 200);
+ udpard_rx_poll(&fix.rx, now);
+ now = 60;
+ udpard_rx_poll(&fix.rx, now); // head -> 300
+
+ fix.push_single(++now, 420);
+ udpard_rx_poll(&fix.rx, now);
+ fix.push_single(++now, 450);
+ udpard_rx_poll(&fix.rx, now);
+ now = 120;
+ udpard_rx_poll(&fix.rx, now); // head -> 450
+
+ fix.push_single(++now, 320);
+ udpard_rx_poll(&fix.rx, now);
+ fix.push_single(++now, 310);
+ udpard_rx_poll(&fix.rx, now);
+
+ constexpr std::array expected{ 100, 200, 300, 420, 450 };
+ TEST_ASSERT_EQUAL_size_t(expected.size(), fix.ctx.ids.size());
+ for (size_t i = 0; i < expected.size(); i++) {
+ TEST_ASSERT_EQUAL_UINT64(expected[i], fix.ctx.ids[i]);
+ }
+ TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions);
+}
+
+/// ORDERED mode rejects transfer-IDs far behind the recent history window.
+void test_udpard_rx_ordered_reject_far_past()
+{
+ Fixture fix{ udpard_rx_ordered, 50 };
+ udpard_us_t now = 0;
+
+ fix.push_single(now, 200000);
+ udpard_rx_poll(&fix.rx, now);
+
+ now = 60;
+ udpard_rx_poll(&fix.rx, now);
+
+ const uint64_t late_tid_close = 200000 - 1000;
+ fix.push_single(++now, late_tid_close);
+ udpard_rx_poll(&fix.rx, now);
+ udpard_rx_poll(&fix.rx, now + 100);
+
+ const uint64_t far_past_tid = 200000 - 100000;
+ fix.push_single(++now, far_past_tid);
+ udpard_rx_poll(&fix.rx, now);
+ udpard_rx_poll(&fix.rx, now + 50);
+
+ const uint64_t recent_tid = 200001;
+ fix.push_single(++now, recent_tid);
+ udpard_rx_poll(&fix.rx, now);
+ udpard_rx_poll(&fix.rx, now + 50);
+
+ constexpr std::array<uint64_t, 3> expected{ 200000, far_past_tid, recent_tid };
+ TEST_ASSERT_EQUAL_size_t(expected.size(), fix.ctx.ids.size());
+ for (size_t i = 0; i < expected.size(); i++) {
+ TEST_ASSERT_EQUAL_UINT64(expected[i], fix.ctx.ids[i]);
+ }
+ TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions);
+}
+
+// Feedback must fire regardless of disposal path.
+void test_udpard_tx_feedback_always_called()
+{
+ instrumented_allocator_t tx_alloc_transfer{};
+ instrumented_allocator_t tx_alloc_payload{};
+ instrumented_allocator_new(&tx_alloc_transfer);
+ instrumented_allocator_new(&tx_alloc_payload);
+ udpard_tx_mem_resources_t tx_mem{};
+ tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer);
+ for (auto& res : tx_mem.payload) {
+ res = instrumented_allocator_make_resource(&tx_alloc_payload);
+ }
+ constexpr uint16_t iface_bitmap_1 = (1U << 0U);
+
+ // Expiration path triggers feedback=false.
+ {
+ std::vector<CapturedFrame> frames;
+ udpard_tx_t tx{};
+ TEST_ASSERT_TRUE(udpard_tx_new(&tx, 1U, 1U, 4, tx_mem, &tx_vtable));
+ tx.user = &frames;
+ FbState fb{};
+ TEST_ASSERT_TRUE(udpard_tx_push(&tx,
+ 10,
+ 10,
+ iface_bitmap_1,
+ udpard_prio_fast,
+ 1,
+ 11,
+ make_scattered(nullptr, 0),
+ fb_record,
+ make_user_context(&fb)));
+ udpard_tx_poll(&tx, 11, UDPARD_IFACE_BITMAP_ALL);
+ TEST_ASSERT_EQUAL_size_t(1, fb.count);
+ TEST_ASSERT_EQUAL_UINT32(0, fb.acknowledgements);
+ release_frames(frames);
+ udpard_tx_free(&tx);
+ }
+
+ // Sacrifice path should also emit feedback.
+ {
+ std::vector<CapturedFrame> frames;
+ udpard_tx_t tx{};
+ TEST_ASSERT_TRUE(udpard_tx_new(&tx, 2U, 1U, 1, tx_mem, &tx_vtable));
+ tx.user = &frames;
+ FbState fb_old{};
+ FbState fb_new{};
+ TEST_ASSERT_TRUE(udpard_tx_push(&tx,
+ 0,
+ 1000,
+ iface_bitmap_1,
+ udpard_prio_fast,
+ 2,
+ 21,
+ make_scattered(nullptr, 0),
+ fb_record,
+ make_user_context(&fb_old)));
+ (void)udpard_tx_push(&tx,
+ 0,
+ 1000,
+ iface_bitmap_1,
+ udpard_prio_fast,
+ 3,
+ 22,
+ make_scattered(nullptr, 0),
+ fb_record,
+ make_user_context(&fb_new));
+ TEST_ASSERT_EQUAL_size_t(1, fb_old.count);
+ TEST_ASSERT_EQUAL_UINT32(0, fb_old.acknowledgements);
+ TEST_ASSERT_GREATER_OR_EQUAL_UINT64(1, tx.errors_sacrifice);
+ TEST_ASSERT_EQUAL_size_t(0, fb_new.count);
+ release_frames(frames);
+ udpard_tx_free(&tx);
+ }
+
+ // Destroying a TX with pending transfers still calls feedback.
+ {
+ std::vector<CapturedFrame> frames;
+ udpard_tx_t tx{};
+ TEST_ASSERT_TRUE(udpard_tx_new(&tx, 3U, 1U, 4, tx_mem, &tx_vtable));
+ tx.user = &frames;
+ FbState fb{};
+ TEST_ASSERT_TRUE(udpard_tx_push(&tx,
+ 0,
+ 1000,
+ iface_bitmap_1,
+ udpard_prio_fast,
+ 4,
+ 33,
+ make_scattered(nullptr, 0),
+ fb_record,
+ make_user_context(&fb)));
+ udpard_tx_free(&tx);
+ TEST_ASSERT_EQUAL_size_t(1, fb.count);
+ TEST_ASSERT_EQUAL_UINT32(0, fb.acknowledgements);
+ release_frames(frames);
+ }
+
+ instrumented_allocator_reset(&tx_alloc_transfer);
+ instrumented_allocator_reset(&tx_alloc_payload);
+}
+
+/// P2P helper should emit frames with auto transfer-ID and proper addressing.
+void test_udpard_tx_push_p2p()
+{
+ instrumented_allocator_t tx_alloc_transfer{};
+ instrumented_allocator_t tx_alloc_payload{};
+ instrumented_allocator_t rx_alloc_frag{};
+ instrumented_allocator_t rx_alloc_session{};
+ instrumented_allocator_new(&tx_alloc_transfer);
+ instrumented_allocator_new(&tx_alloc_payload);
+ instrumented_allocator_new(&rx_alloc_frag);
+ instrumented_allocator_new(&rx_alloc_session);
+ udpard_tx_mem_resources_t tx_mem{};
+ tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer);
+ for (auto& res : tx_mem.payload) {
+ res = instrumented_allocator_make_resource(&tx_alloc_payload);
+ }
+ udpard_tx_t tx{};
+ TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x1122334455667788ULL, 5U, 8, tx_mem, &tx_vtable));
+ std::vector<CapturedFrame> frames;
+ tx.user = &frames;
+
+ const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session),
+ .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) };
+ udpard_rx_t rx{};
+ udpard_rx_port_t port{};
+ Context ctx{};
+ const udpard_udpip_ep_t source{ .ip = 0x0A0000AAU, .port = 7600U };
+ const udpard_udpip_ep_t dest{ .ip = 0x0A000010U, .port = 7400U };
+ const uint64_t local_uid = 0xCAFEBABECAFED00DULL;
+ ctx.expected_uid = tx.local_uid;
+ ctx.source = source;
+ rx.user = &ctx;
+ TEST_ASSERT_TRUE(udpard_rx_port_new(&port, local_uid, 1024, udpard_rx_unordered, 0, rx_mem, &callbacks));
+
+ udpard_remote_t remote{};
+ remote.uid = local_uid;
+ remote.endpoints[0U] = dest;
+
+ const std::array<uint8_t, 3> user_payload{ 0xAAU, 0xBBU, 0xCCU };
+ const udpard_bytes_scattered_t payload = make_scattered(user_payload.data(), user_payload.size());
+ const udpard_us_t now = 0;
+ uint64_t out_tid = 0;
+ TEST_ASSERT_TRUE(udpard_tx_push_p2p(
+ &tx, now, now + 1000000, udpard_prio_nominal, remote, payload, nullptr, UDPARD_USER_CONTEXT_NULL, &out_tid));
+ udpard_tx_poll(&tx, now, UDPARD_IFACE_BITMAP_ALL);
+ TEST_ASSERT_FALSE(frames.empty());
+
+ const udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr };
+ for (const auto& f : frames) {
+ TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &port, now, source, f.datagram, tx_payload_deleter, f.iface_index));
+ }
+ udpard_rx_poll(&rx, now);
+ TEST_ASSERT_EQUAL_size_t(1, ctx.ids.size());
+ TEST_ASSERT_EQUAL_UINT64(out_tid, ctx.ids[0]);
+ TEST_ASSERT_EQUAL_size_t(0, ctx.collisions);
+
+ udpard_rx_port_free(&rx, &port);
+ udpard_tx_free(&tx);
+ TEST_ASSERT_EQUAL(0, tx_alloc_transfer.allocated_fragments);
+ TEST_ASSERT_EQUAL(0, tx_alloc_payload.allocated_fragments);
+ TEST_ASSERT_EQUAL(0, rx_alloc_frag.allocated_fragments);
+ TEST_ASSERT_EQUAL(0, rx_alloc_session.allocated_fragments);
+ instrumented_allocator_reset(&tx_alloc_transfer);
+ instrumented_allocator_reset(&tx_alloc_payload);
+ instrumented_allocator_reset(&rx_alloc_frag);
+ instrumented_allocator_reset(&rx_alloc_session);
+}
+
+/// Test TX with minimum MTU to verify fragmentation at the edge.
+void test_udpard_tx_minimum_mtu()
+{
+ instrumented_allocator_t tx_alloc_transfer{};
+ instrumented_allocator_t tx_alloc_payload{};
+ instrumented_allocator_t rx_alloc_frag{};
+ instrumented_allocator_t rx_alloc_session{};
+ instrumented_allocator_new(&tx_alloc_transfer);
+ instrumented_allocator_new(&tx_alloc_payload);
+ instrumented_allocator_new(&rx_alloc_frag);
+ instrumented_allocator_new(&rx_alloc_session);
+
+ udpard_tx_mem_resources_t tx_mem{};
+ tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer);
+ for (auto& res : tx_mem.payload) {
+ res = instrumented_allocator_make_resource(&tx_alloc_payload);
+ }
+ const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session),
+ .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) };
+
+ udpard_tx_t tx{};
+ TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0xDEADBEEF12345678ULL, 100U, 256, tx_mem, &tx_vtable));
+ std::vector<CapturedFrame> frames;
+ tx.user = &frames;
+
+ // Set MTU to minimum value
+ for (auto& mtu : tx.mtu) {
+ mtu = UDPARD_MTU_MIN;
+ }
+
+ udpard_rx_t rx{};
+ udpard_rx_port_t port{};
+ Context ctx{};
+ const uint64_t topic_hash = 0x1234567890ABCDEFULL;
+ ctx.expected_uid = tx.local_uid;
+ ctx.source = { .ip = 0x0A000001U, .port = 7501U };
+ udpard_rx_new(&rx, nullptr);
+ rx.user = &ctx;
+ TEST_ASSERT_TRUE(udpard_rx_port_new(&port, topic_hash, 4096, udpard_rx_unordered, 0, rx_mem, &callbacks));
+
+ // Send a payload that will require fragmentation at minimum MTU
+ std::array<uint8_t, 1024> payload{}; // size assumed; must exceed UDPARD_MTU_MIN to force fragmentation
+ for (size_t i = 0; i < payload.size(); i++) {
+ payload[i] = static_cast<uint8_t>(i & 0xFFU);
+ }
+
+ const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size());
+ constexpr uint16_t iface_bitmap_1 = (1U << 0U);
+
+ const udpard_us_t now = 0;
+ frames.clear();
+ TEST_ASSERT_TRUE(udpard_tx_push(&tx,
+ now,
+ now + 1000000,
+ iface_bitmap_1,
+ udpard_prio_nominal,
+ topic_hash,
+ 1U,
+ payload_view,
+ nullptr,
+ UDPARD_USER_CONTEXT_NULL));
+ udpard_tx_poll(&tx, now, UDPARD_IFACE_BITMAP_ALL);
+
+ // With minimum MTU, we should have multiple frames
+ TEST_ASSERT_TRUE(frames.size() > 1);
+
+ // Deliver frames to RX
+ const udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr };
+ for (const auto& f : frames) {
+ TEST_ASSERT_TRUE(
+ udpard_rx_port_push(&rx, &port, now, ctx.source, f.datagram, tx_payload_deleter, f.iface_index));
+ }
+ udpard_rx_poll(&rx, now);
+
+ // Verify the transfer was received correctly
+ TEST_ASSERT_EQUAL_size_t(1, ctx.ids.size());
+ TEST_ASSERT_EQUAL_UINT64(1U, ctx.ids[0]);
+
+ // Cleanup
+ udpard_rx_port_free(&rx, &port);
+ udpard_tx_free(&tx);
+ TEST_ASSERT_EQUAL(0, tx_alloc_transfer.allocated_fragments);
+ TEST_ASSERT_EQUAL(0, tx_alloc_payload.allocated_fragments);
+ TEST_ASSERT_EQUAL(0, rx_alloc_frag.allocated_fragments);
+ TEST_ASSERT_EQUAL(0, rx_alloc_session.allocated_fragments);
+ instrumented_allocator_reset(&tx_alloc_transfer);
+ instrumented_allocator_reset(&tx_alloc_payload);
+ instrumented_allocator_reset(&rx_alloc_frag);
+ instrumented_allocator_reset(&rx_alloc_session);
+}
+
+/// Test with transfer-ID at uint64 boundary values (0, large values)
+void test_udpard_transfer_id_boundaries()
+{
+ Fixture fix{ udpard_rx_unordered, 0 };
+
+ // Test transfer-ID = 0 (first valid value)
+ fix.push_single(0, 0);
+ udpard_rx_poll(&fix.rx, 0);
+ TEST_ASSERT_EQUAL_size_t(1, fix.ctx.ids.size());
+ TEST_ASSERT_EQUAL_UINT64(0U, fix.ctx.ids[0]);
+
+ // Test a large transfer-ID value
+ fix.push_single(1, 0x7FFFFFFFFFFFFFFFULL); // Large but not at the extreme edge
+ udpard_rx_poll(&fix.rx, 1);
+ TEST_ASSERT_EQUAL_size_t(2, fix.ctx.ids.size());
+ TEST_ASSERT_EQUAL_UINT64(0x7FFFFFFFFFFFFFFFULL, fix.ctx.ids[1]);
+
+ // Test another large value to verify the history doesn't reject it
+ fix.push_single(2, 0x8000000000000000ULL);
+ udpard_rx_poll(&fix.rx, 2);
+ TEST_ASSERT_EQUAL_size_t(3, fix.ctx.ids.size());
+ TEST_ASSERT_EQUAL_UINT64(0x8000000000000000ULL, fix.ctx.ids[2]);
+
+ TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions);
+}
+
+/// Test zero extent handling - should accept transfers but truncate payload
+void test_udpard_rx_zero_extent()
+{
+ instrumented_allocator_t tx_alloc_transfer{};
+ instrumented_allocator_t tx_alloc_payload{};
+ instrumented_allocator_t rx_alloc_frag{};
+ instrumented_allocator_t rx_alloc_session{};
+ instrumented_allocator_new(&tx_alloc_transfer);
+ instrumented_allocator_new(&tx_alloc_payload);
+ instrumented_allocator_new(&rx_alloc_frag);
+ instrumented_allocator_new(&rx_alloc_session);
+
+ udpard_tx_mem_resources_t tx_mem{};
+ tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer);
+ for (auto& res : tx_mem.payload) {
+ res = instrumented_allocator_make_resource(&tx_alloc_payload);
+ }
+ const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session),
+ .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) };
+
+ udpard_tx_t tx{};
+ TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0xAAAABBBBCCCCDDDDULL, 200U, 64, tx_mem, &tx_vtable));
+ std::vector<CapturedFrame> frames;
+ tx.user = &frames;
+
+ udpard_rx_t rx{};
+ udpard_rx_port_t port{};
+ const uint64_t topic_hash = 0xFEDCBA9876543210ULL;
+ udpard_rx_new(&rx, nullptr);
+
+ // Create port with zero extent
+ TEST_ASSERT_TRUE(udpard_rx_port_new(&port, topic_hash, 0, udpard_rx_unordered, 0, rx_mem, &callbacks));
+
+ // Track received transfers
+ struct ZeroExtentContext
+ {
+ size_t count = 0;
+ size_t payload_size_stored = 0;
+ size_t payload_size_wire = 0;
+ };
+ ZeroExtentContext zctx{};
+
+ // Custom callback for zero extent test
+ struct ZeroExtentCallbacks
+ {
+ static void on_message(udpard_rx_t* const rx_arg,
+ udpard_rx_port_t* const port_arg,
+ const udpard_rx_transfer_t transfer)
+ {
+ auto* z = static_cast<ZeroExtentContext*>(rx_arg->user);
+ z->count++;
+ z->payload_size_stored = transfer.payload_size_stored;
+ z->payload_size_wire = transfer.payload_size_wire;
+ udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port_arg->memory.fragment));
+ }
+ static void on_collision(udpard_rx_t*, udpard_rx_port_t*, udpard_remote_t) {}
+ };
+ static constexpr udpard_rx_port_vtable_t zero_callbacks{ .on_message = &ZeroExtentCallbacks::on_message,
+ .on_collision = &ZeroExtentCallbacks::on_collision };
+ port.vtable = &zero_callbacks;
+ rx.user = &zctx;
+
+ // Send a small single-frame transfer
+ std::array<uint8_t, 16> payload{}; // size assumed; small enough to fit in a single frame
+ for (size_t i = 0; i < payload.size(); i++) {
+ payload[i] = static_cast<uint8_t>(i);
+ }
+
+ const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size());
+ constexpr uint16_t iface_bitmap_1 = (1U << 0U);
+ const udpard_udpip_ep_t source{ .ip = 0x0A000002U, .port = 7502U };
+
+ const udpard_us_t now = 0;
+ frames.clear();
+ TEST_ASSERT_TRUE(udpard_tx_push(&tx,
+ now,
+ now + 1000000,
+ iface_bitmap_1,
+ udpard_prio_nominal,
+ topic_hash,
+ 5U,
+ payload_view,
+ nullptr,
+ UDPARD_USER_CONTEXT_NULL));
+ udpard_tx_poll(&tx, now, UDPARD_IFACE_BITMAP_ALL);
+ TEST_ASSERT_FALSE(frames.empty());
+
+ // Deliver to RX with zero extent
+ const udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr };
+ for (const auto& f : frames) {
+ TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &port, now, source, f.datagram, tx_payload_deleter, f.iface_index));
+ }
+ udpard_rx_poll(&rx, now);
+
+ // The transfer should still be received: zero extent only bounds how much payload is stored,
+ // and the library may retain some payload for single-frame transfers even with zero extent.
+ TEST_ASSERT_EQUAL_size_t(1, zctx.count);
+ TEST_ASSERT_TRUE(zctx.payload_size_stored <= payload.size()); // At most the original size
+ TEST_ASSERT_EQUAL_size_t(payload.size(), zctx.payload_size_wire); // Wire size is original
+
+ // Cleanup
+ udpard_rx_port_free(&rx, &port);
+ udpard_tx_free(&tx);
+ TEST_ASSERT_EQUAL(0, tx_alloc_transfer.allocated_fragments);
+ TEST_ASSERT_EQUAL(0, tx_alloc_payload.allocated_fragments);
+ TEST_ASSERT_EQUAL(0, rx_alloc_frag.allocated_fragments);
+ TEST_ASSERT_EQUAL(0, rx_alloc_session.allocated_fragments);
+ instrumented_allocator_reset(&tx_alloc_transfer);
+ instrumented_allocator_reset(&tx_alloc_payload);
+ instrumented_allocator_reset(&rx_alloc_frag);
+ instrumented_allocator_reset(&rx_alloc_session);
+}
+
+/// Test empty payload transfer (zero-size payload)
+void test_udpard_empty_payload()
+{
+ Fixture fix{ udpard_rx_unordered, 0 };
+
+ // Send an empty payload
+ fix.frames.clear();
+ const udpard_bytes_scattered_t empty_payload = make_scattered(nullptr, 0);
+ const udpard_us_t deadline = 1000000;
+ constexpr uint16_t iface_bitmap_1 = (1U << 0U);
+
+ TEST_ASSERT_TRUE(udpard_tx_push(&fix.tx,
+ 0,
+ deadline,
+ iface_bitmap_1,
+ udpard_prio_nominal,
+ fix.topic_hash,
+ 10U,
+ empty_payload,
+ nullptr,
+ UDPARD_USER_CONTEXT_NULL));
+ udpard_tx_poll(&fix.tx, 0, UDPARD_IFACE_BITMAP_ALL);
+ TEST_ASSERT_FALSE(fix.frames.empty());
+
+ // Deliver to RX
+ for (const auto& f : fix.frames) {
+ TEST_ASSERT_TRUE(
+ udpard_rx_port_push(&fix.rx, &fix.port, 0, fix.source, f.datagram, fix.tx_payload_deleter, f.iface_index));
+ }
+ udpard_rx_poll(&fix.rx, 0);
+
+ // Empty transfer should be received
+ TEST_ASSERT_EQUAL_size_t(1, fix.ctx.ids.size());
+ TEST_ASSERT_EQUAL_UINT64(10U, fix.ctx.ids[0]);
+}
+
+/// Test priority levels from exceptional (0) to optional (7)
+void test_udpard_all_priority_levels()
+{
+ Fixture fix{ udpard_rx_unordered, 0 };
+ udpard_us_t now = 0;
+
+ constexpr uint16_t iface_bitmap_1 = (1U << 0U);
+
+ // Test all 8 priority levels
+ for (uint8_t prio = 0; prio < UDPARD_PRIORITY_COUNT; prio++) {
+ fix.frames.clear();
+ std::array<uint8_t, 1> payload{}; // size assumed; a one-byte payload carrying the priority value
+ payload[0] = prio;
+ const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size());
+
+ TEST_ASSERT_TRUE(udpard_tx_push(&fix.tx,
+ now,
+ now + 1000000,
+ iface_bitmap_1,
+ static_cast<udpard_prio_t>(prio),
+ fix.topic_hash,
+ 100U + prio,
+ payload_view,
+ nullptr,
+ UDPARD_USER_CONTEXT_NULL));
+ udpard_tx_poll(&fix.tx, now, UDPARD_IFACE_BITMAP_ALL);
+ TEST_ASSERT_FALSE(fix.frames.empty());
+
+ for (const auto& f : fix.frames) {
+ TEST_ASSERT_TRUE(udpard_rx_port_push(
+ &fix.rx, &fix.port, now, fix.source, f.datagram, fix.tx_payload_deleter, f.iface_index));
+ }
+ udpard_rx_poll(&fix.rx, now);
+ now++;
+ }
+
+ // All 8 transfers should be received
+ TEST_ASSERT_EQUAL_size_t(UDPARD_PRIORITY_COUNT, fix.ctx.ids.size());
+ for (uint8_t prio = 0; prio < UDPARD_PRIORITY_COUNT; prio++) {
+ TEST_ASSERT_EQUAL_UINT64(100U + prio, fix.ctx.ids[prio]);
+ }
+}
+
+/// Test collision detection (topic hash mismatch)
+void test_udpard_topic_hash_collision()
+{
+ instrumented_allocator_t tx_alloc_transfer{};
+ instrumented_allocator_t tx_alloc_payload{};
+ instrumented_allocator_t rx_alloc_frag{};
+ instrumented_allocator_t rx_alloc_session{};
+ instrumented_allocator_new(&tx_alloc_transfer);
+ instrumented_allocator_new(&tx_alloc_payload);
+ instrumented_allocator_new(&rx_alloc_frag);
+ instrumented_allocator_new(&rx_alloc_session);
+
+ udpard_tx_mem_resources_t tx_mem{};
+ tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer);
+ for (auto& res : tx_mem.payload) {
+ res = instrumented_allocator_make_resource(&tx_alloc_payload);
+ }
+ const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session),
+ .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) };
+
+ udpard_tx_t tx{};
+ TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x1111222233334444ULL, 300U, 64, tx_mem, &tx_vtable));
+ std::vector<CapturedFrame> frames;
+ tx.user = &frames;
+
+ udpard_rx_t rx{};
+ udpard_rx_port_t port{};
+ Context ctx{};
+ const uint64_t rx_topic_hash = 0xAAAAAAAAAAAAAAAAULL; // Different from TX
+ const uint64_t tx_topic_hash = 0xBBBBBBBBBBBBBBBBULL; // Different from RX
+ ctx.expected_uid = tx.local_uid;
+ ctx.source = { .ip = 0x0A000003U, .port = 7503U };
+ udpard_rx_new(&rx, nullptr);
+ rx.user = &ctx;
+ TEST_ASSERT_TRUE(udpard_rx_port_new(&port, rx_topic_hash, 1024, udpard_rx_unordered, 0, rx_mem, &callbacks));
+
+ // Send with mismatched topic hash
+ std::array<uint8_t, 8> payload{}; // size assumed; content is irrelevant for the collision check
+ const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size());
+ constexpr uint16_t iface_bitmap_1 = (1U << 0U);
+
+ const udpard_us_t now = 0;
+ frames.clear();
+ TEST_ASSERT_TRUE(udpard_tx_push(&tx,
+ now,
+ now + 1000000,
+ iface_bitmap_1,
+ udpard_prio_nominal,
+ tx_topic_hash, // Different from port's topic_hash
+ 1U,
+ payload_view,
+ nullptr,
+ UDPARD_USER_CONTEXT_NULL));
+ udpard_tx_poll(&tx, now, UDPARD_IFACE_BITMAP_ALL);
+ TEST_ASSERT_FALSE(frames.empty());
+
+ // Deliver to RX - should trigger collision callback
+ const udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr };
+ for (const auto& f : frames) {
+ TEST_ASSERT_TRUE(
+ udpard_rx_port_push(&rx, &port, now, ctx.source, f.datagram, tx_payload_deleter, f.iface_index));
+ }
+ udpard_rx_poll(&rx, now);
+
+ // No transfers received, but collision detected
+ TEST_ASSERT_EQUAL_size_t(0, ctx.ids.size());
+ TEST_ASSERT_EQUAL_size_t(1, ctx.collisions);
+
+ // Cleanup
+ udpard_rx_port_free(&rx, &port);
+ udpard_tx_free(&tx);
+ TEST_ASSERT_EQUAL(0, tx_alloc_transfer.allocated_fragments);
+ TEST_ASSERT_EQUAL(0, tx_alloc_payload.allocated_fragments);
+ TEST_ASSERT_EQUAL(0, rx_alloc_frag.allocated_fragments);
+ TEST_ASSERT_EQUAL(0, rx_alloc_session.allocated_fragments);
+ instrumented_allocator_reset(&tx_alloc_transfer);
+ instrumented_allocator_reset(&tx_alloc_payload);
+ instrumented_allocator_reset(&rx_alloc_frag);
+ instrumented_allocator_reset(&rx_alloc_session);
+}
+
+} // namespace
+
+extern "C" void setUp() {}
+
+extern "C" void tearDown() {}
+
+int main()
+{
+ UNITY_BEGIN();
+ RUN_TEST(test_udpard_rx_unordered_duplicates);
+ RUN_TEST(test_udpard_rx_ordered_out_of_order);
+ RUN_TEST(test_udpard_rx_ordered_head_advanced_late);
+ RUN_TEST(test_udpard_rx_ordered_reject_far_past);
+ RUN_TEST(test_udpard_tx_feedback_always_called);
+ RUN_TEST(test_udpard_tx_push_p2p);
+ RUN_TEST(test_udpard_tx_minimum_mtu);
+ RUN_TEST(test_udpard_transfer_id_boundaries);
+ RUN_TEST(test_udpard_rx_zero_extent);
+ RUN_TEST(test_udpard_empty_payload);
+ RUN_TEST(test_udpard_all_priority_levels);
+ RUN_TEST(test_udpard_topic_hash_collision);
+ return UNITY_END();
+}
diff --git a/tests/src/test_e2e_random.cpp b/tests/src/test_e2e_random.cpp
new file mode 100644
index 0000000..7dc6926
--- /dev/null
+++ b/tests/src/test_e2e_random.cpp
@@ -0,0 +1,418 @@
+/// This software is distributed under the terms of the MIT License.
+/// Copyright (C) OpenCyphal Development Team
+/// Copyright Amazon.com Inc. or its affiliates.
+/// SPDX-License-Identifier: MIT
+
+// ReSharper disable CppPassValueParameterByConstReference
+
+#include <udpard.h>
+#include "helpers.h"
+#include <unity.h>
+#include <array>
+#include <cstdlib>
+#include <unordered_map>
+#include <vector>
+
+namespace {
+
+struct TransferKey
+{
+ uint64_t transfer_id;
+ uint64_t topic_hash;
+ bool operator==(const TransferKey& other) const
+ {
+ return (transfer_id == other.transfer_id) && (topic_hash == other.topic_hash);
+ }
+};
+
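+// Combines the two 64-bit hashes with a shift-xor; a weak mix is fine here because lookups compare both key fields.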
+struct TransferKeyHash
+{
+ size_t operator()(const TransferKey& key) const
+ {
+ return (std::hash<uint64_t>{}(key.transfer_id) << 1U) ^ std::hash<uint64_t>{}(key.topic_hash);
+ }
+};
+
+struct ExpectedPayload
+{
+ std::vector<uint8_t> payload;
+ size_t payload_size_wire;
+};
+
+struct Context
+{
+ std::unordered_map expected;
+ size_t received = 0;
+ size_t collisions = 0;
+ size_t truncated = 0;
+ uint64_t remote_uid = 0;
+ size_t reliable_feedback_success = 0;
+ size_t reliable_feedback_failure = 0;
+ std::array<udpard_udpip_ep_t, UDPARD_IFACE_COUNT_MAX> remote_endpoints{};
+};
+
+struct Arrival
+{
+ udpard_bytes_mut_t datagram;
+ uint_fast8_t iface_index;
+};
+
+struct CapturedFrame
+{
+ udpard_bytes_mut_t datagram;
+ uint_fast8_t iface_index;
+};
+
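+// Uniform-ish integer in [min, max]; rand() with modulo carries a small bias, which is acceptable for test traffic.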
+size_t random_range(const size_t min, const size_t max)
+{
+ const size_t span = max - min + 1U;
+ return min + (static_cast<size_t>(rand()) % span);
+}
+
+void fill_random(std::vector<uint8_t>& data)
+{
+ for (auto& byte : data) {
+ byte = static_cast<uint8_t>(random_range(0, UINT8_MAX));
+ }
+}
+
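+// In-place Fisher-Yates shuffle; models out-of-order arrival across the redundant interfaces.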
+void shuffle_frames(std::vector<Arrival>& frames)
+{
+ for (size_t i = frames.size(); i > 1; i--) {
+ const size_t j = random_range(0, i - 1);
+ std::swap(frames[i - 1U], frames[j]);
+ }
+}
+
+void tx_refcount_free(void* const user, const size_t size, void* const payload)
+{
+ (void)user;
+ udpard_tx_refcount_dec(udpard_bytes_t{ .size = size, .data = payload });
+}
+
+// Shared deleter for captured TX frames.
+constexpr udpard_deleter_vtable_t tx_refcount_deleter_vt{ .free = &tx_refcount_free };
+
+bool capture_tx_frame_impl(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection)
+{
+ auto* frames = static_cast<std::vector<CapturedFrame>*>(tx->user);
+ if (frames == nullptr) {
+ return false;
+ }
+ udpard_tx_refcount_inc(ejection->datagram);
+ void* const data = const_cast<void*>(ejection->datagram.data); // NOLINT(cppcoreguidelines-pro-type-const-cast)
+ frames->push_back(CapturedFrame{ .datagram = { .size = ejection->datagram.size, .data = data },
+ .iface_index = ejection->iface_index });
+ return true;
+}
+
+bool capture_tx_frame_subject(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection)
+{
+ return capture_tx_frame_impl(tx, ejection);
+}
+
+bool capture_tx_frame_p2p(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t /*dest*/)
+{
+ return capture_tx_frame_impl(tx, ejection);
+}
+
+constexpr udpard_tx_vtable_t tx_vtable{ .eject_subject = &capture_tx_frame_subject,
+ .eject_p2p = &capture_tx_frame_p2p };
+
+void record_feedback(udpard_tx_t*, const udpard_tx_feedback_t fb)
+{
+ auto* ctx = static_cast<Context*>(fb.user.ptr[0]);
+ if (ctx != nullptr) {
+ if (fb.acknowledgements > 0U) {
+ ctx->reliable_feedback_success++;
+ } else {
+ ctx->reliable_feedback_failure++;
+ }
+ }
+}
+
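+// ACK responses carry nothing of interest to this test; just release the payload fragments.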
+void on_ack_response(udpard_rx_t*, udpard_rx_port_t* port, const udpard_rx_transfer_t tr)
+{
+ udpard_fragment_free_all(tr.payload, udpard_make_deleter(port->memory.fragment));
+}
+constexpr udpard_rx_port_vtable_t ack_callbacks{ .on_message = &on_ack_response, .on_collision = nullptr };
+
+void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer)
+{
+ auto* const ctx = static_cast<Context*>(rx->user);
+
+ // Match the incoming transfer against the expected table keyed by topic hash and transfer-ID.
+ const TransferKey key{ .transfer_id = transfer.transfer_id, .topic_hash = port->topic_hash };
+ const auto it = ctx->expected.find(key);
+ if (it == ctx->expected.end()) {
+ udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment));
+ return;
+ }
+
+ // Gather fragments into a contiguous buffer so we can compare the stored prefix (payload may be truncated).
+ std::vector<uint8_t> assembled(transfer.payload_size_stored);
+ const udpard_fragment_t* payload_cursor = transfer.payload;
+ const size_t gathered = udpard_fragment_gather(&payload_cursor, 0, transfer.payload_size_stored, assembled.data());
+ TEST_ASSERT_EQUAL_size_t(transfer.payload_size_stored, gathered);
+ TEST_ASSERT_TRUE(transfer.payload_size_stored <= it->second.payload.size());
+ TEST_ASSERT_EQUAL_size_t(it->second.payload_size_wire, transfer.payload_size_wire);
+ if (transfer.payload_size_stored > 0U) {
+ TEST_ASSERT_EQUAL_MEMORY(it->second.payload.data(), assembled.data(), transfer.payload_size_stored);
+ }
+
+ // Verify remote and the return path discovery.
+ TEST_ASSERT_EQUAL_UINT64(ctx->remote_uid, transfer.remote.uid);
+ for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+ if ((transfer.remote.endpoints[i].ip != 0U) || (transfer.remote.endpoints[i].port != 0U)) {
+ TEST_ASSERT_EQUAL_UINT32(ctx->remote_endpoints[i].ip, transfer.remote.endpoints[i].ip);
+ TEST_ASSERT_EQUAL_UINT16(ctx->remote_endpoints[i].port, transfer.remote.endpoints[i].port);
+ }
+ }
+ if (transfer.payload_size_stored < transfer.payload_size_wire) {
+ ctx->truncated++;
+ }
+
+ // Clean up.
+ udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment));
+ ctx->expected.erase(it);
+ ctx->received++;
+}
+
+void on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_remote_t remote)
+{
+ auto* ctx = static_cast<Context*>(rx->user);
+ (void)port;
+ (void)remote;
+ ctx->collisions++;
+}
+constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message, .on_collision = &on_collision };
+
+/// Randomized end-to-end TX/RX covering fragmentation, reordering, and extent-driven truncation.
+void test_udpard_tx_rx_end_to_end()
+{
+ seed_prng();
+
+ // TX allocator setup and pipeline initialization.
+ instrumented_allocator_t tx_alloc_transfer{};
+ instrumented_allocator_new(&tx_alloc_transfer);
+ instrumented_allocator_t tx_alloc_payload{};
+ instrumented_allocator_new(&tx_alloc_payload);
+ udpard_tx_mem_resources_t tx_mem{};
+ tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer);
+ for (auto& res : tx_mem.payload) {
+ res = instrumented_allocator_make_resource(&tx_alloc_payload);
+ }
+ udpard_tx_t tx{};
+ TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0A0B0C0D0E0F1011ULL, 123U, 256, tx_mem, &tx_vtable));
+ instrumented_allocator_t ack_alloc_transfer{};
+ instrumented_allocator_t ack_alloc_payload{};
+ instrumented_allocator_new(&ack_alloc_transfer);
+ instrumented_allocator_new(&ack_alloc_payload);
+ udpard_tx_mem_resources_t ack_mem{};
+ ack_mem.transfer = instrumented_allocator_make_resource(&ack_alloc_transfer);
+ for (auto& res : ack_mem.payload) {
+ res = instrumented_allocator_make_resource(&ack_alloc_payload);
+ }
+ udpard_tx_t ack_tx{};
+ TEST_ASSERT_TRUE(udpard_tx_new(&ack_tx, 0x1020304050607080ULL, 321U, 256, ack_mem, &tx_vtable));
+
+ // RX allocator setup and shared RX instance with callbacks.
+ instrumented_allocator_t rx_alloc_frag{};
+ instrumented_allocator_new(&rx_alloc_frag);
+ instrumented_allocator_t rx_alloc_session{};
+ instrumented_allocator_new(&rx_alloc_session);
+ const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session),
+ .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) };
+ udpard_rx_t rx;
+ udpard_rx_new(&rx, &ack_tx);
+ instrumented_allocator_t ack_rx_alloc_frag{};
+ instrumented_allocator_t ack_rx_alloc_session{};
+ instrumented_allocator_new(&ack_rx_alloc_frag);
+ instrumented_allocator_new(&ack_rx_alloc_session);
+ const udpard_rx_mem_resources_t ack_rx_mem{ .session = instrumented_allocator_make_resource(&ack_rx_alloc_session),
+ .fragment = instrumented_allocator_make_resource(&ack_rx_alloc_frag) };
+ udpard_rx_t ack_rx{};
+ udpard_rx_port_t ack_port{};
+ udpard_rx_new(&ack_rx, &tx);
+
+ // Test parameters.
+ constexpr std::array topic_hashes{ 0x123456789ABCDEF0ULL,
+ 0x0FEDCBA987654321ULL,
+ 0x00ACE00ACE00ACEULL };
+ constexpr std::array modes{ udpard_rx_ordered, udpard_rx_unordered, udpard_rx_ordered };
+ constexpr std::array windows{ 2000, 0, 5000 };
+ constexpr std::array<size_t, 3> extents{ 1000, 5000, SIZE_MAX };
+
+ // Configure ports with varied extents and reordering windows to cover truncation and different RX modes.
+ std::array<udpard_rx_port_t, 3> ports{};
+ for (size_t i = 0; i < ports.size(); i++) {
+ TEST_ASSERT_TRUE(
+ udpard_rx_port_new(&ports[i], topic_hashes[i], extents[i], modes[i], windows[i], rx_mem, &callbacks));
+ }
+
+ // Setup the context.
+ Context ctx{};
+ ctx.remote_uid = tx.local_uid;
+ for (size_t i = 0; i < ports.size(); i++) {
+ ctx.remote_endpoints[i] = { .ip = static_cast<uint32_t>(0x0A000001U + i),
+ .port = static_cast<uint16_t>(7400U + i) };
+ }
+ rx.user = &ctx;
+ constexpr udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr };
+ // Ack path wiring.
+ std::vector frames;
+ tx.user = &frames;
+ std::vector ack_frames;
+ ack_tx.user = &ack_frames;
+ TEST_ASSERT_TRUE(
+ udpard_rx_port_new(&ack_port, tx.local_uid, 16, udpard_rx_unordered, 0, ack_rx_mem, &ack_callbacks));
+ std::array<udpard_udpip_ep_t, UDPARD_IFACE_COUNT_MAX> ack_sources{};
+ for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+ ack_sources[i] = { .ip = static_cast<uint32_t>(0x0A000020U + i), .port = static_cast<uint16_t>(7700U + i) };
+ }
+
+ // Main test loop: generate transfers, push into TX, drain and shuffle frames, push into RX.
+ std::array<uint64_t, 3> transfer_ids{ static_cast<uint64_t>(rand()),
+ static_cast<uint64_t>(rand()),
+ static_cast<uint64_t>(rand()) };
+ size_t reliable_total = 0;
+ udpard_us_t now = 0;
+ for (size_t transfer_index = 0; transfer_index < 1000; transfer_index++) {
+ now += static_cast<udpard_us_t>(random_range(1000, 5000));
+ frames.clear();
+
+ // Pick a port, build a random payload, and remember what to expect on that topic.
+ const size_t port_index = random_range(0, ports.size() - 1U);
+ const uint64_t transfer_id = transfer_ids[port_index]++;
+ const size_t payload_size = random_range(0, 10000);
+ std::vector payload(payload_size);
+ fill_random(payload);
+ const bool reliable = (random_range(0, 3) == 0); // About a quarter reliable.
+ if (reliable) {
+ reliable_total++;
+ }
+
+ // Each transfer is sent on all redundant interfaces with different MTUs to exercise fragmentation variety.
+ const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size());
+ const auto priority = static_cast<udpard_prio_t>(random_range(0, UDPARD_PRIORITY_COUNT - 1U));
+ const TransferKey key{ .transfer_id = transfer_id, .topic_hash = topic_hashes[port_index] };
+ const bool inserted =
+ ctx.expected.emplace(key, ExpectedPayload{ .payload = payload, .payload_size_wire = payload.size() }).second;
+ TEST_ASSERT_TRUE(inserted);
+
+ // Generate MTUs per redundant interface.
+ std::array<size_t, UDPARD_IFACE_COUNT_MAX> mtu_values{};
+ for (auto& x : mtu_values) {
+ x = random_range(UDPARD_MTU_MIN, 3000U);
+ }
+ for (size_t iface = 0; iface < UDPARD_IFACE_COUNT_MAX; iface++) {
+ tx.mtu[iface] = mtu_values[iface];
+ }
+ // Enqueue one transfer spanning all interfaces.
+ const udpard_us_t deadline = now + 1000000;
+ TEST_ASSERT_TRUE(udpard_tx_push(&tx,
+ now,
+ deadline,
+ UDPARD_IFACE_BITMAP_ALL,
+ priority,
+ topic_hashes[port_index],
+ transfer_id,
+ payload_view,
+ reliable ? &record_feedback : nullptr,
+ reliable ? make_user_context(&ctx) : UDPARD_USER_CONTEXT_NULL));
+ udpard_tx_poll(&tx, now, UDPARD_IFACE_BITMAP_ALL);
+
+ // Shuffle and push frames into the RX pipeline, simulating out-of-order redundant arrival.
+ std::vector<Arrival> arrivals;
+ arrivals.reserve(frames.size());
+ for (const auto& [datagram, iface_index] : frames) {
+ arrivals.push_back(Arrival{ .datagram = datagram, .iface_index = iface_index });
+ }
+ shuffle_frames(arrivals);
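+ // For reliable transfers, designate one interface as lossy: roughly a third of its frames are dropped below.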
+ const size_t keep_iface = reliable ? random_range(0, UDPARD_IFACE_COUNT_MAX - 1U) : 0U;
+ const size_t loss_iface = reliable ? ((keep_iface + 1U) % UDPARD_IFACE_COUNT_MAX) : UDPARD_IFACE_COUNT_MAX;
+ const size_t ack_loss_iface = loss_iface;
+ for (const auto& [datagram, iface_index] : arrivals) {
+ const bool drop = reliable && (iface_index == loss_iface) && ((rand() % 3) == 0);
+ if (drop) {
+ udpard_tx_refcount_dec(udpard_bytes_t{ .size = datagram.size, .data = datagram.data });
+ } else {
+ TEST_ASSERT_TRUE(udpard_rx_port_push(&rx,
+ &ports[port_index],
+ now,
+ ctx.remote_endpoints[iface_index],
+ datagram,
+ tx_payload_deleter,
+ iface_index));
+ }
+ now += 1;
+ }
+
+ // Let the RX pipeline purge timeouts and deliver ready transfers.
+ udpard_rx_poll(&rx, now);
+ ack_frames.clear();
+ udpard_tx_poll(&ack_tx, now, UDPARD_IFACE_BITMAP_ALL);
+ bool ack_delivered = false;
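+ // Drop all ACKs on the lossy interface; if none got through, deliver the first one anyway so the reliable transfer still completes.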
+ for (const auto& [datagram, iface_index] : ack_frames) {
+ const bool drop_ack = reliable && (iface_index == ack_loss_iface);
+ if (drop_ack) {
+ udpard_tx_refcount_dec(udpard_bytes_t{ .size = datagram.size, .data = datagram.data });
+ continue;
+ }
+ ack_delivered = true;
+ TEST_ASSERT_TRUE(udpard_rx_port_push(
+ &ack_rx, &ack_port, now, ack_sources[iface_index], datagram, tx_payload_deleter, iface_index));
+ }
+ if (reliable && !ack_delivered && !ack_frames.empty()) {
+ const auto& [datagram, iface_index] = ack_frames.front();
+ TEST_ASSERT_TRUE(udpard_rx_port_push(
+ &ack_rx, &ack_port, now, ack_sources[iface_index], datagram, tx_payload_deleter, iface_index));
+ }
+ udpard_rx_poll(&ack_rx, now);
+ }
+
+ // Final poll/validation and cleanup.
+ udpard_rx_poll(&rx, now + 1000000);
+ udpard_rx_poll(&ack_rx, now + 1000000);
+ TEST_ASSERT_TRUE(ctx.expected.empty());
+ TEST_ASSERT_EQUAL_size_t(1000, ctx.received);
+ TEST_ASSERT_TRUE(ctx.truncated > 0);
+ TEST_ASSERT_EQUAL_size_t(0, ctx.collisions);
+ TEST_ASSERT_EQUAL_size_t(reliable_total, ctx.reliable_feedback_success);
+ TEST_ASSERT_EQUAL_size_t(0, ctx.reliable_feedback_failure);
+ for (auto& port : ports) {
+ udpard_rx_port_free(&rx, &port);
+ }
+ udpard_rx_port_free(&ack_rx, &ack_port);
+ udpard_tx_free(&tx);
+ udpard_tx_free(&ack_tx);
+ TEST_ASSERT_EQUAL_size_t(0, rx_alloc_frag.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, rx_alloc_session.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, tx_alloc_transfer.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, tx_alloc_payload.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, ack_alloc_transfer.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, ack_alloc_payload.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, ack_rx_alloc_frag.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, ack_rx_alloc_session.allocated_fragments);
+ instrumented_allocator_reset(&rx_alloc_frag);
+ instrumented_allocator_reset(&rx_alloc_session);
+ instrumented_allocator_reset(&tx_alloc_transfer);
+ instrumented_allocator_reset(&tx_alloc_payload);
+ instrumented_allocator_reset(&ack_alloc_transfer);
+ instrumented_allocator_reset(&ack_alloc_payload);
+ instrumented_allocator_reset(&ack_rx_alloc_frag);
+ instrumented_allocator_reset(&ack_rx_alloc_session);
+}
+
+} // namespace
+
+extern "C" void setUp() {}
+
+extern "C" void tearDown() {}
+
+int main()
+{
+ UNITY_BEGIN();
+ RUN_TEST(test_udpard_tx_rx_end_to_end);
+ return UNITY_END();
+}
diff --git a/tests/src/test_e2e_reliable_ordered.cpp b/tests/src/test_e2e_reliable_ordered.cpp
new file mode 100644
index 0000000..009c1d7
--- /dev/null
+++ b/tests/src/test_e2e_reliable_ordered.cpp
@@ -0,0 +1,463 @@
+/// This software is distributed under the terms of the MIT License.
+/// Copyright (C) OpenCyphal Development Team
+/// Copyright Amazon.com Inc. or its affiliates.
+/// SPDX-License-Identifier: MIT
+/// This test validates reliable delivery with ORDERED mode under packet loss and reordering.
+
+#include <udpard.h>
+#include "helpers.h"
+#include <unity.h>
+#include <array>
+#include <vector>
+
+namespace {
+
+constexpr size_t CyphalHeaderSize = 48; // Cyphal/UDP header size
+
+struct CapturedFrame
+{
+ udpard_bytes_mut_t datagram;
+ uint_fast8_t iface_index;
+};
+
+void tx_refcount_free(void* const user, const size_t size, void* const payload)
+{
+ (void)user;
+ udpard_tx_refcount_dec(udpard_bytes_t{ .size = size, .data = payload });
+}
+
+constexpr udpard_deleter_vtable_t tx_refcount_deleter_vt{ .free = &tx_refcount_free };
+constexpr udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr };
+
+void drop_frame(const CapturedFrame& frame)
+{
+ udpard_tx_refcount_dec(udpard_bytes_t{ .size = frame.datagram.size, .data = frame.datagram.data });
+}
+
+// Extract transfer_id from Cyphal/UDP header (bytes 16-23 of datagram).
+uint64_t extract_transfer_id(const udpard_bytes_mut_t& datagram)
+{
+ if (datagram.size < 24) {
+ return 0;
+ }
+ const auto* p = static_cast