diff --git a/.clang-format b/.clang-format index d6b6f78..f7f1224 100644 --- a/.clang-format +++ b/.clang-format @@ -1,96 +1,17 @@ ---- -Language: Cpp -# BasedOnStyle: LLVM -AccessModifierOffset: -4 -AlignAfterOpenBracket: Align +Language: Cpp +BasedOnStyle: Mozilla +IndentWidth: 4 +ColumnLimit: 120 +Standard: c++20 +IndentExternBlock: NoIndent +AlwaysBreakAfterDefinitionReturnType: None +BreakAfterReturnType: None +AllowShortFunctionsOnASingleLine: All AlignConsecutiveAssignments: true +AlignConsecutiveBitFields: true AlignConsecutiveDeclarations: true -AlignEscapedNewlines: Left -AlignOperands: true +AlignConsecutiveMacros: true +AlignConsecutiveShortCaseStatements: { Enabled: true } +AlignEscapedNewlines: LeftWithLastLine AlignTrailingComments: true -AllowAllParametersOfDeclarationOnNextLine: false -AllowShortBlocksOnASingleLine: false -AllowShortCaseLabelsOnASingleLine: false -AllowShortFunctionsOnASingleLine: Inline -AllowShortIfStatementsOnASingleLine: Never -AllowShortLoopsOnASingleLine: false -AlwaysBreakAfterDefinitionReturnType: None -AlwaysBreakAfterReturnType: None -AlwaysBreakBeforeMultilineStrings: false -AlwaysBreakTemplateDeclarations: Yes -BinPackArguments: false -BinPackParameters: false -BraceWrapping: - AfterCaseLabel: true - AfterClass: true - AfterControlStatement: true - AfterEnum: true - AfterFunction: true - AfterNamespace: true - AfterStruct: true - AfterUnion: true - BeforeCatch: true - BeforeElse: true - IndentBraces: false - SplitEmptyFunction: false - SplitEmptyRecord: false - SplitEmptyNamespace: false - AfterExternBlock: false # Keeps the contents un-indented. -BreakBeforeBinaryOperators: None -BreakBeforeBraces: Custom -BreakBeforeTernaryOperators: true -BreakConstructorInitializers: AfterColon -# BreakInheritanceList: AfterColon -BreakStringLiterals: true -ColumnLimit: 120 -CommentPragmas: '^ (coverity|pragma:)' -CompactNamespaces: false -ConstructorInitializerAllOnOneLineOrOnePerLine: true -ConstructorInitializerIndentWidth: 4 -ContinuationIndentWidth: 4 -Cpp11BracedListStyle: true -DerivePointerAlignment: false -DisableFormat: false -ExperimentalAutoDetectBinPacking: false -FixNamespaceComments: true -ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] -IncludeBlocks: Preserve -IndentCaseLabels: false -IndentPPDirectives: AfterHash -IndentWidth: 4 -IndentWrappedFunctionNames: false -KeepEmptyLinesAtTheStartOfBlocks: false -MacroBlockBegin: '' -MacroBlockEnd: '' -MaxEmptyLinesToKeep: 1 -NamespaceIndentation: None -PenaltyBreakAssignment: 2 -PenaltyBreakBeforeFirstCallParameter: 10000 # Raised intentionally; prefer breaking all -PenaltyBreakComment: 300 -PenaltyBreakFirstLessLess: 120 -PenaltyBreakString: 1000 -PenaltyExcessCharacter: 1000000 -PenaltyReturnTypeOnItsOwnLine: 10000 # Raised intentionally because it hurts readability -PointerAlignment: Left -ReflowComments: true -SortIncludes: false -SortUsingDeclarations: false -SpaceAfterCStyleCast: true -SpaceAfterTemplateKeyword: true -SpaceBeforeAssignmentOperators: true -SpaceBeforeCpp11BracedList: false -SpaceBeforeInheritanceColon: true -SpaceBeforeParens: ControlStatements -SpaceBeforeCtorInitializerColon: true -SpaceBeforeRangeBasedForLoopColon: true -SpaceInEmptyParentheses: false -SpacesBeforeTrailingComments: 2 -SpacesInAngles: false -SpacesInCStyleCastParentheses: false -SpacesInContainerLiterals: false -SpacesInParentheses: false -SpacesInSquareBrackets: false -Standard: Cpp11 -TabWidth: 8 -UseTab: Never -... 
+SortIncludes: false diff --git a/.clang-tidy b/.clang-tidy index eae1f9f..503d9af 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -22,11 +22,18 @@ Checks: >- -boost-use-ranges, -hicpp-static-assert, -misc-static-assert, - -modernize-macro-to-enum, - -cppcoreguidelines-macro-to-enum, - -bugprone-casting-through-void, + -*-macro-to-enum, + -*-macro-usage, + -*-enum-size, + -*-use-using, + -*-casting-through-void, -misc-include-cleaner, -cppcoreguidelines-avoid-do-while, + -*-magic-numbers, + -*-use-enum-class, + -*-use-trailing-return-type, + -*-deprecated-headers, + -*-avoid-c-arrays, CheckOptions: - key: readability-function-cognitive-complexity.Threshold value: '99' diff --git a/.gdbinit b/.gdbinit new file mode 100644 index 0000000..b27c408 --- /dev/null +++ b/.gdbinit @@ -0,0 +1,3 @@ +skip make_frame_base +skip make_frame +skip cavl2_min diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index ab97081..c7efd7c 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -9,7 +9,7 @@ jobs: container: ghcr.io/opencyphal/toolshed:ts24.4.3 strategy: matrix: - toolchain: [ 'clang', 'gcc' ] + toolchain: [ "clang", "gcc" ] include: - toolchain: gcc c-compiler: gcc @@ -24,23 +24,28 @@ jobs: # language=bash - run: > cmake - -B ${{ github.workspace }}/build + -B $GITHUB_WORKSPACE/build -DCMAKE_BUILD_TYPE=Debug -DCMAKE_C_COMPILER=${{ matrix.c-compiler }} -DCMAKE_CXX_COMPILER=${{ matrix.cxx-compiler }} . # language=bash - run: | - cd ${{ github.workspace }}/build + cd $GITHUB_WORKSPACE/build make VERBOSE=1 -j$(nproc) make test ARGS="--verbose" + - name: Archive workspace + if: always() + run: | + cd $GITHUB_WORKSPACE + tar --use-compress-program="gzip -9" -cf $RUNNER_TEMP/workspace.tar.gz . - uses: actions/upload-artifact@v4 if: always() with: # The matrix is shown for convenience but this is fragile because the values may not be string-convertible. # Shall it break one day, feel free to remove the matrix from here. name: ${{github.job}}-#${{strategy.job-index}}-${{job.status}}-${{join(matrix.*, ',')}} - path: ${{github.workspace}}/**/* + path: ${{runner.temp}}/workspace.tar.gz retention-days: 2 optimizations: @@ -49,13 +54,13 @@ jobs: container: ghcr.io/opencyphal/toolshed:ts24.4.3 strategy: matrix: - toolchain: [ 'clang', 'gcc' ] + toolchain: [ "clang", "gcc" ] build_type: [ Release, MinSizeRel ] include: - toolchain: gcc c-compiler: gcc cxx-compiler: g++ - cxx-flags: -fno-strict-aliasing # GCC in MinSizeRel C++20 mode misoptimizes the Cavl test. + cxx-flags: "" - toolchain: clang c-compiler: clang cxx-compiler: clang++ @@ -66,7 +71,7 @@ jobs: # language=bash - run: > cmake - -B ${{ github.workspace }}/build + -B $GITHUB_WORKSPACE/build -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DCMAKE_C_COMPILER=${{ matrix.c-compiler }} -DCMAKE_CXX_COMPILER=${{ matrix.cxx-compiler }} @@ -75,35 +80,68 @@ jobs: . # language=bash - run: | - cd ${{ github.workspace }}/build + cd $GITHUB_WORKSPACE/build make VERBOSE=1 -j$(nproc) make test ARGS="--verbose" + - name: Archive workspace + if: always() + run: | + cd $GITHUB_WORKSPACE + tar --use-compress-program="gzip -9" -cf $RUNNER_TEMP/workspace.tar.gz . - uses: actions/upload-artifact@v4 if: always() with: # The matrix is shown for convenience but this is fragile because the values may not be string-convertible. # Shall it break one day, feel free to remove the matrix from here. 
name: ${{github.job}}-#${{strategy.job-index}}-${{job.status}}-${{join(matrix.*, ',')}} - path: ${{github.workspace}}/**/* + path: ${{runner.temp}}/workspace.tar.gz retention-days: 2 - avr: + coverage: if: github.event_name == 'push' runs-on: ubuntu-latest - env: - mcu: at90can64 - flags: -Wall -Wextra -Werror -pedantic -Wconversion -Wtype-limits - strategy: - matrix: - std: [ 'c99', 'c11', 'gnu99', 'gnu11' ] + container: ghcr.io/opencyphal/toolshed:ts24.4.3 steps: - uses: actions/checkout@v4 + with: + submodules: true + # language=bash + - run: > + cmake -B $GITHUB_WORKSPACE/build -DCMAKE_BUILD_TYPE=Debug -DNO_STATIC_ANALYSIS=ON -DENABLE_COVERAGE=ON . # language=bash - run: | - sudo apt update -y && sudo apt upgrade -y - sudo apt install gcc-avr avr-libc - avr-gcc --version - - run: avr-gcc libudpard/*.c -c -std=${{matrix.std}} -mmcu=${{env.mcu}} ${{env.flags}} + cd $GITHUB_WORKSPACE/build + make -j$(nproc) && make test && make coverage + - name: Archive workspace + if: always() + run: | + cd $GITHUB_WORKSPACE + tar --use-compress-program="gzip -9" -cf $RUNNER_TEMP/workspace.tar.gz . + - uses: actions/upload-artifact@v4 + if: always() + with: + name: ${{github.job}}-#${{strategy.job-index}}-${{job.status}} + path: ${{runner.temp}}/workspace.tar.gz + retention-days: 30 + + # TODO: re-enable this + # avr: + # if: github.event_name == 'push' + # runs-on: ubuntu-latest + # env: + # mcu: at90can64 + # flags: -Wall -Wextra -Werror -pedantic -Wconversion -Wtype-limits + # strategy: + # matrix: + # std: [ 'c99', 'c11', 'gnu99', 'gnu11' ] + # steps: + # - uses: actions/checkout@v4 + # # language=bash + # - run: | + # sudo apt update -y && sudo apt upgrade -y + # sudo apt install gcc-avr avr-libc + # avr-gcc --version + # - run: avr-gcc -Ilib/cavl/ libudpard/*.c -c -std=${{matrix.std}} -mmcu=${{env.mcu}} ${{env.flags}} arm: if: github.event_name == 'push' @@ -112,14 +150,14 @@ jobs: flags: -Wall -Wextra -Werror -pedantic -Wconversion -Wtype-limits -Wcast-align -Wfatal-errors strategy: matrix: - std: [ 'c99', 'c11', 'gnu99', 'gnu11' ] + std: [ "c99", "c11", "gnu99", "gnu11" ] steps: - uses: actions/checkout@v4 # language=bash - run: | sudo apt update -y && sudo apt upgrade -y sudo apt-get install -y gcc-arm-none-eabi - - run: arm-none-eabi-gcc libudpard/*.c -c -std=${{matrix.std}} ${{ env.flags }} + - run: arm-none-eabi-gcc -Ilib/cavl/ libudpard/*.c -c -std=${{matrix.std}} ${{ env.flags }} sonar: runs-on: ubuntu-latest @@ -138,12 +176,12 @@ jobs: steps: - uses: actions/checkout@v4 with: - fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis + fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis submodules: true - uses: actions/setup-java@v4 with: java-version: 17 - distribution: 'zulu' + distribution: "zulu" # language=bash - run: | clang --version @@ -178,6 +216,6 @@ jobs: - uses: actions/checkout@v4 - uses: DoozyX/clang-format-lint-action@v0.20 with: - source: './libudpard ./tests' - extensions: 'c,h,cpp,hpp' + source: "./libudpard ./tests" + extensions: "c,h,cpp,hpp" clangFormatVersion: ${{ env.LLVM_VERSION }} diff --git a/.gitignore b/.gitignore index 30e85f4..d42729f 100644 --- a/.gitignore +++ b/.gitignore @@ -20,16 +20,17 @@ *.hex *.dSYM/ *build/ +build*/ cmake-build-*/ -build-avr/ +.cache/ .metadata .settings .project .cproject .pydevproject -.gdbinit .scannerwork/ .vscode/ +.sisyphus/ **/.idea/* !**/.idea/dictionaries !**/.idea/dictionaries/* diff --git a/.idea/dictionaries/project.xml b/.idea/dictionaries/project.xml new 
file mode 100644
index 0000000..8d9a0c0
--- /dev/null
+++ b/.idea/dictionaries/project.xml
@@ -0,0 +1,24 @@
+<component name="ProjectDictionaryState">
+  <dictionary name="project">
+    <words>
+      <w>abcdefghij</w>
+      <w>abcdefghijk</w>
+      <w>abcdefghijklmnopqrst</w>
+      <w>acks</w>
+      <w>dups</w>
+      <w>efgh</w>
+      <w>fghij</w>
+      <w>fstate</w>
+      <w>klmno</w>
+      <w>klmnopqrst</w>
+      <w>lmnopqrst</w>
+      <w>mnop</w>
+      <w>noinit</w>
+      <w>objcount</w>
+      <w>optin</w>
+      <w>pqrst</w>
+      <w>stdatomic</w>
+      <w>tidwin</w>
+    </words>
+  </dictionary>
+</component>
\ No newline at end of file
diff --git a/.zed/tasks.json b/.zed/tasks.json
new file mode 100644
index 0000000..d7e48a6
--- /dev/null
+++ b/.zed/tasks.json
@@ -0,0 +1,37 @@
+[
+  {
+    "label": "Configure (CMake)",
+    "command": "cmake -B build",
+    "tags": ["build"]
+  },
+  {
+    "label": "Build",
+    "command": "cmake --build build",
+    "tags": ["build"]
+  },
+  {
+    "label": "Configure and Build",
+    "command": "cmake -B build && cmake --build build",
+    "tags": ["build"]
+  },
+  {
+    "label": "Run Tests",
+    "command": "ctest --test-dir build --output-on-failure",
+    "tags": ["test"]
+  },
+  {
+    "label": "Clean",
+    "command": "rm -rf build",
+    "tags": ["build"]
+  },
+  {
+    "label": "Format Code",
+    "command": "cmake --build build --target format",
+    "tags": ["format"]
+  },
+  {
+    "label": "Full Build and Test",
+    "command": "cmake -B build && cmake --build build && ctest --test-dir build --output-on-failure",
+    "tags": ["build", "test"]
+  }
+]
diff --git a/AGENTS.md b/AGENTS.md
new file mode 100644
index 0000000..8e77306
--- /dev/null
+++ b/AGENTS.md
@@ -0,0 +1,14 @@
+# LibUDPard instructions for AI agents
+
+Please read `README.md` for general information about LibUDPard, and `CONTRIBUTING.md` for development-related notes.
+
+Keep the code and comments very brief. Be sure every significant code block is preceded with a brief comment.
+
+If you need a build directory, create one in the project root named with a `build` prefix;
+you can also use existing build directories if you prefer,
+but avoid using `cmake-build-*` because these are used by CLion.
+When building the code, don't hesitate to use multiple jobs so that all CPU cores are utilized.
+
+Run all tests in debug build to ensure that all assertion checks are enabled.
+
+It is best to use Clang-Format to format the code when done editing.
diff --git a/CLAUDE.md b/CLAUDE.md
new file mode 120000
index 0000000..47dc3e3
--- /dev/null
+++ b/CLAUDE.md
@@ -0,0 +1 @@
+AGENTS.md
\ No newline at end of file
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 68d7b54..624b4bb 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -12,13 +12,14 @@
 cmake_minimum_required(VERSION 3.20)
 
 project(udpard)
 enable_testing()
 
+set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
+
 # Shared Clang-Format target for all subprojects.
 find_program(clang_format NAMES clang-format)
 if (NOT clang_format)
     message(STATUS "Could not locate clang-format")
 else ()
     file(GLOB_RECURSE format_files
-            ${CMAKE_CURRENT_SOURCE_DIR}/demo/*.[ch]
             ${CMAKE_CURRENT_SOURCE_DIR}/libudpard/*.[ch]
             ${CMAKE_CURRENT_SOURCE_DIR}/tests/*.[ch]
             ${CMAKE_CURRENT_SOURCE_DIR}/tests/*.[ch]pp)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index d226f55..2875211 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -18,7 +18,7 @@ are impossible to track at the source code level.
 An exception applies for the case of false-positive (invalid) warnings -- those should not be mentioned in the codebase.
 
 Unfortunately, some rules are hard or impractical to enforce automatically,
-so code reviewers shall be aware of MISRA and general high-reliability coding practices
+so code reviewers should be aware of MISRA and general high-reliability coding practices
 to prevent non-compliant code from being accepted into upstream.
## Build & test @@ -27,6 +27,8 @@ Consult with the CI workflow files for the required tools and build & test instr You may want to use the [toolshed](https://github.com/OpenCyphal/docker_toolchains/pkgs/container/toolshed) container for this. +To run tests with coverage reports, refer to the instructions in `tests/CMakeLists.txt`. + ## Releasing Simply create a new release & tag on GitHub. diff --git a/MIGRATION_v1.x_to_v2.0.md b/MIGRATION_v1.x_to_v2.0.md deleted file mode 100644 index 4771b45..0000000 --- a/MIGRATION_v1.x_to_v2.0.md +++ /dev/null @@ -1,197 +0,0 @@ -# Migration Guide: Upgrading from LibUDPard v1.x to v2.0 - -This migration guide provides step-by-step instructions to help you update your application code from LibUDPard version 1.x to version 2.0. The guide highlights the key changes in the API and offers recommendations on how to adapt your code accordingly. - -## Introduction - -LibUDPard version 2.0 introduces several significant changes to improve memory management and payload handling. This guide will help you understand these changes and update your application code to be compatible with the new version. - -These changes do not affect wire compatibility. - -## Version Changes - -- **LibUDPard Version**: - - **Old**: `UDPARD_VERSION_MAJOR 1`, `UDPARD_VERSION_MINOR 2` - - **New**: `UDPARD_VERSION_MAJOR 2`, `UDPARD_VERSION_MINOR 0` -- **Cyphal Specification Version**: Remains the same (`1.0`). - -## Key API Changes - -### UdpardTx Structure Changes - -- **Memory Resource Field**: The `UdpardTx` structure's `memory` field type has changed from `UdpardMemoryResource` to `UdpardTxMemoryResources`. - - ```c - // In v1.x - struct UdpardTx { - // ... - struct UdpardMemoryResource memory; - // ... - }; - - // In v2.0 - struct UdpardTx { - // ... - struct UdpardTxMemoryResources memory; - // ... - }; - ``` - -### Memory Management Adjustments - -- **Separate Memory Resources**: `UdpardTxMemoryResources` now allows separate memory resources for fragment handles and payload storage. - - ```c - struct UdpardTxMemoryResources { - struct UdpardMemoryResource fragment; // For UdpardTxItem allocations - struct UdpardMemoryResource payload; // For datagram payload allocations - }; - ``` - -- **Memory Allocation Changes**: The number of memory allocations per datagram has increased from one to two: - - **v1.x**: One allocation per datagram (including `UdpardTxItem` and payload). - - **v2.0**: Two allocations per datagram—one for `UdpardTxItem` and one for the payload. - -### UdpardTxItem Structure Updates - -- **Mutable datagram_payload Field**: The `datagram_payload` field in `UdpardTxItem` is now mutable, allowing ownership transfer of the payload. - -- **New priority Field**: A new `priority` field has been added to `UdpardTxItem` to retain the original transfer priority level. - - ```c - struct UdpardTxItem { - // ... - enum UdpardPriority priority; // New field in v2.0 - struct UdpardMutablePayload datagram_payload; // Now mutable - // ... - }; - ``` - -### Function Signature Modifications - -- **udpardTxInit**: The `memory` parameter type has changed to `UdpardTxMemoryResources`. 
- - ```c - // In v1.x - int_fast8_t udpardTxInit( - struct UdpardTx* self, - const UdpardNodeID* local_node_id, - size_t queue_capacity, - struct UdpardMemoryResource memory - ); - - // In v2.0 - int_fast8_t udpardTxInit( - struct UdpardTx* self, - const UdpardNodeID* local_node_id, - size_t queue_capacity, - struct UdpardTxMemoryResources memory - ); - ``` - -- **udpardTxFree**: The `memory` parameter type has changed to `UdpardTxMemoryResources`. - - ```c - // In v1.x - void udpardTxFree( - const struct UdpardMemoryResource memory, - struct UdpardTxItem* item - ); - - // In v2.0 - void udpardTxFree( - const struct UdpardTxMemoryResources memory, - struct UdpardTxItem* item - ); - ``` - -- **udpardTxPeek**: The return type has changed from `const struct UdpardTxItem*` to `struct UdpardTxItem*` to allow modification of the `datagram_payload` field. - - ```c - // In v1.x - const struct UdpardTxItem* udpardTxPeek(const struct UdpardTx* self); - - // In v2.0 - struct UdpardTxItem* udpardTxPeek(const struct UdpardTx* self); - ``` - -## Migration Steps - -Follow these steps to update your application code to be compatible with LibUDPard v2.0. - -### 1. Update UdpardTx Initialization - -- **Adjust the `udpardTxInit` Call**: Update the `memory` parameter to use `UdpardTxMemoryResources`. - - ```c - // Before (v1.x) - struct UdpardMemoryResource tx_memory = { /*...*/ }; - udpardTxInit(&tx_instance, &local_node_id, queue_capacity, tx_memory); - - // After (v2.0) - struct UdpardTxMemoryResources tx_memory = { - .fragment = { /*...*/ }, - .payload = { /*...*/ } - }; - udpardTxInit(&tx_instance, &local_node_id, queue_capacity, tx_memory); - ``` - -- **Define Separate Memory Resources**: Initialize separate memory resources for fragments and payloads. - -### 2. Adjust Memory Resources - -- **Update Memory Allocation Logic**: Ensure that your memory allocator handles two separate allocations per datagram—one for `UdpardTxItem` and one for the payload. - - ```c - // Example allocator adjustments - void* allocate_fragment(void* user_reference, size_t size) { /*...*/ } - void* allocate_payload(void* user_reference, size_t size) { /*...*/ } - ``` - -### 3. Modify UdpardTxItem Usage - -- **Handle Mutable Payloads**: Since `datagram_payload` is now mutable, you can transfer ownership of the payload to another component (e.g., transmission media) by nullifying the `size` and `data` fields after copying. - - ```c - struct UdpardTxItem* tx_item = udpardTxPeek(&tx_instance); - if (tx_item) { - // Transfer ownership of the payload - transmit_payload(tx_item->datagram_payload.data, tx_item->datagram_payload.size); - tx_item->datagram_payload.data = NULL; - tx_item->datagram_payload.size = 0; - - // Pop and free the item after transmission - udpardTxPop(&tx_instance, tx_item); - udpardTxFree(tx_instance.memory, tx_item); - } - ``` - -- **Utilize the New priority Field**: Access the `priority` field in `UdpardTxItem` if needed for your application logic. - - ```c - enum UdpardPriority tx_priority = tx_item->priority; - ``` - -### 4. Revise Function Calls - -- **Update `udpardTxFree` Calls**: Pass the updated `memory` parameter type. - - ```c - // Before (v1.x) - udpardTxFree(tx_memory, tx_item); - - // After (v2.0) - udpardTxFree(tx_instance.memory, tx_item); - ``` - -- **Modify `udpardTxPeek` Usage**: Since `udpardTxPeek` now returns a mutable pointer, update your code to handle the mutable `UdpardTxItem`. 
- - ```c - // Before (v1.x) - const struct UdpardTxItem* tx_item = udpardTxPeek(&tx_instance); - - // After (v2.0) - struct UdpardTxItem* tx_item = udpardTxPeek(&tx_instance); - ``` - -- **Ensure Correct Deallocation**: When freeing payloads, use the appropriate memory resource from `UdpardTxMemoryResources`. diff --git a/README.md b/README.md index 5a5197b..55bc1eb 100644 --- a/README.md +++ b/README.md @@ -1,55 +1,48 @@ -# Compact Cyphal/UDP in C +
+ +# Cyphal/UDP transport in C [![Main Workflow](https://github.com/OpenCyphal-Garage/libudpard/actions/workflows/main.yml/badge.svg)](https://github.com/OpenCyphal-Garage/libudpard/actions/workflows/main.yml) [![Reliability Rating](https://sonarcloud.io/api/project_badges/measure?project=libudpard&metric=reliability_rating)](https://sonarcloud.io/summary?id=libudpard) [![Coverage](https://sonarcloud.io/api/project_badges/measure?project=libudpard&metric=coverage)](https://sonarcloud.io/summary?id=libudpard) [![Forum](https://img.shields.io/discourse/users.svg?server=https%3A%2F%2Fforum.opencyphal.org&color=1700b3)](https://forum.opencyphal.org) -LibUDPard is a compact implementation of the Cyphal/UDP protocol in C99/C11 for high-integrity real-time -embedded systems. - -[Cyphal](https://opencyphal.org) is an open lightweight data bus standard designed for reliable intravehicular -communication in aerospace and robotic applications via CAN bus, UDP, and other robust transports. +
-We pronounce LibUDPard as *lib-you-dee-pee-ard*.
+-----
-## Features
+LibUDPard is a robust implementation of the Cyphal/UDP transport layer in C99/C11 for high-integrity real-time systems.
-Some of the features listed here are intrinsic properties of Cyphal.
+[Cyphal](https://opencyphal.org) is an open technology for real-time intravehicular distributed computing and
+communication based on modern networking standards (Ethernet, CAN FD, etc.).
+It was created to address the challenge of on-board deterministic computing and data distribution in
+next-generation intelligent vehicles: manned and unmanned aircraft, spacecraft, robots, and cars.
-- Full branch coverage and extensive static analysis.
-
-- Compliance with automatically enforceable MISRA C rules (reach out to https://forum.opencyphal.org for details).
+## Features
+- Zero-copy RX pipeline -- payload is moved from the NIC driver all the way to the application without copying.
+- ≤1-copy TX pipeline with deduplication across multiple interfaces and scattered input buffer support.
+- Support for redundant network interfaces with seamless interface aggregation and zero fail-over delay.
+- Robust message reassembler supporting highly distorted datagram streams:
+  out-of-order fragments, message ordering recovery, fragment/message deduplication, interleaving, variable MTU, ...
+- Robust message ordering recovery for ordering-sensitive applications (e.g., state estimators, control loops)
+  with well-defined deterministic recovery in the event of lost messages.
+- Packet loss mitigation via:
+  - reliable topics (retransmit until acknowledged; callback notifications for successful/failed deliveries);
+  - redundant interfaces (a packet lost on one interface may be received on another, transparent to the application).
+- Heap not required (but supported); the library can be used with fixed-size block pool allocators.
 - Detailed time complexity and memory requirement models for the benefit of real-time high-integrity applications.
-
-- Purely reactive time-deterministic API without the need for background servicing.
-
-- Zero-copy data pipeline on reception --
-  payload is moved from the underlying NIC driver all the way to the application without copying.
-
-- Support for redundant network interfaces with seamless interface aggregation and no fail-over delay.
-
-- Out-of-order multi-frame transfer reassembly, including cross-transfer interleaved frames.
-
-- Support for repetition-coding forward error correction (FEC) for lossy links (e.g., wireless)
-  transparent to the application.
-
-- No dependency on heap memory; the library can be used with fixed-size block pool allocators.
-
-- Compatibility with all conventional 8/16/32/64-bit platforms.
-
-- Compatibility with extremely resource-constrained baremetal environments starting from 64K ROM and 64K RAM.
-
-- Implemented in ≈2000 lines of code.
+- Highly scalable: designed to handle thousands of topics and hundreds of concurrent transfers with minimal resources.
+- Runs anywhere out of the box, including extremely resource-constrained baremetal environments with ~100K ROM/RAM.
+  No porting required.
+- Partial MISRA C compliance (reach out to <https://forum.opencyphal.org>).
+- Full implementation in a single C file with only 2k lines of straightforward C99!
+- Extensive verification suite.
 
 ## Usage
 
-The library implements the Cyphal/UDP protocol, which is a transport-layer entity.
-An application using this library will need to implement the presentation layer above the library, -perhaps with the help of the [Nunavut transpiler](https://github.com/OpenCyphal/nunavut), -and the network layer below the library using a third-party UDP/IP stack implementation with multicast/IGMP support -(TCP and ARP are not needed). +An application using this library will need to provide a third-party UDP/IP stack with multicast/IGMP support +(TCP not needed). In the most straightforward case, the network layer can be based on the standard Berkeley socket API or a lightweight embedded stack such as LwIP. @@ -57,31 +50,37 @@ or a lightweight embedded stack such as LwIP. %%{init: {"fontFamily": "Ubuntu Mono, monospace", "flowchart": {"curve": "basis"}}}%% flowchart TD classDef OpenCyphal color:#00DAC6,fill:#1700b3,stroke:#00DAC6,stroke-width:2px,font-weight:600 - Application <-->|messages,\nrequests,\nresponses| LibUDPard[fa:fa-code LibUDPard] + Application <-->|messages| LibUDPard[fa:fa-code LibUDPard] class LibUDPard OpenCyphal - LibUDPard <-->|multicast datagrams| UDP + LibUDPard <-->|datagrams| UDP subgraph domain_udpip["3rd-party UDP/IP+IGMP stack"] UDP <--> IP["IPv4, IGMPv1+"] <--> MAC end MAC <--> PHY ``` -To integrate the library into your application, simply copy the files under `libudpard/` into your project tree, -or add this entire repository as a submodule. -The library contains only one translation unit named `udpard.c`; -no special compiler options are needed to build it. -The library should be compatible with all conventional computer architectures where a standards-compliant C99 compiler -is available. +To integrate the library into your application, simply copy `udpard.c` and `udpard.h` from `libudpard/` +into your project tree, or add this entire repository as a submodule; +also ensure you have [`cavl2.h`](https://github.com/pavel-kirienko/cavl) somewhere in your include paths. + +The library contains only one translation unit named `udpard.c`; no special compiler options are needed to build it. +The library should be compatible out of the box with all conventional computer architectures where a +standards-compliant C99 compiler is available. **Read the API docs in [`libudpard/udpard.h`](libudpard/udpard.h).** -For complete usage examples, please refer to . ## Revisions +### v3.0 -- WORK IN PROGRESS + +The library has been redesigned from scratch to support Cyphal v1.1, named topics, and reliable transfers. +No porting guide is provided since the changes are too significant; +please refer to the new API docs in `libudpard/udpard.h`. + ### v2.0 - Updating from LibUDPard v1 to v2 involves several significant changes to improve memory management and payload handling. -- Please follow [MIGRATION_v1.x_to_v2.0](MIGRATION_v1.x_to_v2.0.md) guide and carefully update your code. +- Please follow `MIGRATION_v1.x_to_v2.0.md` guide (available in v2 tree). ### v1.0 diff --git a/lib/cavl/cavl2.h b/lib/cavl/cavl2.h new file mode 100644 index 0000000..a102077 --- /dev/null +++ b/lib/cavl/cavl2.h @@ -0,0 +1,572 @@ +/// Source: https://github.com/pavel-kirienko/cavl +/// +/// Cavl is a single-header C library providing an implementation of AVL tree suitable for deeply embedded systems. +/// To integrate it into your project, simply copy this file into your source tree. +/// You can define build option macros before including the header to customize the behavior. +/// All definitions are prefixed with cavl2 to avoid collisions with other major versions of the library. 
+/// Read the API docs below.
+///
+/// See also O1Heap <https://github.com/pavel-kirienko/o1heap> -- a deterministic memory manager for hard-real-time
+/// high-integrity embedded systems.
+///
+/// Version history:
+///
+/// - v1.0: initial release.
+/// - v2.0:
+///   - Simplify the API and improve naming.
+///   - The header file now bears the major version number, which simplifies vendoring: a project now can safely
+///     depend on cavl without the risk of version compatibility issues.
+///   - For the same reason as above, all definitions are now prefixed with cavl2 instead of cavl.
+///   - Add optional CAVL2_T macro to allow overriding the cavl2_t type. This is needed for libudpard/libcanard/etc
+///     and is generally useful because it allows library vendors to avoid exposing cavl via the library API.
+///     Also add CAVL2_RELATION to simplify comparator implementations.
+///   - Add the trivial factory definition because it is needed in nearly every application using cavl.
+///   - New traversal function cavl2_next_greater() offering the same time complexity but without recursion/callbacks.
+///
+/// -------------------------------------------------------------------------------------------------------------------
+///
+/// Copyright (c) Pavel Kirienko
+///
+/// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+/// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
+/// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+/// and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+///
+/// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+/// the Software.
+///
+/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+/// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
+/// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+/// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+// ReSharper disable CppCStyleCast CppZeroConstantCanBeReplacedWithNullptr CppTooWideScopeInitStatement
+// ReSharper disable CppRedundantElaboratedTypeSpecifier CppRedundantInlineSpecifier
+#pragma once
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+/// If Cavl is used in throughput-critical code, then it is recommended to disable assertion checks as they may
+/// be costly in terms of execution time.
+#ifndef CAVL2_ASSERT
+#if defined(CAVL2_NO_ASSERT) && CAVL2_NO_ASSERT
+#define CAVL2_ASSERT(x) (void)0
+#else
+#include <assert.h>
+#define CAVL2_ASSERT(x) assert(x)
+#endif
+#endif
+
+#ifdef __cplusplus
+// This is, strictly speaking, useless because we do not define any functions with external linkage here,
+// but it tells static analyzers that what follows should be interpreted as C code rather than C++.
+extern "C"
+{
+#endif
+
+// ---------------------------------------- PUBLIC API SECTION ----------------------------------------
+
+/// CAVL2_T can be defined before including this header to provide a custom struct type for the node element.
+/// The custom type must have the same fields as the default struct cavl2_t.
+/// This option is useful if Cavl is integrated into a library without exposing it through the library API.
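+/// A minimal sketch of such an override (the type name my_node_t is hypothetical, not part of this API):
+///     struct my_node_t { struct my_node_t* up; struct my_node_t* lr[2]; int_fast8_t bf; };  // Mirrors cavl2_t.
+///     #define CAVL2_T struct my_node_t
+///     #include <cavl2.h>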
+#ifndef CAVL2_T
+/// The tree node/root. The user data is to be added through composition/inheritance.
+/// The memory layout of this type is compatible with void*[4], which is useful if this type cannot be exposed in API.
+/// Per standard convention, nodes that compare smaller are put on the left.
+/// Usage example:
+///     struct my_user_type_t {
+///         struct cavl2_t base;  ///< Tree node. Should be the first element, otherwise, offsetof() will be needed.
+///         ... user data ...
+///     };
+struct cavl2_t
+{
+    struct cavl2_t* up;     ///< Parent node, NULL in the root.
+    struct cavl2_t* lr[2];  ///< Left child (lesser), right child (greater).
+    int_fast8_t     bf;     ///< Balance factor is positive when right-heavy. Allowed values are {-1, 0, +1}.
+};
+#define CAVL2_T struct cavl2_t
+#endif
+
+#if defined(static_assert) || defined(__cplusplus)
+static_assert(sizeof(CAVL2_T) <= sizeof(void* [4]), "Bad size");
+#endif
+
+/// The comparator result type can be overridden to simplify comparator functions.
+/// The type shall be a signed integer type.
+/// Only three possible states of the result are considered: negative, zero, and positive; the magnitude is ignored.
+#ifndef CAVL2_RELATION
+#define CAVL2_RELATION ptrdiff_t
+#endif
+/// Returns POSITIVE if the search target is GREATER than the provided node, negative if smaller, zero on match (found).
+typedef CAVL2_RELATION (*cavl2_comparator_t)(const void* user, const CAVL2_T* node);
+
+/// If provided, the factory will be invoked when the sought node does not exist in the tree.
+/// It is expected to return a new node that will be inserted immediately (without the need to traverse the tree again).
+/// If the factory returns NULL or is not provided, the tree is not modified.
+typedef CAVL2_T* (*cavl2_factory_t)(void* user);
+
+/// Look for a node in the tree using the specified comparator. The worst-case complexity is O(log n).
+/// - If the node is found (i.e., zero comparison result), return it.
+/// - If the node is not found and the factory is NULL, return NULL.
+/// - Otherwise, construct a new node using the factory; if the result is not NULL, insert it; return the result.
+/// The user_comparator is passed into the comparator unmodified.
+/// The user_factory is passed into the factory unmodified.
+/// The root node may be replaced in the process iff the factory is not NULL and it returns a new node;
+/// otherwise, the root node will not be modified.
+/// If comparator is NULL, returns NULL.
+static inline CAVL2_T* cavl2_find_or_insert(CAVL2_T** const          root,
+                                            const void* const        user_comparator,
+                                            const cavl2_comparator_t comparator,
+                                            void* const              user_factory,
+                                            const cavl2_factory_t    factory);
+
+/// A convenience wrapper over cavl2_find_or_insert() that passes NULL factory, so the tree is never modified.
+/// Since the tree is not modified, the root pointer is passed by value, unlike in the mutating version.
+static inline CAVL2_T* cavl2_find(CAVL2_T* root, const void* const user_comparator, const cavl2_comparator_t comparator)
+{
+    return cavl2_find_or_insert(&root, user_comparator, comparator, NULL, NULL);
+}
+
+/// Remove the specified node from its tree. The root node may be replaced in the process.
+/// The worst-case complexity is O(log n).
+/// The function has no effect if either of the pointers is NULL.
+/// If the node is not in the tree, the behavior is undefined; it may create cycles in the tree which is deadly.
+/// It is safe to pass the result of cavl2_find/cavl2_find_or_insert directly as the second argument: +/// cavl2_remove(&root, cavl2_find(&root, user, search_comparator)); +/// The removed node will have all of its pointers set to NULL. +static inline void cavl2_remove(CAVL2_T** const root, CAVL2_T* const node); + +/// Replace the specified node with another node without rebalancing. +/// This is useful when you want to replace a node with an equivalent one (same key ordering). +/// The new node takes over the position (parent, children, balance factor) of the old node. +/// The old node will have all of its pointers set to NULL. +/// The new node must not already be in the tree; if it is, the behavior is undefined. +/// The new node's fields (up, lr, bf) will be overwritten to match the old node's position in the tree. +/// The complexity is O(1). +/// The function has no effect if any of the pointers are NULL. +/// If the old node is not in the tree, the behavior is undefined. +static inline void cavl2_replace(CAVL2_T** const root, CAVL2_T* const old_node, CAVL2_T* const new_node); + +/// True iff the node is in the tree. The complexity is O(1). +/// Returns false if the node is NULL. +/// Assumes that the node pointers are NULL when it is not inserted (this is ensured by the removal function). +static inline bool cavl2_is_inserted(const CAVL2_T* const root, const CAVL2_T* const node) +{ + bool out = false; + if (node != NULL) { + out = (node->up != NULL) || (node->lr[0] != NULL) || (node->lr[1] != NULL) || (node == root); + } + return out; +} + +/// Remove the specified node if it is inserted in the tree; otherwise, do nothing. +/// This is a convenience wrapper that combines cavl2_is_inserted() and cavl2_remove(). +/// Returns true if the node was inserted and has been removed, false otherwise. +static inline bool cavl2_remove_if(CAVL2_T** const root, CAVL2_T* const node) +{ + bool removed = false; + if ((root != NULL) && cavl2_is_inserted(*root, node)) { + cavl2_remove(root, node); + removed = true; + } + return removed; +} + +/// Return the min-/max-valued node stored in the tree, depending on the flag. This is an extremely fast query. +/// Returns NULL iff the argument is NULL (i.e., the tree is empty). The worst-case complexity is O(log n). +static inline CAVL2_T* cavl2_extremum(CAVL2_T* const root, const bool maximum) +{ + CAVL2_T* result = NULL; + CAVL2_T* c = root; + while (c != NULL) { + result = c; + c = c->lr[maximum]; + } + return result; +} + +// clang-format off +/// Convenience wrappers for cavl2_extremum(). +static inline CAVL2_T* cavl2_min(CAVL2_T* const root) { return cavl2_extremum(root, false); } +static inline CAVL2_T* cavl2_max(CAVL2_T* const root) { return cavl2_extremum(root, true); } +// clang-format on + +/// Returns the next greater node in the in-order traversal of the tree. +/// Does nothing and returns NULL if the argument is NULL. Behavior undefined if the node is not in the tree. +/// To use it, first invoke cavl2_min() to get the first node, then call this function repeatedly until it returns NULL: +/// for (CAVL2_T* p = cavl2_min(root); p != NULL; p = cavl2_next_greater(p)) { +/// ... +/// } +/// The asymptotic complexity for traversing the entire tree is O(n), identical to the traditional recursive traversal. 
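+/// Since cavl2_remove() nullifies the pointers of the removed node, a traversal that removes nodes should fetch the
+/// successor before removing the current node. A sketch (should_remove() is a hypothetical application predicate):
+///     for (CAVL2_T* p = cavl2_min(root); p != NULL;) {
+///         CAVL2_T* const next = cavl2_next_greater(p);
+///         if (should_remove(p)) {
+///             cavl2_remove(&root, p);
+///         }
+///         p = next;
+///     }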
+static inline CAVL2_T* cavl2_next_greater(CAVL2_T* const node)
+{
+    CAVL2_T* c = NULL;
+    if (node != NULL) {
+        if (node->lr[1] != NULL) {
+            c = cavl2_min(node->lr[1]);
+        } else {
+            const CAVL2_T* n = node;
+            CAVL2_T*       p = node->up;
+            while ((p != NULL) && (p->lr[1] == n)) {
+                n = p;
+                p = p->up;
+            }
+            c = p;
+        }
+    }
+    return c;
+}
+
+/// Find the smallest node whose value is greater than or equal to the search target, in O(log n).
+/// Returns the first node for which the comparator returns a non-positive result.
+/// If no such node exists (all nodes compare less than target), returns NULL.
+/// The comparator returns: positive if target>candidate, zero if target==candidate, negative if target<candidate.
+/// Example: tree={1,3,5,7}, target=4 => 5; target=5 => 5; target=8 => NULL.
+static inline CAVL2_T* cavl2_lower_bound(CAVL2_T* const           root,
+                                         const void* const        user,
+                                         const cavl2_comparator_t comparator)
+{
+    CAVL2_T* result = NULL;
+    if ((root != NULL) && (comparator != NULL)) {
+        CAVL2_T* n = root;
+        while (n != NULL) {
+            const CAVL2_RELATION cmp = comparator(user, n);
+            if (cmp <= 0) {
+                result = n;
+                n = n->lr[0];
+            } else {
+                n = n->lr[1];
+            }
+        }
+    }
+    return result;
+}
+
+/// Find the smallest node whose value is strictly greater than the search target (upper bound).
+/// Returns the first node for which the comparator returns a negative result.
+/// See cavl2_lower_bound() for details.
+/// Example: tree={1,3,5,7}, target=4 => 5; target=5 => 7; target=7 => NULL.
+static inline CAVL2_T* cavl2_upper_bound(CAVL2_T* const           root,
+                                         const void* const        user,
+                                         const cavl2_comparator_t comparator)
+{
+    CAVL2_T* result = NULL;
+    if ((root != NULL) && (comparator != NULL)) {
+        CAVL2_T* n = root;
+        while (n != NULL) {
+            const CAVL2_RELATION cmp = comparator(user, n);
+            if (cmp < 0) {
+                result = n;
+                n = n->lr[0];
+            } else {
+                n = n->lr[1];
+            }
+        }
+    }
+    return result;
+}
+
+/// Find the largest node whose value is less than or equal to the search target, in O(log n).
+/// Returns the last node for which the comparator returns a non-negative result.
+/// See cavl2_lower_bound() for details.
+/// Example: tree={1,3,5,7}, target=4 => 3; target=5 => 5; target=0 => NULL.
+static inline CAVL2_T* cavl2_predecessor(CAVL2_T* const           root,
+                                         const void* const        user,
+                                         const cavl2_comparator_t comparator)
+{
+    CAVL2_T* result = NULL;
+    if ((root != NULL) && (comparator != NULL)) {
+        CAVL2_T* n = root;
+        while (n != NULL) {
+            const CAVL2_RELATION cmp = comparator(user, n);
+            if (cmp >= 0) {
+                result = n;
+                n = n->lr[1];
+            } else {
+                n = n->lr[0];
+            }
+        }
+    }
+    return result;
+}
+
+/// The successor counterpart of cavl2_predecessor() is an alias of cavl2_lower_bound(), provided for completeness only.
+/// Example: tree={1,3,5,7}, target=4 => 5; target=5 => 5; target=8 => NULL.
+static inline CAVL2_T* cavl2_successor(CAVL2_T* const root, const void* const user, const cavl2_comparator_t comparator)
+{
+    return cavl2_lower_bound(root, user, comparator);
+}
+
+/// The trivial factory is useful in most applications. It simply returns the user pointer converted to CAVL2_T.
+/// It is meant for use with cavl2_find_or_insert().
+static inline CAVL2_T* cavl2_trivial_factory(void* const user)
+{
+    return (CAVL2_T*)user;
+}
+
+/// A convenience macro for use when a struct is a member of multiple AVL trees. For example:
+///
+///     struct my_type_t {
+///         struct cavl2_t tree_a;
+///         struct cavl2_t tree_b;
+///         ...
+///     };
+///
+/// If we only have tree_a, we don't need this helper because the C standard guarantees that the address of a struct
+/// equals the address of its first member, always, so simply casting a tree node to (struct my_type_t*) yields
+/// a valid pointer to the struct. However, if we have more than one tree node in a struct, for the other ones
+/// we will need to subtract the offset of the tree node field from the address of the tree node to get to the owner.
+/// This macro does exactly that. Example:
+///
+///     struct cavl2_t* tree_node_b = cavl2_find(...);    // whatever
+///     if (tree_node_b == NULL) { ... }                  // do something else
+///     struct my_type_t* my_struct = CAVL2_TO_OWNER(tree_node_b, struct my_type_t, tree_b);
+#define CAVL2_TO_OWNER(tree_node_ptr, owner_type, owner_tree_node_field) \
+    ((owner_type*)cavl2_impl_to_owner_helper((tree_node_ptr), offsetof(owner_type, owner_tree_node_field)))  // NOLINT
+
+// ---------------------------------------- END OF PUBLIC API SECTION ----------------------------------------
+// ----------------------------------------  POLICE LINE DO NOT CROSS  ----------------------------------------
+
+/// INTERNAL USE ONLY.
+static inline void* cavl2_impl_to_owner_helper(const void* const tree_node_ptr, const size_t offset)
+{
+    return (tree_node_ptr == NULL) ? NULL : (void*)((char*)tree_node_ptr - offset);
+}
+
+/// INTERNAL USE ONLY. Makes the '!r' child of node 'x' its parent; i.e., rotates 'x' toward 'r'.
+static inline void cavl2_impl_rotate(CAVL2_T* const x, const bool r)
+{
+    CAVL2_ASSERT((x != NULL) && (x->lr[!r] != NULL) && ((x->bf >= -1) && (x->bf <= +1)));
+    CAVL2_T* const z = x->lr[!r];
+    if (x->up != NULL) {
+        x->up->lr[x->up->lr[1] == x] = z;
+    }
+    z->up = x->up;
+    x->up = z;
+    x->lr[!r] = z->lr[r];
+    if (x->lr[!r] != NULL) {
+        x->lr[!r]->up = x;
+    }
+    z->lr[r] = x;
+}
+
+/// INTERNAL USE ONLY.
+/// Accepts a node and how its balance factor needs to be changed -- either +1 or -1.
+/// Returns the new node to replace the old one if tree rotation took place, same node otherwise.
+static inline CAVL2_T* cavl2_impl_adjust_balance(CAVL2_T* const x, const bool increment)
+{
+    CAVL2_ASSERT((x != NULL) && ((x->bf >= -1) && (x->bf <= +1)));
+    CAVL2_T*          out    = x;
+    const int_fast8_t new_bf = (int_fast8_t)(x->bf + (increment ? +1 : -1));
+    if ((new_bf < -1) || (new_bf > 1)) {
+        const bool        r    = new_bf < 0;  // bf<0 if left-heavy --> right rotation is needed.
+        const int_fast8_t sign = r ? +1 : -1; // Positive if we are rotating right.
+        CAVL2_T* const    z    = x->lr[!r];
+        CAVL2_ASSERT(z != NULL);  // Heavy side cannot be empty. NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
+        if ((z->bf * sign) <= 0) {  // Parent and child are heavy on the same side or the child is balanced.
+            out = z;
+            cavl2_impl_rotate(x, r);
+            if (0 == z->bf) {
+                x->bf = (int_fast8_t)(-sign);
+                z->bf = (int_fast8_t)(+sign);
+            } else {
+                x->bf = 0;
+                z->bf = 0;
+            }
+        } else {  // Otherwise, the child needs to be rotated in the opposite direction first.
+            CAVL2_T* const y = z->lr[r];
+            CAVL2_ASSERT(y != NULL);  // Heavy side cannot be empty.
+            out = y;
+            cavl2_impl_rotate(z, !r);
+            cavl2_impl_rotate(x, r);
+            if ((y->bf * sign) < 0) {
+                x->bf = (int_fast8_t)(+sign);
+                y->bf = 0;
+                z->bf = 0;
+            } else if ((y->bf * sign) > 0) {
+                x->bf = 0;
+                y->bf = 0;
+                z->bf = (int_fast8_t)(-sign);
+            } else {
+                x->bf = 0;
+                z->bf = 0;
+            }
+        }
+    } else {
+        x->bf = new_bf;  // Balancing not needed, just update the balance factor and call it a day.
+    }
+    return out;
+}
+
+/// INTERNAL USE ONLY.
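+/// Retraces upward from a freshly inserted node, adjusting ancestor balance factors and rotating where the AVL
+/// invariant would otherwise break.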
+/// Takes the culprit node (the one that is added); returns NULL or the root of the tree (possibly new one). +/// When adding a new node, set its balance factor to zero and call this function to propagate the changes upward. +static inline CAVL2_T* cavl2_impl_retrace_on_growth(CAVL2_T* const added) +{ + CAVL2_ASSERT((added != NULL) && (0 == added->bf)); + CAVL2_T* c = added; // Child + CAVL2_T* p = added->up; // Parent + while (p != NULL) { + const bool r = p->lr[1] == c; // c is the right child of parent + CAVL2_ASSERT(p->lr[r] == c); + c = cavl2_impl_adjust_balance(p, r); + p = c->up; + if (0 == c->bf) { // The height change of the subtree made this parent balanced (as all things should be), + break; // hence, the height of the outer subtree is unchanged, so upper balance factors are unchanged. + } + } + CAVL2_ASSERT(c != NULL); + return (NULL == p) ? c : NULL; // New root or nothing. +} + +static inline CAVL2_T* cavl2_find_or_insert(CAVL2_T** const root, + const void* const user_comparator, + const cavl2_comparator_t comparator, + void* const user_factory, + const cavl2_factory_t factory) +{ + CAVL2_T* out = NULL; + if ((root != NULL) && (comparator != NULL)) { + CAVL2_T* up = *root; + CAVL2_T** n = root; + while (*n != NULL) { + const CAVL2_RELATION cmp = comparator(user_comparator, *n); + if (0 == cmp) { + out = *n; + break; + } + up = *n; + n = &(*n)->lr[cmp > 0]; + CAVL2_ASSERT((NULL == *n) || ((*n)->up == up)); + } + if (NULL == out) { + out = (NULL == factory) ? NULL : factory(user_factory); + if (out != NULL) { + *n = out; // Overwrite the pointer to the new node in the parent node. + out->lr[0] = NULL; + out->lr[1] = NULL; + out->up = up; + out->bf = 0; + CAVL2_T* const rt = cavl2_impl_retrace_on_growth(out); + if (rt != NULL) { + *root = rt; + } + } + } + } + return out; +} + +static inline void cavl2_remove(CAVL2_T** const root, CAVL2_T* const node) +{ + if ((root != NULL) && (node != NULL)) { + CAVL2_ASSERT(*root != NULL); // Otherwise, the node would have to be NULL. + CAVL2_ASSERT((node->up != NULL) || (node == *root)); + CAVL2_T* p = NULL; // The lowest parent node that suffered a shortening of its subtree. + bool r = false; // Which side of the above was shortened. + // The first step is to update the topology and remember the node where to start the retracing from later. + // Balancing is not performed yet so we may end up with an unbalanced tree. + if ((node->lr[0] != NULL) && (node->lr[1] != NULL)) { + CAVL2_T* const re = cavl2_extremum(node->lr[1], false); + CAVL2_ASSERT((re != NULL) && (NULL == re->lr[0]) && (re->up != NULL)); + re->bf = node->bf; + re->lr[0] = node->lr[0]; + re->lr[0]->up = re; + if (re->up != node) { + p = re->up; // Retracing starts with the ex-parent of our replacement node. + CAVL2_ASSERT(p->lr[0] == re); + p->lr[0] = re->lr[1]; // Reducing the height of the left subtree here. + if (p->lr[0] != NULL) { + p->lr[0]->up = p; + } + re->lr[1] = node->lr[1]; + re->lr[1]->up = re; + r = false; + } else { // In this case, we are reducing the height of the right subtree, so r=1. + p = re; // Retracing starts with the replacement node itself as we are deleting its parent. + r = true; // The right child of the replacement node remains the same so we don't bother relinking it. + } + re->up = node->up; + if (re->up != NULL) { + re->up->lr[re->up->lr[1] == node] = re; // Replace link in the parent of node. + } else { + *root = re; + } + } else { // Either or both of the children are NULL. 
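+            // At most one child: splice the surviving child (if any) into the node's place and retrace from the parent.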
+ p = node->up; + const bool rr = node->lr[1] != NULL; + if (node->lr[rr] != NULL) { + node->lr[rr]->up = p; + } + if (p != NULL) { + r = p->lr[1] == node; + p->lr[r] = node->lr[rr]; + if (p->lr[r] != NULL) { + p->lr[r]->up = p; + } + } else { + *root = node->lr[rr]; + } + } + // Now that the topology is updated, perform the retracing to restore balance. We climb up adjusting the + // balance factors until we reach the root or a parent whose balance factor becomes plus/minus one, which + // means that that parent was able to absorb the balance delta; in other words, the height of the outer + // subtree is unchanged, so upper balance factors shall be kept unchanged. + if (p != NULL) { + CAVL2_T* c = NULL; + for (;;) { + c = cavl2_impl_adjust_balance(p, !r); + p = c->up; + if ((c->bf != 0) || (NULL == p)) { // Reached the root or the height difference is absorbed by c. + break; + } + r = p->lr[1] == c; + } + if (NULL == p) { + CAVL2_ASSERT(c != NULL); + *root = c; + } + } + // Invalidate the node's pointers to indicate it is no longer in the tree. + node->up = NULL; + node->lr[0] = NULL; + node->lr[1] = NULL; + } +} + +static inline void cavl2_replace(CAVL2_T** const root, CAVL2_T* const old_node, CAVL2_T* const new_node) +{ + if ((root != NULL) && (old_node != NULL) && (new_node != NULL)) { + CAVL2_ASSERT(*root != NULL); // Otherwise, old_node would have to be NULL. + CAVL2_ASSERT((old_node->up != NULL) || (old_node == *root)); // old_node must be in the tree. + CAVL2_ASSERT((new_node->up == NULL) && (new_node->lr[0] == NULL) && (new_node->lr[1] == NULL)); + // Copy the structural data from the old node to the new node. + new_node->up = old_node->up; + new_node->lr[0] = old_node->lr[0]; + new_node->lr[1] = old_node->lr[1]; + new_node->bf = old_node->bf; + // Update the parent to point to the new node. + if (old_node->up != NULL) { + old_node->up->lr[old_node->up->lr[1] == old_node] = new_node; + } else { + *root = new_node; + } + // Update the children to point to the new parent. + if (old_node->lr[0] != NULL) { + old_node->lr[0]->up = new_node; + } + if (old_node->lr[1] != NULL) { + old_node->lr[1]->up = new_node; + } + // Invalidate the old node's pointers to indicate it is no longer in the tree. + old_node->up = NULL; + old_node->lr[0] = NULL; + old_node->lr[1] = NULL; + } +} + +#ifdef __cplusplus +} +#endif diff --git a/libudpard/_udpard_cavl.h b/libudpard/_udpard_cavl.h deleted file mode 100644 index e8fe2e2..0000000 --- a/libudpard/_udpard_cavl.h +++ /dev/null @@ -1,338 +0,0 @@ -/// Source: https://github.com/pavel-kirienko/cavl -/// -/// Cavl is a single-header C library providing an implementation of AVL tree suitable for deeply embedded systems. -/// To integrate it into your project, simply copy this file into your source tree. Read the API docs below. -/// -/// See also O1Heap -- a deterministic memory manager for hard-real-time -/// high-integrity embedded systems. 
-/// -/// Copyright (c) 2021 Pavel Kirienko -/// -/// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -/// documentation files (the "Software"), to deal in the Software without restriction, including without limitation -/// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -/// and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -/// -/// The above copyright notice and this permission notice shall be included in all copies or substantial portions of -/// the Software. -/// -/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -/// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS -/// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -/// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#pragma once - -#include "udpard.h" - -/// Modified for use with Libudpard: use the same assertion check macro if provided. -#ifdef UDPARD_ASSERT -# define CAVL_ASSERT UDPARD_ASSERT -#else -// Intentional violation of MISRA: inclusion not at the top of the file to eliminate unnecessary dependency on assert.h. -# include // NOSONAR -# define CAVL_ASSERT assert -#endif - -#ifdef __cplusplus -// This is, strictly speaking, useless because we do not define any functions with external linkage here, -// but it tells static analyzers that what follows should be interpreted as C code rather than C++. -extern "C" { -#endif - -// ---------------------------------------- PUBLIC API SECTION ---------------------------------------- - -/// Modified for use with LibUDPard: expose the Cavl structure via public API as UdpardTreeNode. -typedef struct UdpardTreeNode Cavl; - -/// Returns POSITIVE if the search target is GREATER than the provided node, negative if smaller, zero on match (found). -/// Values other than {-1, 0, +1} are not recommended to avoid overflow during the narrowing conversion of the result. -typedef int_fast8_t (*CavlPredicate)(void* user_reference, const Cavl* node); - -/// If provided, the factory will be invoked when the sought node does not exist in the tree. -/// It is expected to return a new node that will be inserted immediately (without the need to traverse the tree again). -/// If the factory returns NULL or is not provided, the tree is not modified. -typedef Cavl* (*CavlFactory)(void* user_reference); - -/// Look for a node in the tree using the specified search predicate. The worst-case complexity is O(log n). -/// - If the node is found, return it. -/// - If the node is not found and the factory is NULL, return NULL. -/// - Otherwise, construct a new node using the factory; if the result is not NULL, insert it; return the result. -/// The user_reference is passed into the predicate & factory unmodified. -/// The root node may be replaced in the process. -/// If predicate is NULL, returns NULL. -static inline Cavl* cavlSearch(Cavl** const root, - void* const user_reference, - const CavlPredicate predicate, - const CavlFactory factory); - -/// Remove the specified node from its tree. The root node may be replaced in the process. -/// The worst-case complexity is O(log n). -/// The function has no effect if either of the pointers are NULL. 
-/// If the node is not in the tree, the behavior is undefined; it may create cycles in the tree which is deadly. -/// It is safe to pass the result of cavlSearch() directly as the second argument: -/// cavlRemove(&root, cavlSearch(&root, user_reference, search_predicate, NULL)); -/// It is recommended to invalidate the pointers stored in the node after its removal. -static inline void cavlRemove(Cavl** const root, const Cavl* const node); - -/// Return the min-/max-valued node stored in the tree, depending on the flag. This is an extremely fast query. -/// Returns NULL iff the argument is NULL (i.e., the tree is empty). The worst-case complexity is O(log n). -static inline Cavl* cavlFindExtremum(Cavl* const root, const bool maximum) -{ - Cavl* result = NULL; - Cavl* c = root; - while (c != NULL) - { - result = c; - c = c->lr[maximum]; - } - return result; -} - -// ---------------------------------------- END OF PUBLIC API SECTION ---------------------------------------- -// ---------------------------------------- POLICE LINE DO NOT CROSS ---------------------------------------- - -/// INTERNAL USE ONLY. Makes the '!r' child of node 'x' its parent; i.e., rotates 'x' toward 'r'. -static inline void cavlPrivateRotate(Cavl* const x, const bool r) -{ - CAVL_ASSERT((x != NULL) && (x->lr[!r] != NULL) && ((x->bf >= -1) && (x->bf <= +1))); - Cavl* const z = x->lr[!r]; - if (x->up != NULL) - { - x->up->lr[x->up->lr[1] == x] = z; - } - z->up = x->up; - x->up = z; - x->lr[!r] = z->lr[r]; - if (x->lr[!r] != NULL) - { - x->lr[!r]->up = x; - } - z->lr[r] = x; -} - -/// INTERNAL USE ONLY. -/// Accepts a node and how its balance factor needs to be changed -- either +1 or -1. -/// Returns the new node to replace the old one if tree rotation took place, same node otherwise. -static inline Cavl* cavlPrivateAdjustBalance(Cavl* const x, const bool increment) -{ - CAVL_ASSERT((x != NULL) && ((x->bf >= -1) && (x->bf <= +1))); - Cavl* out = x; - const int_fast8_t new_bf = (int_fast8_t) (x->bf + (increment ? +1 : -1)); - if ((new_bf < -1) || (new_bf > 1)) - { - const bool r = new_bf < 0; // bf<0 if left-heavy --> right rotation is needed. - const int_fast8_t sign = r ? +1 : -1; // Positive if we are rotating right. - Cavl* const z = x->lr[!r]; - CAVL_ASSERT(z != NULL); // Heavy side cannot be empty. - // NOLINTNEXTLINE(clang-analyzer-core.NullDereference) - if ((z->bf * sign) <= 0) // Parent and child are heavy on the same side or the child is balanced. - { - out = z; - cavlPrivateRotate(x, r); - if (0 == z->bf) - { - x->bf = (int_fast8_t) (-sign); - z->bf = (int_fast8_t) (+sign); - } - else - { - x->bf = 0; - z->bf = 0; - } - } - else // Otherwise, the child needs to be rotated in the opposite direction first. - { - Cavl* const y = z->lr[r]; - CAVL_ASSERT(y != NULL); // Heavy side cannot be empty. - out = y; - cavlPrivateRotate(z, !r); - cavlPrivateRotate(x, r); - if ((y->bf * sign) < 0) - { - x->bf = (int_fast8_t) (+sign); - y->bf = 0; - z->bf = 0; - } - else if ((y->bf * sign) > 0) - { - x->bf = 0; - y->bf = 0; - z->bf = (int_fast8_t) (-sign); - } - else - { - x->bf = 0; - z->bf = 0; - } - } - } - else - { - x->bf = new_bf; // Balancing not needed, just update the balance factor and call it a day. - } - return out; -} - -/// INTERNAL USE ONLY. -/// Takes the culprit node (the one that is added); returns NULL or the root of the tree (possibly new one). -/// When adding a new node, set its balance factor to zero and call this function to propagate the changes upward. 
-static inline Cavl* cavlPrivateRetraceOnGrowth(Cavl* const added) -{ - CAVL_ASSERT((added != NULL) && (0 == added->bf)); - Cavl* c = added; // Child - Cavl* p = added->up; // Parent - while (p != NULL) - { - const bool r = p->lr[1] == c; // c is the right child of parent - CAVL_ASSERT(p->lr[r] == c); - c = cavlPrivateAdjustBalance(p, r); - p = c->up; - if (0 == c->bf) - { // The height change of the subtree made this parent perfectly balanced (as all things should be), - break; // hence, the height of the outer subtree is unchanged, so upper balance factors are unchanged. - } - } - CAVL_ASSERT(c != NULL); - return (NULL == p) ? c : NULL; // New root or nothing. -} - -static inline Cavl* cavlSearch(Cavl** const root, - void* const user_reference, - const CavlPredicate predicate, - const CavlFactory factory) -{ - Cavl* out = NULL; - if ((root != NULL) && (predicate != NULL)) - { - Cavl* up = *root; - Cavl** n = root; - while (*n != NULL) - { - const int_fast8_t cmp = predicate(user_reference, *n); - if (0 == cmp) - { - out = *n; - break; - } - up = *n; - n = &(*n)->lr[cmp > 0]; - CAVL_ASSERT((NULL == *n) || ((*n)->up == up)); - } - if (NULL == out) - { - out = (NULL == factory) ? NULL : factory(user_reference); - if (out != NULL) - { - *n = out; // Overwrite the pointer to the new node in the parent node. - out->lr[0] = NULL; - out->lr[1] = NULL; - out->up = up; - out->bf = 0; - Cavl* const rt = cavlPrivateRetraceOnGrowth(out); - if (rt != NULL) - { - *root = rt; - } - } - } - } - return out; -} - -static inline void cavlRemove(Cavl** const root, const Cavl* const node) -{ - if ((root != NULL) && (node != NULL)) - { - CAVL_ASSERT(*root != NULL); // Otherwise, the node would have to be NULL. - CAVL_ASSERT((node->up != NULL) || (node == *root)); - Cavl* p = NULL; // The lowest parent node that suffered a shortening of its subtree. - bool r = false; // Which side of the above was shortened. - // The first step is to update the topology and remember the node where to start the retracing from later. - // Balancing is not performed yet so we may end up with an unbalanced tree. - if ((node->lr[0] != NULL) && (node->lr[1] != NULL)) - { - Cavl* const re = cavlFindExtremum(node->lr[1], false); - CAVL_ASSERT((re != NULL) && (NULL == re->lr[0]) && (re->up != NULL)); - re->bf = node->bf; - re->lr[0] = node->lr[0]; - re->lr[0]->up = re; - if (re->up != node) - { - p = re->up; // Retracing starts with the ex-parent of our replacement node. - CAVL_ASSERT(p->lr[0] == re); - p->lr[0] = re->lr[1]; // Reducing the height of the left subtree here. - if (p->lr[0] != NULL) - { - p->lr[0]->up = p; - } - re->lr[1] = node->lr[1]; - re->lr[1]->up = re; - r = false; - } - else // In this case, we are reducing the height of the right subtree, so r=1. - { - p = re; // Retracing starts with the replacement node itself as we are deleting its parent. - r = true; // The right child of the replacement node remains the same so we don't bother relinking it. - } - re->up = node->up; - if (re->up != NULL) - { - re->up->lr[re->up->lr[1] == node] = re; // Replace link in the parent of node. - } - else - { - *root = re; - } - } - else // Either or both of the children are NULL. 
- { - p = node->up; - const bool rr = node->lr[1] != NULL; - if (node->lr[rr] != NULL) - { - node->lr[rr]->up = p; - } - if (p != NULL) - { - r = p->lr[1] == node; - p->lr[r] = node->lr[rr]; - if (p->lr[r] != NULL) - { - p->lr[r]->up = p; - } - } - else - { - *root = node->lr[rr]; - } - } - // Now that the topology is updated, perform the retracing to restore balance. We climb up adjusting the - // balance factors until we reach the root or a parent whose balance factor becomes plus/minus one, which - // means that that parent was able to absorb the balance delta; in other words, the height of the outer - // subtree is unchanged, so upper balance factors shall be kept unchanged. - if (p != NULL) - { - Cavl* c = NULL; - for (;;) - { - c = cavlPrivateAdjustBalance(p, !r); - p = c->up; - if ((c->bf != 0) || (NULL == p)) // Reached the root or the height difference is absorbed by c. - { - break; - } - r = p->lr[1] == c; - } - if (NULL == p) - { - CAVL_ASSERT(c != NULL); - *root = c; - } - } - } -} - -#ifdef __cplusplus -} -#endif diff --git a/libudpard/udpard.c b/libudpard/udpard.c index c7a7241..b6a685b 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -3,1880 +3,2492 @@ /// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. /// Author: Pavel Kirienko -#include "udpard.h" -#include "_udpard_cavl.h" +// ReSharper disable CppDFATimeOver +#include "udpard.h" +#include #include -// --------------------------------------------- BUILD CONFIGURATION --------------------------------------------- - /// Define this macro to include build configuration header. /// Usage example with CMake: "-DUDPARD_CONFIG_HEADER=\"${CMAKE_CURRENT_SOURCE_DIR}/my_udpard_config.h\"" #ifdef UDPARD_CONFIG_HEADER -# include UDPARD_CONFIG_HEADER +#include UDPARD_CONFIG_HEADER #endif /// By default, this macro resolves to the standard assert(). The user can redefine this if necessary. /// To disable assertion checks completely, make it expand into `(void)(0)`. #ifndef UDPARD_ASSERT -// Intentional violation of MISRA: inclusion not at the top of the file to eliminate unnecessary dependency on assert.h. -# include // NOSONAR // Intentional violation of MISRA: assertion macro cannot be replaced with a function definition. -# define UDPARD_ASSERT(x) assert(x) // NOSONAR +#define UDPARD_ASSERT(x) assert(x) // NOSONAR #endif -#if !defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) -# error "Unsupported language: ISO C99 or a newer version is required." +#if __STDC_VERSION__ < 201112L +// Intentional violation of MISRA: static assertion macro cannot be replaced with a function definition. +#define static_assert(x, ...) typedef char _static_assert_gl(_static_assertion_, __LINE__)[(x) ? 1 : -1] // NOSONAR +#define _static_assert_gl(a, b) _static_assert_gl_impl(a, b) // NOSONAR +// Intentional violation of MISRA: the paste operator ## cannot be avoided in this context. +#define _static_assert_gl_impl(a, b) a##b // NOSONAR #endif -// --------------------------------------------- COMMON DEFINITIONS --------------------------------------------- - -typedef uint_least8_t byte_t; ///< For compatibility with platforms where byte size is not 8 bits. - -static const uint_fast8_t ByteWidth = 8U; -static const byte_t ByteMask = 0xFFU; +#if !defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) +#error "Unsupported language: ISO C99 or a newer version is required." 
+#endif

-#define RX_SLOT_COUNT 2
-#define TIMESTAMP_UNSET UINT64_MAX
-#define FRAME_INDEX_UNSET UINT32_MAX
-#define TRANSFER_ID_UNSET UINT64_MAX
+#define CAVL2_T udpard_tree_t
+#define CAVL2_RELATION int32_t
+#define CAVL2_ASSERT(x) UDPARD_ASSERT(x) // NOSONAR
+#include "cavl2.h" // NOSONAR

-typedef struct
-{
-    enum UdpardPriority priority;
-    UdpardNodeID        src_node_id;
-    UdpardNodeID        dst_node_id;
-    uint16_t            data_specifier;
-    UdpardTransferID    transfer_id;
-} TransferMetadata;
+typedef unsigned char byte_t; ///< For compatibility with platforms where byte size is not 8 bits.

-#define DATA_SPECIFIER_SERVICE_NOT_MESSAGE_MASK 0x8000U
-#define DATA_SPECIFIER_SERVICE_REQUEST_NOT_RESPONSE_MASK 0x4000U
-#define DATA_SPECIFIER_SERVICE_ID_MASK 0x3FFFU
+/// Sessions will be garbage-collected after being idle for this long, along with unfinished transfers, if any.
+/// Pending slots within a live session will also be reset after this timeout to avoid storing stale data indefinitely.
+#define SESSION_LIFETIME (60 * MEGA)

-#define HEADER_SIZE_BYTES 24U
-#define HEADER_VERSION 1U
-/// The frame index is a 31-bit unsigned integer. The most significant bit is used to indicate the end of transfer.
-#define HEADER_FRAME_INDEX_EOT_MASK 0x80000000UL
-#define HEADER_FRAME_INDEX_MAX 0x7FFFFFFFUL
-#define HEADER_FRAME_INDEX_MASK HEADER_FRAME_INDEX_MAX
+/// The maximum number of incoming transfers that can be in the state of incomplete reassembly simultaneously.
+/// Additional transfers will replace the oldest ones.
+/// This number should normally be at least as large as the number of priority levels. More is fine but rarely useful.
+#define RX_SLOT_COUNT UDPARD_PRIORITY_COUNT

-/// The port number is defined in the Cyphal/UDP Specification.
-#define UDP_PORT 9382U
+/// The number of most recent transfers to keep in the history for ACK retransmission and duplicate detection.
+/// Should be a power of two to allow replacement of the modulo operation with a bitwise AND.
+///
+/// Implementation note: we used to store bitmap windows instead of a full list of recent transfer-IDs, but they
+/// were found to offer no advantage except in the perfect scenario of non-restarting senders, and an increased
+/// implementation complexity (more branches, more lines of code), so they were replaced with a simple list.
+/// Unlike the bitmap, the list works equally well with a non-contiguous transfer-ID stream, and is thus more robust.
+#define RX_TRANSFER_HISTORY_COUNT 32U
+
+/// In the ORDERED reassembly mode, with the most recently received transfer-ID N, the library will reject
+/// transfers with transfer-ID less than or equal to N-ORDERING_WINDOW (modulo 2^64) as late.
+/// This limit is chosen rather arbitrarily; its value does not affect the resource utilization in any way.
+/// One trade-off to keep in mind is that a very large window may somewhat increase the likelihood of choosing a new
+/// random transfer-ID that falls within the window, thus being rejected as late by receivers; however, given the
+/// 64-bit ID space, this value would have to be extremely large to have any measurable effect on that probability.
+#define RX_TRANSFER_ORDERING_WINDOW 8192U
+
+#define UDP_PORT 9382U
+#define IPv4_MCAST_PREFIX 0xEF000000UL
+static_assert((UDPARD_IPv4_SUBJECT_ID_MAX & (UDPARD_IPv4_SUBJECT_ID_MAX + 1)) == 0,
+              "UDPARD_IPv4_SUBJECT_ID_MAX must be one less than a power of 2");
+
+#define BIG_BANG INT64_MIN
+#define HEAT_DEATH INT64_MAX
+
+#define KILO 1000LL
+#define MEGA 1000000LL
+
+/// Pending ack transfers expire after this long if not transmitted.
+#define ACK_TX_DEADLINE MEGA
+
+/// The ACK message payload is structured as follows, in DSDL notation:
+///
+///     uint64 topic_hash   # Topic hash of the original message being acknowledged.
+///     uint64 transfer_id  # Transfer-ID of the original message being acknowledged.
+///     # If there is any additional data not defined by the format, it must be ignored.
+#define ACK_SIZE_BYTES 16U

-// See Cyphal/UDP Specification, section 4.3.2.1 Endpoints.
-#define SUBJECT_MULTICAST_GROUP_ADDRESS_MASK 0xEF000000UL
-#define SERVICE_MULTICAST_GROUP_ADDRESS_MASK 0xEF010000UL
+static size_t smaller(const size_t a, const size_t b) { return (a < b) ? a : b; }
+static size_t larger(const size_t a, const size_t b) { return (a > b) ? a : b; }
+static int64_t min_i64(const int64_t a, const int64_t b) { return (a < b) ? a : b; }
+static int64_t max_i64(const int64_t a, const int64_t b) { return (a > b) ? a : b; }
+static udpard_us_t earlier(const udpard_us_t a, const udpard_us_t b) { return min_i64(a, b); }
+static udpard_us_t later(const udpard_us_t a, const udpard_us_t b) { return max_i64(a, b); }

-static uint32_t makeSubjectIPGroupAddress(const UdpardPortID subject_id)
+/// Two memory resources are considered identical if they share the same user context and the same vtable
+/// (and therefore the same allocation and deallocation functions).
+static bool mem_same(const udpard_mem_t a, const udpard_mem_t b)
 {
-    return SUBJECT_MULTICAST_GROUP_ADDRESS_MASK | ((uint32_t) subject_id);
+    return (a.context == b.context) && (a.vtable == b.vtable);
 }

-static uint32_t makeServiceIPGroupAddress(const UdpardNodeID destination_node_id)
+static void* mem_alloc(const udpard_mem_t memory, const size_t size)
 {
-    return SERVICE_MULTICAST_GROUP_ADDRESS_MASK | ((uint32_t) destination_node_id);
+    return memory.vtable->alloc(memory.context, size);
 }

-static struct UdpardUDPIPEndpoint makeSubjectUDPIPEndpoint(const UdpardPortID subject_id)
+static void mem_free(const udpard_mem_t memory, const size_t size, void* const data)
 {
-    return (struct UdpardUDPIPEndpoint) {.ip_address = makeSubjectIPGroupAddress(subject_id), //
-                                         .udp_port   = UDP_PORT};
+    memory.vtable->base.free(memory.context, size, data);
 }

-static struct UdpardUDPIPEndpoint makeServiceUDPIPEndpoint(const UdpardNodeID destination_node_id)
+static void mem_free_payload(const udpard_deleter_t memory, const udpard_bytes_mut_t payload)
 {
-    return (struct UdpardUDPIPEndpoint) {.ip_address = makeServiceIPGroupAddress(destination_node_id),
-                                         .udp_port   = UDP_PORT};
+    if (payload.data != NULL) {
+        memory.vtable->free(memory.context, payload.size, payload.data);
+    }
 }

-/// Used for inserting new items into AVL trees. Refer to the documentation for cavlSearch() for details.
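The 16-byte ACK payload described above maps onto two little-endian 64-bit writes. A minimal sketch; the sketch_ function is hypothetical and not part of the patch, and it relies on the serialize_u64 helper that the patch introduces a few hunks below:

static void sketch_serialize_ack(byte_t out[ACK_SIZE_BYTES], const uint64_t topic_hash, const uint64_t transfer_id)
{
    byte_t* ptr = out;                     // Hypothetical illustration only.
    ptr = serialize_u64(ptr, topic_hash);  // Topic hash of the message being acknowledged.
    ptr = serialize_u64(ptr, transfer_id); // Transfer-ID of the message being acknowledged.
    UDPARD_ASSERT((size_t)(ptr - out) == ACK_SIZE_BYTES);
}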
-static struct UdpardTreeNode* avlTrivialFactory(void* const user_reference) +static byte_t* serialize_u32(byte_t* ptr, const uint32_t value) { - return (struct UdpardTreeNode*) user_reference; + for (size_t i = 0; i < sizeof(value); i++) { + *ptr++ = (byte_t)((byte_t)(value >> (i * 8U)) & 0xFFU); + } + return ptr; } -static size_t smaller(const size_t a, const size_t b) +static byte_t* serialize_u64(byte_t* ptr, const uint64_t value) { - return (a < b) ? a : b; + for (size_t i = 0; i < sizeof(value); i++) { + *ptr++ = (byte_t)((byte_t)(value >> (i * 8U)) & 0xFFU); + } + return ptr; } -static size_t larger(const size_t a, const size_t b) +static const byte_t* deserialize_u32(const byte_t* ptr, uint32_t* const out_value) { - return (a > b) ? a : b; + UDPARD_ASSERT((ptr != NULL) && (out_value != NULL)); + *out_value = 0; + for (size_t i = 0; i < sizeof(*out_value); i++) { + *out_value |= (uint32_t)((uint32_t)*ptr << (i * 8U)); // NOLINT(google-readability-casting) NOSONAR + ptr++; + } + return ptr; } -static uint32_t max32(const uint32_t a, const uint32_t b) +static const byte_t* deserialize_u64(const byte_t* ptr, uint64_t* const out_value) { - return (a > b) ? a : b; + UDPARD_ASSERT((ptr != NULL) && (out_value != NULL)); + *out_value = 0; + for (size_t i = 0; i < sizeof(*out_value); i++) { + *out_value |= ((uint64_t)*ptr << (i * 8U)); + ptr++; + } + return ptr; } -/// Returns the sign of the subtraction of the operands; zero if equal. This is useful for AVL search. -static int_fast8_t compare32(const uint32_t a, const uint32_t b) +// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling) +static void mem_zero(const size_t size, void* const data) { (void)memset(data, 0, size); } + +udpard_deleter_t udpard_make_deleter(const udpard_mem_t memory) { - int_fast8_t result = 0; - if (a > b) - { - result = +1; - } - if (a < b) - { - result = -1; - } - return result; + return (udpard_deleter_t){ .vtable = &memory.vtable->base, .context = memory.context }; } -static void* memAlloc(const struct UdpardMemoryResource memory, const size_t size) +bool udpard_is_valid_endpoint(const udpard_udpip_ep_t ep) { - UDPARD_ASSERT(memory.allocate != NULL); - return memory.allocate(memory.user_reference, size); + return (ep.port != 0) && (ep.ip != 0) && (ep.ip != UINT32_MAX); } -static void memFree(const struct UdpardMemoryResource memory, const size_t size, void* const data) +static uint16_t valid_ep_bitmap(const udpard_udpip_ep_t remote_ep[UDPARD_IFACE_COUNT_MAX]) { - UDPARD_ASSERT(memory.deallocate != NULL); - memory.deallocate(memory.user_reference, size, data); + uint16_t bitmap = 0U; + if (remote_ep != NULL) { + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + if (udpard_is_valid_endpoint(remote_ep[i])) { + bitmap |= (1U << i); + } + } + } + return bitmap; } -static void memFreePayload(const struct UdpardMemoryDeleter memory, const struct UdpardMutablePayload payload) +udpard_udpip_ep_t udpard_make_subject_endpoint(const uint32_t subject_id) { - UDPARD_ASSERT(memory.deallocate != NULL); - if (payload.data != NULL) - { - memory.deallocate(memory.user_reference, payload.size, payload.data); - } -} - -static void memZero(const size_t size, void* const data) -{ - // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling) - (void) memset(data, 0, size); -} - -// --------------------------------------------- HEADER CRC --------------------------------------------- - -#define HEADER_CRC_INITIAL 0xFFFFU -#define HEADER_CRC_RESIDUE 0x0000U -#define 
HEADER_CRC_SIZE_BYTES 2U - -static uint16_t headerCRCAddByte(const uint16_t crc, const byte_t byte) -{ - static const uint16_t Table[256] = { - 0x0000U, 0x1021U, 0x2042U, 0x3063U, 0x4084U, 0x50A5U, 0x60C6U, 0x70E7U, 0x8108U, 0x9129U, 0xA14AU, 0xB16BU, - 0xC18CU, 0xD1ADU, 0xE1CEU, 0xF1EFU, 0x1231U, 0x0210U, 0x3273U, 0x2252U, 0x52B5U, 0x4294U, 0x72F7U, 0x62D6U, - 0x9339U, 0x8318U, 0xB37BU, 0xA35AU, 0xD3BDU, 0xC39CU, 0xF3FFU, 0xE3DEU, 0x2462U, 0x3443U, 0x0420U, 0x1401U, - 0x64E6U, 0x74C7U, 0x44A4U, 0x5485U, 0xA56AU, 0xB54BU, 0x8528U, 0x9509U, 0xE5EEU, 0xF5CFU, 0xC5ACU, 0xD58DU, - 0x3653U, 0x2672U, 0x1611U, 0x0630U, 0x76D7U, 0x66F6U, 0x5695U, 0x46B4U, 0xB75BU, 0xA77AU, 0x9719U, 0x8738U, - 0xF7DFU, 0xE7FEU, 0xD79DU, 0xC7BCU, 0x48C4U, 0x58E5U, 0x6886U, 0x78A7U, 0x0840U, 0x1861U, 0x2802U, 0x3823U, - 0xC9CCU, 0xD9EDU, 0xE98EU, 0xF9AFU, 0x8948U, 0x9969U, 0xA90AU, 0xB92BU, 0x5AF5U, 0x4AD4U, 0x7AB7U, 0x6A96U, - 0x1A71U, 0x0A50U, 0x3A33U, 0x2A12U, 0xDBFDU, 0xCBDCU, 0xFBBFU, 0xEB9EU, 0x9B79U, 0x8B58U, 0xBB3BU, 0xAB1AU, - 0x6CA6U, 0x7C87U, 0x4CE4U, 0x5CC5U, 0x2C22U, 0x3C03U, 0x0C60U, 0x1C41U, 0xEDAEU, 0xFD8FU, 0xCDECU, 0xDDCDU, - 0xAD2AU, 0xBD0BU, 0x8D68U, 0x9D49U, 0x7E97U, 0x6EB6U, 0x5ED5U, 0x4EF4U, 0x3E13U, 0x2E32U, 0x1E51U, 0x0E70U, - 0xFF9FU, 0xEFBEU, 0xDFDDU, 0xCFFCU, 0xBF1BU, 0xAF3AU, 0x9F59U, 0x8F78U, 0x9188U, 0x81A9U, 0xB1CAU, 0xA1EBU, - 0xD10CU, 0xC12DU, 0xF14EU, 0xE16FU, 0x1080U, 0x00A1U, 0x30C2U, 0x20E3U, 0x5004U, 0x4025U, 0x7046U, 0x6067U, - 0x83B9U, 0x9398U, 0xA3FBU, 0xB3DAU, 0xC33DU, 0xD31CU, 0xE37FU, 0xF35EU, 0x02B1U, 0x1290U, 0x22F3U, 0x32D2U, - 0x4235U, 0x5214U, 0x6277U, 0x7256U, 0xB5EAU, 0xA5CBU, 0x95A8U, 0x8589U, 0xF56EU, 0xE54FU, 0xD52CU, 0xC50DU, - 0x34E2U, 0x24C3U, 0x14A0U, 0x0481U, 0x7466U, 0x6447U, 0x5424U, 0x4405U, 0xA7DBU, 0xB7FAU, 0x8799U, 0x97B8U, - 0xE75FU, 0xF77EU, 0xC71DU, 0xD73CU, 0x26D3U, 0x36F2U, 0x0691U, 0x16B0U, 0x6657U, 0x7676U, 0x4615U, 0x5634U, - 0xD94CU, 0xC96DU, 0xF90EU, 0xE92FU, 0x99C8U, 0x89E9U, 0xB98AU, 0xA9ABU, 0x5844U, 0x4865U, 0x7806U, 0x6827U, - 0x18C0U, 0x08E1U, 0x3882U, 0x28A3U, 0xCB7DU, 0xDB5CU, 0xEB3FU, 0xFB1EU, 0x8BF9U, 0x9BD8U, 0xABBBU, 0xBB9AU, - 0x4A75U, 0x5A54U, 0x6A37U, 0x7A16U, 0x0AF1U, 0x1AD0U, 0x2AB3U, 0x3A92U, 0xFD2EU, 0xED0FU, 0xDD6CU, 0xCD4DU, - 0xBDAAU, 0xAD8BU, 0x9DE8U, 0x8DC9U, 0x7C26U, 0x6C07U, 0x5C64U, 0x4C45U, 0x3CA2U, 0x2C83U, 0x1CE0U, 0x0CC1U, - 0xEF1FU, 0xFF3EU, 0xCF5DU, 0xDF7CU, 0xAF9BU, 0xBFBAU, 0x8FD9U, 0x9FF8U, 0x6E17U, 0x7E36U, 0x4E55U, 0x5E74U, - 0x2E93U, 0x3EB2U, 0x0ED1U, 0x1EF0U, - }; - return (uint16_t) ((uint16_t) (crc << ByteWidth) ^ - Table[(uint16_t) ((uint16_t) (crc >> ByteWidth) ^ byte) & ByteMask]); + return (udpard_udpip_ep_t){ .ip = IPv4_MCAST_PREFIX | (subject_id & UDPARD_IPv4_SUBJECT_ID_MAX), .port = UDP_PORT }; } -static uint16_t headerCRCCompute(const size_t size, const void* const data) +typedef struct { - UDPARD_ASSERT((data != NULL) || (size == 0U)); - uint16_t out = HEADER_CRC_INITIAL; - const byte_t* p = (const byte_t*) data; - for (size_t i = 0; i < size; i++) - { - out = headerCRCAddByte(out, *p); - ++p; + const udpard_bytes_scattered_t* cursor; ///< Initially points at the head. + size_t position; ///< Position within the current fragment, initially zero. +} bytes_scattered_reader_t; + +/// Sequentially reads data from a scattered byte array into a contiguous destination buffer. +/// Requires that the total amount of read data does not exceed the total size of the scattered array. 
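A usage sketch ahead of the implementation below; the sketch_ function and the fragment sizes are hypothetical, not part of the patch. It gathers a fixed-size header that straddles two fragments of a scattered buffer:

static void sketch_gather_header(void)
{
    byte_t part_a[10] = { 0 };
    byte_t part_b[38] = { 0 };
    udpard_bytes_scattered_t tail = { .bytes = { .size = sizeof(part_b), .data = part_b }, .next = NULL };
    udpard_bytes_scattered_t head = { .bytes = { .size = sizeof(part_a), .data = part_a }, .next = &tail };
    byte_t                   header[48];
    bytes_scattered_reader_t reader = { .cursor = &head, .position = 0 };
    bytes_scattered_read(&reader, sizeof(header), header); // Copies 10 bytes from part_a, then 38 from part_b.
}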
+static void bytes_scattered_read(bytes_scattered_reader_t* const reader, const size_t size, void* const destination) +{ + UDPARD_ASSERT((reader != NULL) && (reader->cursor != NULL) && (destination != NULL)); + byte_t* ptr = (byte_t*)destination; + size_t remaining = size; + while (remaining > 0U) { + UDPARD_ASSERT(reader->position <= reader->cursor->bytes.size); + while (reader->position == reader->cursor->bytes.size) { // Advance while skipping empty fragments. + reader->position = 0U; + reader->cursor = reader->cursor->next; + UDPARD_ASSERT(reader->cursor != NULL); + } + UDPARD_ASSERT(reader->position < reader->cursor->bytes.size); + const size_t progress = smaller(remaining, reader->cursor->bytes.size - reader->position); + UDPARD_ASSERT((progress > 0U) && (progress <= remaining)); + UDPARD_ASSERT((reader->position + progress) <= reader->cursor->bytes.size); + // NOLINTNEXTLINE(*DeprecatedOrUnsafeBufferHandling) + (void)memcpy(ptr, ((const byte_t*)reader->cursor->bytes.data) + reader->position, progress); + ptr += progress; + remaining -= progress; + reader->position += progress; } - return out; } -// --------------------------------------------- TRANSFER CRC --------------------------------------------- - -#define TRANSFER_CRC_INITIAL 0xFFFFFFFFUL -#define TRANSFER_CRC_OUTPUT_XOR 0xFFFFFFFFUL -#define TRANSFER_CRC_RESIDUE_BEFORE_OUTPUT_XOR 0xB798B438UL -#define TRANSFER_CRC_RESIDUE_AFTER_OUTPUT_XOR (TRANSFER_CRC_RESIDUE_BEFORE_OUTPUT_XOR ^ TRANSFER_CRC_OUTPUT_XOR) -#define TRANSFER_CRC_SIZE_BYTES 4U - -static uint32_t transferCRCAddByte(const uint32_t crc, const byte_t byte) -{ - static const uint32_t Table[256] = { - 0x00000000UL, 0xF26B8303UL, 0xE13B70F7UL, 0x1350F3F4UL, 0xC79A971FUL, 0x35F1141CUL, 0x26A1E7E8UL, 0xD4CA64EBUL, - 0x8AD958CFUL, 0x78B2DBCCUL, 0x6BE22838UL, 0x9989AB3BUL, 0x4D43CFD0UL, 0xBF284CD3UL, 0xAC78BF27UL, 0x5E133C24UL, - 0x105EC76FUL, 0xE235446CUL, 0xF165B798UL, 0x030E349BUL, 0xD7C45070UL, 0x25AFD373UL, 0x36FF2087UL, 0xC494A384UL, - 0x9A879FA0UL, 0x68EC1CA3UL, 0x7BBCEF57UL, 0x89D76C54UL, 0x5D1D08BFUL, 0xAF768BBCUL, 0xBC267848UL, 0x4E4DFB4BUL, - 0x20BD8EDEUL, 0xD2D60DDDUL, 0xC186FE29UL, 0x33ED7D2AUL, 0xE72719C1UL, 0x154C9AC2UL, 0x061C6936UL, 0xF477EA35UL, - 0xAA64D611UL, 0x580F5512UL, 0x4B5FA6E6UL, 0xB93425E5UL, 0x6DFE410EUL, 0x9F95C20DUL, 0x8CC531F9UL, 0x7EAEB2FAUL, - 0x30E349B1UL, 0xC288CAB2UL, 0xD1D83946UL, 0x23B3BA45UL, 0xF779DEAEUL, 0x05125DADUL, 0x1642AE59UL, 0xE4292D5AUL, - 0xBA3A117EUL, 0x4851927DUL, 0x5B016189UL, 0xA96AE28AUL, 0x7DA08661UL, 0x8FCB0562UL, 0x9C9BF696UL, 0x6EF07595UL, - 0x417B1DBCUL, 0xB3109EBFUL, 0xA0406D4BUL, 0x522BEE48UL, 0x86E18AA3UL, 0x748A09A0UL, 0x67DAFA54UL, 0x95B17957UL, - 0xCBA24573UL, 0x39C9C670UL, 0x2A993584UL, 0xD8F2B687UL, 0x0C38D26CUL, 0xFE53516FUL, 0xED03A29BUL, 0x1F682198UL, - 0x5125DAD3UL, 0xA34E59D0UL, 0xB01EAA24UL, 0x42752927UL, 0x96BF4DCCUL, 0x64D4CECFUL, 0x77843D3BUL, 0x85EFBE38UL, - 0xDBFC821CUL, 0x2997011FUL, 0x3AC7F2EBUL, 0xC8AC71E8UL, 0x1C661503UL, 0xEE0D9600UL, 0xFD5D65F4UL, 0x0F36E6F7UL, - 0x61C69362UL, 0x93AD1061UL, 0x80FDE395UL, 0x72966096UL, 0xA65C047DUL, 0x5437877EUL, 0x4767748AUL, 0xB50CF789UL, - 0xEB1FCBADUL, 0x197448AEUL, 0x0A24BB5AUL, 0xF84F3859UL, 0x2C855CB2UL, 0xDEEEDFB1UL, 0xCDBE2C45UL, 0x3FD5AF46UL, - 0x7198540DUL, 0x83F3D70EUL, 0x90A324FAUL, 0x62C8A7F9UL, 0xB602C312UL, 0x44694011UL, 0x5739B3E5UL, 0xA55230E6UL, - 0xFB410CC2UL, 0x092A8FC1UL, 0x1A7A7C35UL, 0xE811FF36UL, 0x3CDB9BDDUL, 0xCEB018DEUL, 0xDDE0EB2AUL, 0x2F8B6829UL, - 0x82F63B78UL, 0x709DB87BUL, 0x63CD4B8FUL, 0x91A6C88CUL, 0x456CAC67UL, 
0xB7072F64UL, 0xA457DC90UL, 0x563C5F93UL, - 0x082F63B7UL, 0xFA44E0B4UL, 0xE9141340UL, 0x1B7F9043UL, 0xCFB5F4A8UL, 0x3DDE77ABUL, 0x2E8E845FUL, 0xDCE5075CUL, - 0x92A8FC17UL, 0x60C37F14UL, 0x73938CE0UL, 0x81F80FE3UL, 0x55326B08UL, 0xA759E80BUL, 0xB4091BFFUL, 0x466298FCUL, - 0x1871A4D8UL, 0xEA1A27DBUL, 0xF94AD42FUL, 0x0B21572CUL, 0xDFEB33C7UL, 0x2D80B0C4UL, 0x3ED04330UL, 0xCCBBC033UL, - 0xA24BB5A6UL, 0x502036A5UL, 0x4370C551UL, 0xB11B4652UL, 0x65D122B9UL, 0x97BAA1BAUL, 0x84EA524EUL, 0x7681D14DUL, - 0x2892ED69UL, 0xDAF96E6AUL, 0xC9A99D9EUL, 0x3BC21E9DUL, 0xEF087A76UL, 0x1D63F975UL, 0x0E330A81UL, 0xFC588982UL, - 0xB21572C9UL, 0x407EF1CAUL, 0x532E023EUL, 0xA145813DUL, 0x758FE5D6UL, 0x87E466D5UL, 0x94B49521UL, 0x66DF1622UL, - 0x38CC2A06UL, 0xCAA7A905UL, 0xD9F75AF1UL, 0x2B9CD9F2UL, 0xFF56BD19UL, 0x0D3D3E1AUL, 0x1E6DCDEEUL, 0xEC064EEDUL, - 0xC38D26C4UL, 0x31E6A5C7UL, 0x22B65633UL, 0xD0DDD530UL, 0x0417B1DBUL, 0xF67C32D8UL, 0xE52CC12CUL, 0x1747422FUL, - 0x49547E0BUL, 0xBB3FFD08UL, 0xA86F0EFCUL, 0x5A048DFFUL, 0x8ECEE914UL, 0x7CA56A17UL, 0x6FF599E3UL, 0x9D9E1AE0UL, - 0xD3D3E1ABUL, 0x21B862A8UL, 0x32E8915CUL, 0xC083125FUL, 0x144976B4UL, 0xE622F5B7UL, 0xF5720643UL, 0x07198540UL, - 0x590AB964UL, 0xAB613A67UL, 0xB831C993UL, 0x4A5A4A90UL, 0x9E902E7BUL, 0x6CFBAD78UL, 0x7FAB5E8CUL, 0x8DC0DD8FUL, - 0xE330A81AUL, 0x115B2B19UL, 0x020BD8EDUL, 0xF0605BEEUL, 0x24AA3F05UL, 0xD6C1BC06UL, 0xC5914FF2UL, 0x37FACCF1UL, - 0x69E9F0D5UL, 0x9B8273D6UL, 0x88D28022UL, 0x7AB90321UL, 0xAE7367CAUL, 0x5C18E4C9UL, 0x4F48173DUL, 0xBD23943EUL, - 0xF36E6F75UL, 0x0105EC76UL, 0x12551F82UL, 0xE03E9C81UL, 0x34F4F86AUL, 0xC69F7B69UL, 0xD5CF889DUL, 0x27A40B9EUL, - 0x79B737BAUL, 0x8BDCB4B9UL, 0x988C474DUL, 0x6AE7C44EUL, 0xBE2DA0A5UL, 0x4C4623A6UL, 0x5F16D052UL, 0xAD7D5351UL, - }; - return (crc >> ByteWidth) ^ Table[byte ^ (crc & ByteMask)]; -} - -/// Do not forget to apply the output XOR when done, or use transferCRCCompute(). -static uint32_t transferCRCAdd(const uint32_t crc, const size_t size, const void* const data) +static size_t bytes_scattered_size(const udpard_bytes_scattered_t head) { - UDPARD_ASSERT((data != NULL) || (size == 0U)); - uint32_t out = crc; - const byte_t* p = (const byte_t*) data; - for (size_t i = 0; i < size; i++) - { - out = transferCRCAddByte(out, *p); - ++p; + size_t size = head.bytes.size; + const udpard_bytes_scattered_t* current = head.next; + while (current != NULL) { + size += current->bytes.size; + current = current->next; } - return out; + return size; } -static uint32_t transferCRCCompute(const size_t size, const void* const data) +/// We require that the fragment tree does not contain fully-contained or equal-range fragments. This implies that no +/// two fragments have the same offset, and that fragments ordered by offset also order by their ends. 
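+/// For example, the tree may simultaneously hold the fragments [0, 10), [5, 20), and [15, 30),
+/// as partial overlap is permitted; what is excluded is [0, 10) together with [2, 8) (full containment)
+/// or two fragments both spanning [4, 12) (equal range). Under this invariant the order by offset and
+/// the order by end coincide, so the two comparators below navigate the same index consistently.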
+static int32_t cavl_compare_fragment_offset(const void* const user, const udpard_tree_t* const node) { - return transferCRCAdd(TRANSFER_CRC_INITIAL, size, data) ^ TRANSFER_CRC_OUTPUT_XOR; + const size_t u = *(const size_t*)user; + const size_t v = ((const udpard_fragment_t*)node)->offset; // clang-format off + if (u < v) { return -1; } + if (u > v) { return +1; } + return 0; // clang-format on } - -// ===================================================================================================================== -// ================================================= TX PIPELINE ================================================= -// ===================================================================================================================== - -/// Chain of TX frames prepared for insertion into a TX queue. -typedef struct +static int32_t cavl_compare_fragment_end(const void* const user, const udpard_tree_t* const node) { - struct UdpardTxItem* head; - struct UdpardTxItem* tail; - size_t count; -} TxChain; + const size_t u = *(const size_t*)user; + const udpard_fragment_t* const f = (const udpard_fragment_t*)node; + const size_t v = f->offset + f->view.size; // clang-format off + if (u < v) { return -1; } + if (u > v) { return +1; } + return 0; // clang-format on +} -static bool txValidateMemoryResources(const struct UdpardTxMemoryResources memory) +// NOLINTNEXTLINE(misc-no-recursion) +void udpard_fragment_free_all(udpard_fragment_t* const frag, const udpard_deleter_t fragment_deleter) { - return (memory.fragment.allocate != NULL) && (memory.fragment.deallocate != NULL) && - (memory.payload.allocate != NULL) && (memory.payload.deallocate != NULL); + if (frag != NULL) { + // Descend the tree + for (size_t i = 0; i < 2; i++) { + if (frag->index_offset.lr[i] != NULL) { + frag->index_offset.lr[i]->up = NULL; // Prevent backtrack ascension from this branch + udpard_fragment_free_all((udpard_fragment_t*)frag->index_offset.lr[i], fragment_deleter); + frag->index_offset.lr[i] = NULL; // Avoid dangly pointers even if we're headed for imminent destruction + } + } + // Delete this fragment + udpard_fragment_t* const parent = (udpard_fragment_t*)frag->index_offset.up; + mem_free_payload(frag->payload_deleter, frag->origin); + fragment_deleter.vtable->free(fragment_deleter.context, sizeof(udpard_fragment_t), frag); + // Ascend the tree. + if (parent != NULL) { + parent->index_offset.lr[parent->index_offset.lr[1] == (udpard_tree_t*)frag] = NULL; + udpard_fragment_free_all(parent, fragment_deleter); // tail call + } + } } -static struct UdpardTxItem* txNewItem(const struct UdpardTxMemoryResources memory, - const uint_least8_t dscp_value_per_priority[UDPARD_PRIORITY_MAX + 1U], - const UdpardMicrosecond deadline_usec, - const enum UdpardPriority priority, - const struct UdpardUDPIPEndpoint endpoint, - const size_t datagram_payload_size, - void* const user_transfer_reference) +udpard_fragment_t* udpard_fragment_seek(const udpard_fragment_t* frag, const size_t offset) { - struct UdpardTxItem* out = memAlloc(memory.fragment, sizeof(struct UdpardTxItem)); - if (out != NULL) - { - // No tree linkage by default. - out->base.up = NULL; - out->base.lr[0] = NULL; - out->base.lr[1] = NULL; - out->base.bf = 0; - // Init metadata. - out->priority = priority; - out->next_in_transfer = NULL; // Last by default. 
- out->deadline_usec = deadline_usec; - UDPARD_ASSERT(priority <= UDPARD_PRIORITY_MAX); - out->dscp = dscp_value_per_priority[priority]; - out->destination = endpoint; - out->user_transfer_reference = user_transfer_reference; - - void* const payload_data = memAlloc(memory.payload, datagram_payload_size); - if (NULL != payload_data) - { - out->datagram_payload.data = payload_data; - out->datagram_payload.size = datagram_payload_size; + if (frag != NULL) { + while (frag->index_offset.up != NULL) { // Only if the given node is not already the root. + frag = (const udpard_fragment_t*)frag->index_offset.up; } - else - { - memFree(memory.fragment, sizeof(struct UdpardTxItem), out); - out = NULL; + if (offset == 0) { // Common fast path. + return (udpard_fragment_t*)cavl2_min((udpard_tree_t*)frag); + } + udpard_fragment_t* const f = + (udpard_fragment_t*)cavl2_predecessor((udpard_tree_t*)frag, &offset, &cavl_compare_fragment_offset); + if ((f != NULL) && ((f->offset + f->view.size) > offset)) { + UDPARD_ASSERT(f->offset <= offset); + return f; } } - return out; + return NULL; } -/// Frames with identical weight are processed in the FIFO order. -/// Frames with higher weight compare smaller (i.e., put on the left side of the tree). -static int_fast8_t txAVLPredicate(void* const user_reference, // NOSONAR Cavl API requires pointer to non-const. - const struct UdpardTreeNode* const node) +udpard_fragment_t* udpard_fragment_next(const udpard_fragment_t* frag) { - const struct UdpardTxItem* const target = (const struct UdpardTxItem*) user_reference; - const struct UdpardTxItem* const other = (const struct UdpardTxItem*) (const void*) node; - UDPARD_ASSERT((target != NULL) && (other != NULL)); - return (target->priority >= other->priority) ? +1 : -1; + return (frag != NULL) ? ((udpard_fragment_t*)cavl2_next_greater((udpard_tree_t*)frag)) : NULL; } -/// The primitive serialization functions are endian-agnostic. -static byte_t* txSerializeU16(byte_t* const destination_buffer, const uint16_t value) +size_t udpard_fragment_gather(const udpard_fragment_t** cursor, + const size_t offset, + const size_t size, + void* const destination) { - byte_t* ptr = destination_buffer; - *ptr++ = (byte_t) (value & ByteMask); - *ptr++ = (byte_t) ((byte_t) (value >> ByteWidth) & ByteMask); - return ptr; -} + size_t copied = 0; + if ((cursor != NULL) && (*cursor != NULL) && (destination != NULL)) { + const size_t end_offset = (*cursor)->offset + (*cursor)->view.size; + const udpard_fragment_t* f = NULL; + if ((offset < (*cursor)->offset) || (offset > end_offset)) { + f = udpard_fragment_seek(*cursor, offset); + } else if (offset == end_offset) { // Common case during sequential access. + f = udpard_fragment_next(*cursor); + } else { + f = *cursor; + } + if ((f != NULL) && (size > 0U)) { + const udpard_fragment_t* last = f; + size_t pos = offset; + byte_t* const out = (byte_t*)destination; + while ((f != NULL) && (copied < size)) { // Copy contiguous fragments starting at the requested offset. 
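+                // Loop invariant, asserted below: pos always lies inside the current fragment, and the
+                // fragments consumed so far form a contiguous run starting at the requested offset.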
+ UDPARD_ASSERT(f->offset <= pos); + UDPARD_ASSERT(pos < (f->offset + f->view.size)); + UDPARD_ASSERT(f->view.data != NULL); + const size_t bias = pos - f->offset; + const size_t to_copy = smaller(f->view.size - bias, size - copied); + // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling) + (void)memcpy(out + copied, ((const byte_t*)f->view.data) + bias, to_copy); + copied += to_copy; + pos += to_copy; + last = f; + if (copied < size) { + f = udpard_fragment_next(f); + UDPARD_ASSERT((f == NULL) || (f->offset == pos)); + } + } + *cursor = last; // Keep iterator non-NULL. + } + UDPARD_ASSERT(NULL != *cursor); + } + return copied; +} + +// --------------------------------------------- CRC --------------------------------------------- + +#define CRC_INITIAL 0xFFFFFFFFUL +#define CRC_OUTPUT_XOR 0xFFFFFFFFUL +#define CRC_RESIDUE_BEFORE_OUTPUT_XOR 0xB798B438UL +#define CRC_RESIDUE_AFTER_OUTPUT_XOR (CRC_RESIDUE_BEFORE_OUTPUT_XOR ^ CRC_OUTPUT_XOR) +#define CRC_SIZE_BYTES 4U + +static const uint32_t crc_table[256] = { + 0x00000000UL, 0xF26B8303UL, 0xE13B70F7UL, 0x1350F3F4UL, 0xC79A971FUL, 0x35F1141CUL, 0x26A1E7E8UL, 0xD4CA64EBUL, + 0x8AD958CFUL, 0x78B2DBCCUL, 0x6BE22838UL, 0x9989AB3BUL, 0x4D43CFD0UL, 0xBF284CD3UL, 0xAC78BF27UL, 0x5E133C24UL, + 0x105EC76FUL, 0xE235446CUL, 0xF165B798UL, 0x030E349BUL, 0xD7C45070UL, 0x25AFD373UL, 0x36FF2087UL, 0xC494A384UL, + 0x9A879FA0UL, 0x68EC1CA3UL, 0x7BBCEF57UL, 0x89D76C54UL, 0x5D1D08BFUL, 0xAF768BBCUL, 0xBC267848UL, 0x4E4DFB4BUL, + 0x20BD8EDEUL, 0xD2D60DDDUL, 0xC186FE29UL, 0x33ED7D2AUL, 0xE72719C1UL, 0x154C9AC2UL, 0x061C6936UL, 0xF477EA35UL, + 0xAA64D611UL, 0x580F5512UL, 0x4B5FA6E6UL, 0xB93425E5UL, 0x6DFE410EUL, 0x9F95C20DUL, 0x8CC531F9UL, 0x7EAEB2FAUL, + 0x30E349B1UL, 0xC288CAB2UL, 0xD1D83946UL, 0x23B3BA45UL, 0xF779DEAEUL, 0x05125DADUL, 0x1642AE59UL, 0xE4292D5AUL, + 0xBA3A117EUL, 0x4851927DUL, 0x5B016189UL, 0xA96AE28AUL, 0x7DA08661UL, 0x8FCB0562UL, 0x9C9BF696UL, 0x6EF07595UL, + 0x417B1DBCUL, 0xB3109EBFUL, 0xA0406D4BUL, 0x522BEE48UL, 0x86E18AA3UL, 0x748A09A0UL, 0x67DAFA54UL, 0x95B17957UL, + 0xCBA24573UL, 0x39C9C670UL, 0x2A993584UL, 0xD8F2B687UL, 0x0C38D26CUL, 0xFE53516FUL, 0xED03A29BUL, 0x1F682198UL, + 0x5125DAD3UL, 0xA34E59D0UL, 0xB01EAA24UL, 0x42752927UL, 0x96BF4DCCUL, 0x64D4CECFUL, 0x77843D3BUL, 0x85EFBE38UL, + 0xDBFC821CUL, 0x2997011FUL, 0x3AC7F2EBUL, 0xC8AC71E8UL, 0x1C661503UL, 0xEE0D9600UL, 0xFD5D65F4UL, 0x0F36E6F7UL, + 0x61C69362UL, 0x93AD1061UL, 0x80FDE395UL, 0x72966096UL, 0xA65C047DUL, 0x5437877EUL, 0x4767748AUL, 0xB50CF789UL, + 0xEB1FCBADUL, 0x197448AEUL, 0x0A24BB5AUL, 0xF84F3859UL, 0x2C855CB2UL, 0xDEEEDFB1UL, 0xCDBE2C45UL, 0x3FD5AF46UL, + 0x7198540DUL, 0x83F3D70EUL, 0x90A324FAUL, 0x62C8A7F9UL, 0xB602C312UL, 0x44694011UL, 0x5739B3E5UL, 0xA55230E6UL, + 0xFB410CC2UL, 0x092A8FC1UL, 0x1A7A7C35UL, 0xE811FF36UL, 0x3CDB9BDDUL, 0xCEB018DEUL, 0xDDE0EB2AUL, 0x2F8B6829UL, + 0x82F63B78UL, 0x709DB87BUL, 0x63CD4B8FUL, 0x91A6C88CUL, 0x456CAC67UL, 0xB7072F64UL, 0xA457DC90UL, 0x563C5F93UL, + 0x082F63B7UL, 0xFA44E0B4UL, 0xE9141340UL, 0x1B7F9043UL, 0xCFB5F4A8UL, 0x3DDE77ABUL, 0x2E8E845FUL, 0xDCE5075CUL, + 0x92A8FC17UL, 0x60C37F14UL, 0x73938CE0UL, 0x81F80FE3UL, 0x55326B08UL, 0xA759E80BUL, 0xB4091BFFUL, 0x466298FCUL, + 0x1871A4D8UL, 0xEA1A27DBUL, 0xF94AD42FUL, 0x0B21572CUL, 0xDFEB33C7UL, 0x2D80B0C4UL, 0x3ED04330UL, 0xCCBBC033UL, + 0xA24BB5A6UL, 0x502036A5UL, 0x4370C551UL, 0xB11B4652UL, 0x65D122B9UL, 0x97BAA1BAUL, 0x84EA524EUL, 0x7681D14DUL, + 0x2892ED69UL, 0xDAF96E6AUL, 0xC9A99D9EUL, 0x3BC21E9DUL, 0xEF087A76UL, 0x1D63F975UL, 0x0E330A81UL, 
0xFC588982UL, + 0xB21572C9UL, 0x407EF1CAUL, 0x532E023EUL, 0xA145813DUL, 0x758FE5D6UL, 0x87E466D5UL, 0x94B49521UL, 0x66DF1622UL, + 0x38CC2A06UL, 0xCAA7A905UL, 0xD9F75AF1UL, 0x2B9CD9F2UL, 0xFF56BD19UL, 0x0D3D3E1AUL, 0x1E6DCDEEUL, 0xEC064EEDUL, + 0xC38D26C4UL, 0x31E6A5C7UL, 0x22B65633UL, 0xD0DDD530UL, 0x0417B1DBUL, 0xF67C32D8UL, 0xE52CC12CUL, 0x1747422FUL, + 0x49547E0BUL, 0xBB3FFD08UL, 0xA86F0EFCUL, 0x5A048DFFUL, 0x8ECEE914UL, 0x7CA56A17UL, 0x6FF599E3UL, 0x9D9E1AE0UL, + 0xD3D3E1ABUL, 0x21B862A8UL, 0x32E8915CUL, 0xC083125FUL, 0x144976B4UL, 0xE622F5B7UL, 0xF5720643UL, 0x07198540UL, + 0x590AB964UL, 0xAB613A67UL, 0xB831C993UL, 0x4A5A4A90UL, 0x9E902E7BUL, 0x6CFBAD78UL, 0x7FAB5E8CUL, 0x8DC0DD8FUL, + 0xE330A81AUL, 0x115B2B19UL, 0x020BD8EDUL, 0xF0605BEEUL, 0x24AA3F05UL, 0xD6C1BC06UL, 0xC5914FF2UL, 0x37FACCF1UL, + 0x69E9F0D5UL, 0x9B8273D6UL, 0x88D28022UL, 0x7AB90321UL, 0xAE7367CAUL, 0x5C18E4C9UL, 0x4F48173DUL, 0xBD23943EUL, + 0xF36E6F75UL, 0x0105EC76UL, 0x12551F82UL, 0xE03E9C81UL, 0x34F4F86AUL, 0xC69F7B69UL, 0xD5CF889DUL, 0x27A40B9EUL, + 0x79B737BAUL, 0x8BDCB4B9UL, 0x988C474DUL, 0x6AE7C44EUL, 0xBE2DA0A5UL, 0x4C4623A6UL, 0x5F16D052UL, 0xAD7D5351UL, +}; -static byte_t* txSerializeU32(byte_t* const destination_buffer, const uint32_t value) +/// Do not forget to apply the output XOR when done, or use crc_full(). +static uint32_t crc_add(uint32_t crc, const size_t n_bytes, const void* const data) { - byte_t* ptr = destination_buffer; - for (size_t i = 0; i < sizeof(value); i++) // We sincerely hope that the compiler will use memcpy. - { - *ptr++ = (byte_t) ((byte_t) (value >> (i * ByteWidth)) & ByteMask); + UDPARD_ASSERT((data != NULL) || (n_bytes == 0U)); + const byte_t* p = (const byte_t*)data; + for (size_t i = 0; i < n_bytes; i++) { + crc = (crc >> 8U) ^ crc_table[(*p++) ^ (crc & 0xFFU)]; } - return ptr; + return crc; } -static byte_t* txSerializeU64(byte_t* const destination_buffer, const uint64_t value) +static uint32_t crc_full(const size_t n_bytes, const void* const data) { - byte_t* ptr = destination_buffer; - for (size_t i = 0; i < sizeof(value); i++) // We sincerely hope that the compiler will use memcpy. - { - *ptr++ = (byte_t) ((byte_t) (value >> (i * ByteWidth)) & ByteMask); - } - return ptr; + return crc_add(CRC_INITIAL, n_bytes, data) ^ CRC_OUTPUT_XOR; } -static byte_t* txSerializeHeader(byte_t* const destination_buffer, - const TransferMetadata meta, - const uint32_t frame_index, - const bool end_of_transfer) -{ - byte_t* ptr = destination_buffer; - *ptr++ = HEADER_VERSION; - *ptr++ = (byte_t) meta.priority; - ptr = txSerializeU16(ptr, meta.src_node_id); - ptr = txSerializeU16(ptr, meta.dst_node_id); - ptr = txSerializeU16(ptr, meta.data_specifier); - ptr = txSerializeU64(ptr, meta.transfer_id); - UDPARD_ASSERT((frame_index + 0UL) <= HEADER_FRAME_INDEX_MAX); // +0UL is to avoid a compiler warning. - ptr = txSerializeU32(ptr, frame_index | (end_of_transfer ? HEADER_FRAME_INDEX_EOT_MASK : 0U)); - ptr = txSerializeU16(ptr, 0); // opaque user data - // Header CRC in the big endian format. Optimization prospect: the header up to frame_index is constant in - // multi-frame transfers, so we don't really need to recompute the CRC from scratch per frame. 
- const uint16_t crc = headerCRCCompute(HEADER_SIZE_BYTES - HEADER_CRC_SIZE_BYTES, destination_buffer); - *ptr++ = (byte_t) ((byte_t) (crc >> ByteWidth) & ByteMask); - *ptr++ = (byte_t) (crc & ByteMask); - UDPARD_ASSERT(ptr == (destination_buffer + HEADER_SIZE_BYTES)); - return ptr; -} +// --------------------------------------------- LIST CONTAINER --------------------------------------------- -/// Produces a chain of Tx queue items for later insertion into the Tx queue. The tail is NULL if OOM. -/// The caller is responsible for freeing the memory allocated for the chain. -static TxChain txMakeChain(const struct UdpardTxMemoryResources memory, - const uint_least8_t dscp_value_per_priority[UDPARD_PRIORITY_MAX + 1U], - const size_t mtu, - const UdpardMicrosecond deadline_usec, - const TransferMetadata meta, - const struct UdpardUDPIPEndpoint endpoint, - const struct UdpardPayload payload, - void* const user_transfer_reference) +static bool is_listed(const udpard_list_t* const list, const udpard_listed_t* const member) { - UDPARD_ASSERT(mtu > 0); - UDPARD_ASSERT((payload.data != NULL) || (payload.size == 0U)); - const size_t payload_size_with_crc = payload.size + TRANSFER_CRC_SIZE_BYTES; - byte_t crc_bytes[TRANSFER_CRC_SIZE_BYTES]; - txSerializeU32(crc_bytes, transferCRCCompute(payload.size, payload.data)); - TxChain out = {NULL, NULL, 0}; - size_t offset = 0U; - while (offset < payload_size_with_crc) - { - struct UdpardTxItem* const item = txNewItem(memory, - dscp_value_per_priority, - deadline_usec, - meta.priority, - endpoint, - smaller(payload_size_with_crc - offset, mtu) + HEADER_SIZE_BYTES, - user_transfer_reference); - if (NULL == out.head) - { - out.head = item; - } - else - { - // C std, 6.7.2.1.15: A pointer to a structure object <...> points to its initial member, and vice versa. - // Can't just read tqi->base because tqi may be NULL; https://github.com/OpenCyphal/libcanard/issues/203. - out.tail->next_in_transfer = item; - } - out.tail = item; - if (NULL == out.tail) - { - break; - } - const bool last = (payload_size_with_crc - offset) <= mtu; - byte_t* const dst_buffer = item->datagram_payload.data; - byte_t* write_ptr = txSerializeHeader(dst_buffer, meta, (uint32_t) out.count, last); - if (offset < payload.size) - { - const size_t progress = smaller(payload.size - offset, mtu); - // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling) - (void) memcpy(write_ptr, ((const byte_t*) payload.data) + offset, progress); - offset += progress; - write_ptr += progress; - UDPARD_ASSERT(offset <= payload.size); - UDPARD_ASSERT((!last) || (offset == payload.size)); - } - if (offset >= payload.size) - { - const size_t crc_offset = offset - payload.size; - UDPARD_ASSERT(crc_offset < TRANSFER_CRC_SIZE_BYTES); - const size_t available = item->datagram_payload.size - (size_t) (write_ptr - dst_buffer); - UDPARD_ASSERT(available <= TRANSFER_CRC_SIZE_BYTES); - const size_t write_size = smaller(TRANSFER_CRC_SIZE_BYTES - crc_offset, available); - // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling) - (void) memcpy(write_ptr, &crc_bytes[crc_offset], write_size); - offset += write_size; - } - UDPARD_ASSERT((out.count + 0ULL) < HEADER_FRAME_INDEX_MAX); // +0 is to suppress warning. 
- out.count++; - } - UDPARD_ASSERT((offset == payload_size_with_crc) || (out.tail == NULL)); - return out; + return (member->next != NULL) || (member->prev != NULL) || (list->head == member); } -static int32_t txPush(struct UdpardTx* const tx, - const UdpardMicrosecond deadline_usec, - const TransferMetadata meta, - const struct UdpardUDPIPEndpoint endpoint, - const struct UdpardPayload payload, - void* const user_transfer_reference) +/// No effect if not in the list. +static void delist(udpard_list_t* const list, udpard_listed_t* const member) { - UDPARD_ASSERT(tx != NULL); - int32_t out = 0; // The number of frames enqueued or negated error. - const size_t mtu = larger(tx->mtu, 1U); - const size_t frame_count = ((payload.size + TRANSFER_CRC_SIZE_BYTES + mtu) - 1U) / mtu; - UDPARD_ASSERT((frame_count > 0U) && ((frame_count + 0ULL) <= INT32_MAX)); // +0 is to suppress warning. - const bool anonymous = (*tx->local_node_id) > UDPARD_NODE_ID_MAX; - const bool service = (meta.data_specifier & DATA_SPECIFIER_SERVICE_NOT_MESSAGE_MASK) != 0; - if (anonymous && ((frame_count > 1) || service)) - { - out = -UDPARD_ERROR_ANONYMOUS; // Only single-frame message transfers can be anonymous. + if (member->next != NULL) { + member->next->prev = member->prev; } - else if ((tx->queue_size + frame_count) > tx->queue_capacity) - { - out = -UDPARD_ERROR_CAPACITY; // Not enough space in the queue. + if (member->prev != NULL) { + member->prev->next = member->next; } - else - { - const TxChain chain = txMakeChain(tx->memory, - tx->dscp_value_per_priority, - mtu, - deadline_usec, - meta, - endpoint, - payload, - user_transfer_reference); - if (chain.tail != NULL) - { - UDPARD_ASSERT(frame_count == chain.count); - struct UdpardTxItem* next = chain.head; - do - { - const struct UdpardTreeNode* const res = - cavlSearch(&tx->root, &next->base, &txAVLPredicate, &avlTrivialFactory); - (void) res; - UDPARD_ASSERT(res == &next->base); - UDPARD_ASSERT(tx->root != NULL); - next = next->next_in_transfer; - } while (next != NULL); - tx->queue_size += chain.count; - UDPARD_ASSERT(tx->queue_size <= tx->queue_capacity); - UDPARD_ASSERT((chain.count + 0ULL) <= INT32_MAX); // +0 is to suppress warning. - out = (int32_t) chain.count; - } - else // The queue is large enough but we ran out of heap memory, so we have to unwind the chain. - { - out = -UDPARD_ERROR_MEMORY; - struct UdpardTxItem* head = chain.head; - while (head != NULL) - { - struct UdpardTxItem* const next = head->next_in_transfer; - udpardTxFree(tx->memory, head); - head = next; - } - } + if (list->head == member) { + list->head = member->next; } - UDPARD_ASSERT((out < 0) || (out >= 1)); - return out; + if (list->tail == member) { + list->tail = member->prev; + } + member->next = NULL; + member->prev = NULL; + UDPARD_ASSERT((list->head != NULL) == (list->tail != NULL)); } -int_fast8_t udpardTxInit(struct UdpardTx* const self, - const UdpardNodeID* const local_node_id, - const size_t queue_capacity, - const struct UdpardTxMemoryResources memory) +/// If the item is already in the list, it will be delisted first. Can be used for moving to the front. 
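Together with enlist_head() defined next and the LIST_TAIL accessor defined a little further down, the intrusive list supports the usual LRU bookkeeping. A minimal sketch; the sketch_ type and functions are hypothetical and not part of the patch:

typedef struct
{
    udpard_listed_t listed;
    int             value;
} sketch_item_t;

static void sketch_touch(udpard_list_t* const list, sketch_item_t* const item)
{
    enlist_head(list, &item->listed); // Already-listed members are delisted first: a move-to-front.
}

static sketch_item_t* sketch_evict_lru(udpard_list_t* const list)
{
    sketch_item_t* const victim = LIST_TAIL(*list, sketch_item_t, listed); // Oldest at the tail.
    if (victim != NULL) {
        delist(list, &victim->listed);
    }
    return victim;
}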
+static void enlist_head(udpard_list_t* const list, udpard_listed_t* const member) { - int_fast8_t ret = -UDPARD_ERROR_ARGUMENT; - if ((NULL != self) && (NULL != local_node_id) && txValidateMemoryResources(memory)) - { - ret = 0; - memZero(sizeof(*self), self); - self->local_node_id = local_node_id; - self->queue_capacity = queue_capacity; - self->mtu = UDPARD_MTU_DEFAULT; - // The DSCP mapping recommended by the Specification is all zeroes, so we don't need to set it. - self->memory = memory; - self->queue_size = 0; - self->root = NULL; - } - return ret; -} - -int32_t udpardTxPublish(struct UdpardTx* const self, - const UdpardMicrosecond deadline_usec, - const enum UdpardPriority priority, - const UdpardPortID subject_id, - const UdpardTransferID transfer_id, - const struct UdpardPayload payload, - void* const user_transfer_reference) -{ - int32_t out = -UDPARD_ERROR_ARGUMENT; - const bool args_ok = (self != NULL) && (self->local_node_id != NULL) && (priority <= UDPARD_PRIORITY_MAX) && - (subject_id <= UDPARD_SUBJECT_ID_MAX) && ((payload.data != NULL) || (payload.size == 0U)); - if (args_ok) - { - out = txPush(self, - deadline_usec, - (TransferMetadata) { - .priority = priority, - .src_node_id = *self->local_node_id, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .transfer_id = transfer_id, - .data_specifier = subject_id, - }, - makeSubjectUDPIPEndpoint(subject_id), - payload, - user_transfer_reference); + delist(list, member); + UDPARD_ASSERT((member->next == NULL) && (member->prev == NULL)); + UDPARD_ASSERT((list->head != NULL) == (list->tail != NULL)); + member->next = list->head; + if (list->head != NULL) { + list->head->prev = member; } - return out; -} - -int32_t udpardTxRequest(struct UdpardTx* const self, - const UdpardMicrosecond deadline_usec, - const enum UdpardPriority priority, - const UdpardPortID service_id, - const UdpardNodeID server_node_id, - const UdpardTransferID transfer_id, - const struct UdpardPayload payload, - void* const user_transfer_reference) -{ - int32_t out = -UDPARD_ERROR_ARGUMENT; - const bool args_ok = (self != NULL) && (self->local_node_id != NULL) && (priority <= UDPARD_PRIORITY_MAX) && - (service_id <= UDPARD_SERVICE_ID_MAX) && (server_node_id <= UDPARD_NODE_ID_MAX) && - ((payload.data != NULL) || (payload.size == 0U)); - if (args_ok) - { - out = txPush(self, - deadline_usec, - (TransferMetadata) { - .priority = priority, - .src_node_id = *self->local_node_id, - .dst_node_id = server_node_id, - .transfer_id = transfer_id, - .data_specifier = DATA_SPECIFIER_SERVICE_NOT_MESSAGE_MASK | - DATA_SPECIFIER_SERVICE_REQUEST_NOT_RESPONSE_MASK | service_id, - }, - makeServiceUDPIPEndpoint(server_node_id), - payload, - user_transfer_reference); + list->head = member; + if (list->tail == NULL) { + list->tail = member; } - return out; + UDPARD_ASSERT((list->head != NULL) && (list->tail != NULL)); } -int32_t udpardTxRespond(struct UdpardTx* const self, - const UdpardMicrosecond deadline_usec, - const enum UdpardPriority priority, - const UdpardPortID service_id, - const UdpardNodeID client_node_id, - const UdpardTransferID transfer_id, - const struct UdpardPayload payload, - void* const user_transfer_reference) -{ - int32_t out = -UDPARD_ERROR_ARGUMENT; - const bool args_ok = (self != NULL) && (self->local_node_id != NULL) && (priority <= UDPARD_PRIORITY_MAX) && - (service_id <= UDPARD_SERVICE_ID_MAX) && (client_node_id <= UDPARD_NODE_ID_MAX) && - ((payload.data != NULL) || (payload.size == 0U)); - if (args_ok) - { - out = txPush(self, - deadline_usec, - 
(TransferMetadata) { - .priority = priority, - .src_node_id = *self->local_node_id, - .dst_node_id = client_node_id, - .transfer_id = transfer_id, - .data_specifier = DATA_SPECIFIER_SERVICE_NOT_MESSAGE_MASK | service_id, - }, - makeServiceUDPIPEndpoint(client_node_id), - payload, - user_transfer_reference); - } - return out; -} - -struct UdpardTxItem* udpardTxPeek(const struct UdpardTx* const self) +#define LIST_MEMBER(ptr, owner_type, owner_field) ((owner_type*)ptr_unbias((ptr), offsetof(owner_type, owner_field))) +static void* ptr_unbias(const void* const ptr, const size_t offset) { - struct UdpardTxItem* out = NULL; - if (self != NULL) - { - // Paragraph 6.7.2.1.15 of the C standard says: - // A pointer to a structure object, suitably converted, points to its initial member, and vice versa. - out = (struct UdpardTxItem*) (void*) cavlFindExtremum(self->root, false); - } - return out; + return (ptr == NULL) ? NULL : (void*)((char*)ptr - offset); } +#define LIST_TAIL(list, owner_type, owner_field) LIST_MEMBER((list).tail, owner_type, owner_field) + +// --------------------------------------------------------------------------------------------------------------------- +// --------------------------------------------- HEADER --------------------------------------------- +// --------------------------------------------------------------------------------------------------------------------- + +#define HEADER_SIZE_BYTES 48U +#define HEADER_VERSION 2U +#define HEADER_FLAG_RELIABLE 0x01U +#define HEADER_FLAG_ACKNOWLEDGEMENT 0x02U +#define HEADER_FRAME_INDEX_MAX 0xFFFFFFU /// 4 GiB with 256-byte MTU; 21.6 GiB with 1384-byte MTU -struct UdpardTxItem* udpardTxPop(struct UdpardTx* const self, struct UdpardTxItem* const item) +typedef struct { - if ((self != NULL) && (item != NULL)) - { - // Paragraph 6.7.2.1.15 of the C standard says: - // A pointer to a structure object, suitably converted, points to its initial member, and vice versa. - // Note that the highest-priority frame is always a leaf node in the AVL tree, which means that it is very - // cheap to remove. 
- cavlRemove(&self->root, &item->base); - UDPARD_ASSERT(self->queue_size > 0U); - self->queue_size--; - } - return item; + udpard_prio_t priority; + + bool flag_reliable; + bool flag_acknowledgement; + + uint32_t transfer_payload_size; + uint64_t transfer_id; + uint64_t sender_uid; + uint64_t topic_hash; +} meta_t; + +static byte_t* header_serialize(byte_t* const buffer, + const meta_t meta, + const uint32_t frame_index, + const uint32_t frame_payload_offset, + const uint32_t prefix_crc) +{ + byte_t* ptr = buffer; + byte_t flags = 0; + if (meta.flag_reliable) { + flags |= HEADER_FLAG_RELIABLE; + } + if (meta.flag_acknowledgement) { + flags |= HEADER_FLAG_ACKNOWLEDGEMENT; + } + *ptr++ = (byte_t)(HEADER_VERSION | (meta.priority << 5U)); + *ptr++ = flags; + *ptr++ = 0; + *ptr++ = 0; + ptr = serialize_u32(ptr, frame_index & HEADER_FRAME_INDEX_MAX); + ptr = serialize_u32(ptr, frame_payload_offset); + ptr = serialize_u32(ptr, meta.transfer_payload_size); + ptr = serialize_u64(ptr, meta.transfer_id); + ptr = serialize_u64(ptr, meta.sender_uid); + ptr = serialize_u64(ptr, meta.topic_hash); + ptr = serialize_u32(ptr, prefix_crc); + ptr = serialize_u32(ptr, crc_full(HEADER_SIZE_BYTES - CRC_SIZE_BYTES, buffer)); + UDPARD_ASSERT((size_t)(ptr - buffer) == HEADER_SIZE_BYTES); + return ptr; } -void udpardTxFree(const struct UdpardTxMemoryResources memory, struct UdpardTxItem* const item) -{ - if (item != NULL) - { - if (item->datagram_payload.data != NULL) - { - memFree(memory.payload, item->datagram_payload.size, item->datagram_payload.data); +static bool header_deserialize(const udpard_bytes_mut_t dgram_payload, + meta_t* const out_meta, + uint32_t* const frame_index, + uint32_t* const frame_payload_offset, + uint32_t* const prefix_crc, + udpard_bytes_t* const out_payload) +{ + UDPARD_ASSERT(out_payload != NULL); + bool ok = (dgram_payload.size >= HEADER_SIZE_BYTES) && (dgram_payload.data != NULL) && // + (crc_full(HEADER_SIZE_BYTES, dgram_payload.data) == CRC_RESIDUE_AFTER_OUTPUT_XOR); + if (ok) { + const byte_t* ptr = dgram_payload.data; + const byte_t head = *ptr++; + const byte_t version = head & 0x1FU; + if (version == HEADER_VERSION) { + out_meta->priority = (udpard_prio_t)((byte_t)(head >> 5U) & 0x07U); + const byte_t flags = *ptr++; + out_meta->flag_reliable = (flags & HEADER_FLAG_RELIABLE) != 0U; + out_meta->flag_acknowledgement = (flags & HEADER_FLAG_ACKNOWLEDGEMENT) != 0U; + const byte_t incompatibility = (byte_t)(flags & ~(HEADER_FLAG_RELIABLE | HEADER_FLAG_ACKNOWLEDGEMENT)); + ptr += 2U; + ptr = deserialize_u32(ptr, frame_index); + ptr = deserialize_u32(ptr, frame_payload_offset); + ptr = deserialize_u32(ptr, &out_meta->transfer_payload_size); + ptr = deserialize_u64(ptr, &out_meta->transfer_id); + ptr = deserialize_u64(ptr, &out_meta->sender_uid); + ptr = deserialize_u64(ptr, &out_meta->topic_hash); + ptr = deserialize_u32(ptr, prefix_crc); + (void)ptr; + // Set up the output payload view. + out_payload->size = dgram_payload.size - HEADER_SIZE_BYTES; + out_payload->data = (byte_t*)dgram_payload.data + HEADER_SIZE_BYTES; + // Finalize the fields. + *frame_index = HEADER_FRAME_INDEX_MAX & *frame_index; + // Validate the fields. + ok = ok && (incompatibility == 0U); + ok = ok && (((uint64_t)*frame_payload_offset + (uint64_t)out_payload->size) <= + (uint64_t)out_meta->transfer_payload_size); + ok = ok && ((0 == *frame_index) == (0 == *frame_payload_offset)); + // The prefix-CRC of the first frame of a transfer equals the CRC of its payload. 
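+            // (For the first frame the payload offset is zero, so the prefix is exactly this frame's
+            // payload. Frames at a nonzero offset cannot be validated in isolation, as the CRC of a
+            // longer prefix depends on payload bytes carried by the preceding frames.)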
+ ok = ok && ((0 < *frame_payload_offset) || (crc_full(out_payload->size, out_payload->data) == *prefix_crc)); + // ACK frame requires zero offset. + ok = ok && ((!out_meta->flag_acknowledgement) || (*frame_payload_offset == 0U)); + // Detect impossible flag combinations. + ok = ok && (!(out_meta->flag_reliable && out_meta->flag_acknowledgement)); + } else { + ok = false; } - - memFree(memory.fragment, sizeof(struct UdpardTxItem), item); } + return ok; } -// ===================================================================================================================== -// ================================================= RX PIPELINE ================================================= -// ===================================================================================================================== +// --------------------------------------------------------------------------------------------------------------------- +// --------------------------------------------- TX PIPELINE --------------------------------------------- +// --------------------------------------------------------------------------------------------------------------------- -/// All but the transfer metadata. -typedef struct -{ - uint32_t index; - bool end_of_transfer; - struct UdpardPayload payload; ///< Also contains the transfer CRC (but not the header CRC). - struct UdpardMutablePayload origin; ///< The entirety of the free-able buffer passed from the application. -} RxFrameBase; - -/// Full frame state. -typedef struct +typedef struct tx_frame_t { - RxFrameBase base; - TransferMetadata meta; -} RxFrame; + size_t refcount; + udpard_deleter_t deleter; + size_t* objcount; + struct tx_frame_t* next; + size_t size; + byte_t data[]; +} tx_frame_t; -/// The primitive deserialization functions are endian-agnostic. -static const byte_t* txDeserializeU16(const byte_t* const source_buffer, uint16_t* const out_value) +static udpard_bytes_t tx_frame_view(const tx_frame_t* const frame) { - UDPARD_ASSERT((source_buffer != NULL) && (out_value != NULL)); - const byte_t* ptr = source_buffer; - *out_value = *ptr; - ptr++; - *out_value |= (uint16_t) (((uint16_t) *ptr) << ByteWidth); - ptr++; - return ptr; + return (udpard_bytes_t){ .size = frame->size, .data = frame->data }; } -static const byte_t* txDeserializeU32(const byte_t* const source_buffer, uint32_t* const out_value) +static tx_frame_t* tx_frame_from_view(const udpard_bytes_t view) { - UDPARD_ASSERT((source_buffer != NULL) && (out_value != NULL)); - const byte_t* ptr = source_buffer; - *out_value = 0; - for (size_t i = 0; i < sizeof(*out_value); i++) // We sincerely hope that the compiler will use memcpy. - { - *out_value |= (uint32_t) ((uint32_t) *ptr << (i * ByteWidth)); // NOLINT(google-readability-casting) NOSONAR - ptr++; - } - return ptr; + return (tx_frame_t*)ptr_unbias(view.data, offsetof(tx_frame_t, data)); } -static const byte_t* txDeserializeU64(const byte_t* const source_buffer, uint64_t* const out_value) +static tx_frame_t* tx_frame_new(udpard_tx_t* const tx, const udpard_mem_t mem, const size_t data_size) { - UDPARD_ASSERT((source_buffer != NULL) && (out_value != NULL)); - const byte_t* ptr = source_buffer; - *out_value = 0; - for (size_t i = 0; i < sizeof(*out_value); i++) // We sincerely hope that the compiler will use memcpy. 
- { - *out_value |= ((uint64_t) *ptr << (i * ByteWidth)); - ptr++; + tx_frame_t* const frame = (tx_frame_t*)mem_alloc(mem, sizeof(tx_frame_t) + data_size); + if (frame != NULL) { + frame->refcount = 1U; + frame->deleter = udpard_make_deleter(mem); + frame->objcount = &tx->enqueued_frames_count; + frame->next = NULL; + frame->size = data_size; + // Update the count; this is decremented when the frame is freed upon refcount reaching zero. + tx->enqueued_frames_count++; + UDPARD_ASSERT(tx->enqueued_frames_count <= tx->enqueued_frames_limit); } - return ptr; + return frame; } -/// This is roughly the inverse of the txSerializeHeader function, but it also handles the frame payload. -static bool rxParseFrame(const struct UdpardMutablePayload datagram_payload, RxFrame* const out) +/// The ordering is by topic hash first, then by transfer-ID. +/// Therefore, it orders all transfers by topic hash, allowing quick lookup by topic with an arbitrary transfer-ID. +typedef struct { - UDPARD_ASSERT((out != NULL) && (datagram_payload.data != NULL)); - out->base.origin = datagram_payload; - bool ok = false; - if (datagram_payload.size > 0) // HEADER_SIZE_BYTES may change in the future depending on the header version. - { - const byte_t* ptr = (const byte_t*) datagram_payload.data; - const uint_fast8_t version = *ptr++; - // The frame payload cannot be empty because every transfer has at least four bytes of CRC. - if ((datagram_payload.size > HEADER_SIZE_BYTES) && (version == HEADER_VERSION) && - (headerCRCCompute(HEADER_SIZE_BYTES, datagram_payload.data) == HEADER_CRC_RESIDUE)) - { - const uint_fast8_t priority = *ptr++; - if (priority <= UDPARD_PRIORITY_MAX) - { - out->meta.priority = (enum UdpardPriority) priority; - ptr = txDeserializeU16(ptr, &out->meta.src_node_id); - ptr = txDeserializeU16(ptr, &out->meta.dst_node_id); - ptr = txDeserializeU16(ptr, &out->meta.data_specifier); - ptr = txDeserializeU64(ptr, &out->meta.transfer_id); - uint32_t index_eot = 0; - ptr = txDeserializeU32(ptr, &index_eot); - out->base.index = (uint32_t) (index_eot & HEADER_FRAME_INDEX_MASK); - out->base.end_of_transfer = (index_eot & HEADER_FRAME_INDEX_EOT_MASK) != 0U; - ptr += 2; // Opaque user data. - ptr += HEADER_CRC_SIZE_BYTES; - out->base.payload.data = ptr; - out->base.payload.size = datagram_payload.size - HEADER_SIZE_BYTES; - ok = true; - UDPARD_ASSERT((ptr == (((const byte_t*) datagram_payload.data) + HEADER_SIZE_BYTES)) && - (out->base.payload.size > 0U)); - } + uint64_t topic_hash; + uint64_t transfer_id; +} tx_transfer_key_t; + +/// The transmission scheduler maintains several indexes for the transfers in the pipeline. +/// The segregated priority queue only contains transfers that are ready for transmission. +/// The staged index contains transfers ordered by readiness for retransmission; +/// transfers that will no longer be transmitted but are retained waiting for the ack are in neither of these. +/// The deadline index contains ALL transfers, ordered by their deadlines, used for purging expired transfers. +/// The transfer index contains ALL transfers, used for lookup by (topic_hash, transfer_id). +typedef struct tx_transfer_t +{ + udpard_tree_t index_staged; ///< Soonest to be ready on the left. Key: staged_until + transfer identity + udpard_tree_t index_deadline; ///< Soonest to expire on the left. Key: deadline + transfer identity + udpard_tree_t index_transfer; ///< Specific transfer lookup for ack management. 
Key: tx_transfer_key_t + udpard_listed_t queue[UDPARD_IFACE_COUNT_MAX]; ///< Listed when ready for transmission. + udpard_listed_t agewise; ///< Listed when created; oldest at the tail. + udpard_tree_t index_transfer_ack; ///< Only for acks. Key: tx_transfer_key_t but referencing remote_*. + + /// We always keep a pointer to the head, plus a cursor that scans the frames during transmission. + /// Both are NULL if the payload is destroyed. + /// The head points to the first frame unless it is known that no (further) retransmissions are needed, + /// in which case the old head is deleted and the head points to the next frame to transmit. + tx_frame_t* head[UDPARD_IFACE_COUNT_MAX]; + + /// Mutable transmission state. All other fields, except for the index handles, are immutable. + tx_frame_t* cursor[UDPARD_IFACE_COUNT_MAX]; + uint_fast8_t epoch; ///< Does not overflow due to exponential backoff; e.g. 1us with epoch=48 => 9 years. + udpard_us_t staged_until; + + /// Constant transfer properties supplied by the client. + /// The remote_* fields are identical to the local ones except in the case of ack transfers, where they contain the + /// values encoded in the ack message. This is needed to find pending acks (to minimize duplicates); + /// in the future we may even remove them and accept potential ack duplication, since they are idempotent and cheap. + /// By default, upon construction, the remote_* fields equal the local ones, which is valid for ordinary messages. + uint64_t topic_hash; + uint64_t transfer_id; + uint64_t remote_topic_hash; + uint64_t remote_transfer_id; + udpard_us_t deadline; + bool reliable; + udpard_prio_t priority; + uint16_t iface_bitmap; ///< Guaranteed to have at least one bit set within UDPARD_IFACE_COUNT_MAX. + udpard_udpip_ep_t p2p_destination[UDPARD_IFACE_COUNT_MAX]; ///< Only for P2P transfers. + udpard_user_context_t user; + + void (*feedback)(udpard_tx_t*, udpard_tx_feedback_t); +} tx_transfer_t; + +static bool tx_validate_mem_resources(const udpard_tx_mem_resources_t memory) +{ + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + if ((memory.payload[i].vtable == NULL) || // + (memory.payload[i].vtable->base.free == NULL) || // + (memory.payload[i].vtable->alloc == NULL)) { + return false; } - // Parsers for other header versions may be added here later. - } - if (ok) // Version-agnostic semantics check. - { - UDPARD_ASSERT(out->base.payload.size > 0); // Follows from the prior checks. - const bool anonymous = out->meta.src_node_id == UDPARD_NODE_ID_UNSET; - const bool broadcast = out->meta.dst_node_id == UDPARD_NODE_ID_UNSET; - const bool service = (out->meta.data_specifier & DATA_SPECIFIER_SERVICE_NOT_MESSAGE_MASK) != 0; - const bool single_frame = (out->base.index == 0) && out->base.end_of_transfer; - ok = service ? 
((!broadcast) && (!anonymous)) : (broadcast && ((!anonymous) || single_frame)); - ok = ok && (out->meta.transfer_id != TRANSFER_ID_UNSET); } - return ok; + return (memory.transfer.vtable != NULL) && // + (memory.transfer.vtable->base.free != NULL) && // + (memory.transfer.vtable->alloc != NULL); } -static bool rxValidateMemoryResources(const struct UdpardRxMemoryResources memory) +static void tx_transfer_free_payload(tx_transfer_t* const tr) { - return (memory.session.allocate != NULL) && (memory.session.deallocate != NULL) && - (memory.fragment.allocate != NULL) && (memory.fragment.deallocate != NULL) && - (memory.payload.deallocate != NULL); + UDPARD_ASSERT(tr != NULL); + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + const tx_frame_t* frame = tr->head[i]; + while (frame != NULL) { + const tx_frame_t* const next = frame->next; + udpard_tx_refcount_dec(tx_frame_view(frame)); + frame = next; + } + tr->head[i] = NULL; + tr->cursor[i] = NULL; + } } -/// This helper is needed to minimize the risk of argument swapping when passing these two resources around, -/// as they almost always go side by side. -typedef struct -{ - struct UdpardMemoryResource fragment; - struct UdpardMemoryDeleter payload; -} RxMemory; - -typedef struct -{ - struct UdpardTreeNode base; - struct RxFragment* this; // This is needed to avoid pointer arithmetic with multiple inheritance. -} RxFragmentTreeNode; - -/// This is designed to be convertible to/from UdpardFragment, so that the application could be -/// given a linked list of these objects represented as a list of UdpardFragment. -typedef struct RxFragment -{ - struct UdpardFragment base; - RxFragmentTreeNode tree; - uint32_t frame_index; -} RxFragment; - -/// Internally, the RX pipeline is arranged as follows: -/// -/// - There is one port per subscription or an RPC-service listener. Within the port, there are N sessions, -/// one session per remote node emitting transfers on this port (i.e., on this subject, or sending -/// request/response of this service). Sessions are constructed dynamically in memory provided by -/// UdpardMemoryResource. +/// Currently, we use a very simple implementation that ceases delivery attempts after the first acknowledgment +/// is received, similar to the CAN bus. Such mode of reliability is useful in the following scenarios: /// -/// - Per session, there are UDPARD_NETWORK_INTERFACE_COUNT_MAX interface states to support interface redundancy. +/// - With topics with a single subscriber, or sent via P2P transport (responses to published messages). +/// With a single recipient, a single acknowledgement is sufficient to guarantee delivery. /// -/// - Per interface, there are RX_SLOT_COUNT slots; a slot keeps the state of a transfer in the process of being -/// reassembled which includes its payload fragments. +/// - The application only cares about one acknowledgement (anycast), e.g., with modular redundant nodes. /// -/// Port -> Session -> Interface -> Slot -> Fragments. +/// - The application assumes that if one copy was delivered successfully, then other copies have likely +/// succeeded as well (depends on the required reliability guarantees), similar to the CAN bus. 
///
-/// Consider the following examples, where A,B,C denote distinct multi-frame transfers:
-///
-/// A0 A1 A2 B0 B1 B2 -- two transfers without OOO frames; both accepted
-/// A2 A0 A1 B0 B2 B1 -- two transfers with OOO frames; both accepted
-/// A0 A1 B0 A2 B1 B2 -- two transfers with interleaved frames; both accepted (this is why we need 2 slots)
-/// B1 A2 A0 C0 B0 A1 C1 -- B evicted by C; A and C accepted, B dropped (to accept B we would need 3 slots)
-/// B0 A0 A1 C0 B1 A2 C1 -- ditto
-/// A0 A1 C0 B0 A2 C1 B1 -- A evicted by B; B and C accepted, A dropped
-///
-/// In this implementation we postpone the implicit truncation until all fragments of a transfer are received.
-/// Early truncation such that excess payload is not stored in memory at all is difficult to implement if
-/// out-of-order reassembly is a requirement.
-/// To implement early truncation with out-of-order reassembly, we need to deduce the MTU of the sender per transfer
-/// (which is easy as we only need to take note of the payload size of any non-last frame of the transfer),
-/// then, based on the MTU, determine the maximum frame index we should accept (higher indexes will be dropped);
-/// then, for each fragment (i.e., frame) we need to compute the CRC (including those that are discarded).
-/// At the end, when all frames have been observed, combine all CRCs to obtain the final transfer CRC
-/// (this is possible because all common CRC functions are linear).
-typedef struct
-{
-    UdpardMicrosecond ts_usec; ///< Timestamp of the earliest frame; TIMESTAMP_UNSET upon restart.
-    UdpardTransferID transfer_id; ///< When first constructed, this shall be set to UINT64_MAX (unreachable value).
-    uint32_t max_index; ///< Maximum observed frame index in this transfer (so far); zero upon restart.
-    uint32_t eot_index; ///< Frame index where the EOT flag was observed; FRAME_INDEX_UNSET upon restart.
-    uint32_t accepted_frames; ///< Number of frames accepted so far.
-    size_t payload_size;
-    RxFragmentTreeNode* fragments;
-} RxSlot;
+/// TODO In the future, there are plans to extend this mechanism to track the number of acknowledgements per topic,
+/// such that we can retain transfers until a specified number of acknowledgements have been received. A remote
+/// node can be considered to have disappeared if it failed to acknowledge a transfer after the maximum number
+/// of attempts has been made. This is somewhat similar in principle to the connection-oriented DDS/RTPS approach,
+/// where pub/sub associations are established and removed automatically, transparently to the application.
+static void tx_transfer_retire(udpard_tx_t* const tx, tx_transfer_t* const tr, const bool success)
+{
+    // Construct the feedback object before the transfer is destroyed.
+    const udpard_tx_feedback_t fb = { .user = tr->user, .acknowledgements = success ? 1 : 0 };
+    UDPARD_ASSERT(tr->reliable == (tr->feedback != NULL));
+    // Save the feedback pointer, because the transfer object is freed before the callback is invoked.
+    void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t) = tr->feedback;
+
+    // Remove from all indexes and lists.
+    for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+        delist(&tx->queue[i][tr->priority], &tr->queue[i]);
+    }
+    delist(&tx->agewise, &tr->agewise);
+    (void)cavl2_remove_if(&tx->index_staged, &tr->index_staged);
+    cavl2_remove(&tx->index_deadline, &tr->index_deadline);
+    cavl2_remove(&tx->index_transfer, &tr->index_transfer);
+    (void)cavl2_remove_if(&tx->index_transfer_ack, &tr->index_transfer_ack);
+
+    // Free the memory.
The payload memory may already be empty depending on where we were invoked from.
+    tx_transfer_free_payload(tr);
+    mem_free(tx->memory.transfer, sizeof(tx_transfer_t), tr);
+
+    // Finally, when the internal state is updated and consistent, invoke the feedback callback if any.
+    if (feedback != NULL) {
+        feedback(tx, fb);
+    }
+}
+
+/// When the queue is exhausted, finds a transfer to sacrifice using simple heuristics and returns it.
+/// Will return NULL if there are no transfers worth sacrificing (no queue space can be reclaimed).
+/// We cannot simply stop accepting new transfers when the queue is full, because it may be caused by a single
+/// stalled interface holding back progress for all transfers.
+/// The heuristics are subject to review and improvement.
+static tx_transfer_t* tx_sacrifice(udpard_tx_t* const tx) { return LIST_TAIL(tx->agewise, tx_transfer_t, agewise); }
+
+/// True on success, false if not possible to reclaim enough space.
+static bool tx_ensure_queue_space(udpard_tx_t* const tx, const size_t total_frames_needed)
+{
+    if (total_frames_needed > tx->enqueued_frames_limit) {
+        return false; // The request exceeds the hard limit; no amount of sacrifice can reclaim enough space.
+    }
+    while (total_frames_needed > (tx->enqueued_frames_limit - tx->enqueued_frames_count)) {
+        tx_transfer_t* const tr = tx_sacrifice(tx);
+        if (tr == NULL) {
+            break; // We may have no transfers anymore but the NIC TX driver could still be holding some frames.
+        }
+        tx_transfer_retire(tx, tr, false);
+        tx->errors_sacrifice++;
+    }
+    return total_frames_needed <= (tx->enqueued_frames_limit - tx->enqueued_frames_count);
+}
+// Key for time-ordered TX indices with stable tie-breaking.
 typedef struct
 {
-    UdpardMicrosecond ts_usec; ///< The timestamp of the last valid transfer to arrive on this interface.
-    RxSlot slots[RX_SLOT_COUNT];
-} RxIface;
+    udpard_us_t time;
+    uint64_t    topic_hash;
+    uint64_t    transfer_id;
+} tx_time_key_t;

-/// This type is forward-declared externally, hence why it has such a long name with the "udpard" prefix.
-/// Keep in mind that we have a dedicated session object per remote node per port; this means that the states
-/// kept here -- the timestamp and the transfer-ID -- are specific per remote node, as it should be.
-struct UdpardInternalRxSession
-{
-    struct UdpardTreeNode base;
-    /// The remote node-ID is needed here as this is the ordering/search key.
-    UdpardNodeID remote_node_id;
-    /// This shared state is used for redundant transfer deduplication.
-    /// Redundancies occur as a result of the use of multiple network interfaces, spurious frame duplication along
-    /// the network path, and trivial forward error correction through duplication (if used by the sender).
-    UdpardMicrosecond last_ts_usec;
-    UdpardTransferID last_transfer_id;
-    /// Each redundant interface maintains its own session state independently.
-    /// The first interface to receive a transfer takes precedence, thus the redundant group always operates
-    /// at the speed of the fastest interface. Duplicate transfers delivered by the slower interfaces are discarded.
-    RxIface ifaces[UDPARD_NETWORK_INTERFACE_COUNT_MAX];
-};
-
-// -------------------------------------------------- RX FRAGMENT --------------------------------------------------
-
-/// Frees all fragments in the tree and their payload buffers. Destroys the passed fragment.
-/// This is meant to be invoked on the root of the tree.
-/// The maximum recursion depth is ceil(1.44*log2(FRAME_INDEX_MAX+1)-0.328) = 45 levels.
-// NOLINTNEXTLINE(misc-no-recursion) MISRA C:2012 rule 17.2 -static void rxFragmentDestroyTree(RxFragment* const self, const RxMemory memory) +// Compare staged transfers by time then by transfer identity. +static int32_t tx_cavl_compare_staged(const void* const user, const udpard_tree_t* const node) { - UDPARD_ASSERT(self != NULL); - memFreePayload(memory.payload, self->base.origin); - for (uint_fast8_t i = 0; i < 2; i++) - { - RxFragmentTreeNode* const child = (RxFragmentTreeNode*) self->tree.base.lr[i]; - if (child != NULL) - { - UDPARD_ASSERT(child->base.up == &self->tree.base); - rxFragmentDestroyTree(child->this, memory); // NOSONAR recursion - } - } - memFree(memory.fragment, sizeof(RxFragment), self); // self-destruct + const tx_time_key_t* const key = (const tx_time_key_t*)user; + const tx_transfer_t* const tr = CAVL2_TO_OWNER(node, tx_transfer_t, index_staged); // clang-format off + if (key->time < tr->staged_until) { return -1; } + if (key->time > tr->staged_until) { return +1; } + if (key->topic_hash < tr->topic_hash) { return -1; } + if (key->topic_hash > tr->topic_hash) { return +1; } + if (key->transfer_id < tr->transfer_id) { return -1; } + if (key->transfer_id > tr->transfer_id) { return +1; } + return 0; // clang-format on } -/// Frees all fragments in the list and their payload buffers. Destroys the passed fragment. -/// This is meant to be invoked on the head of the list. -/// This function is needed because when a fragment tree is transformed into a list, the tree structure itself -/// is invalidated and cannot be used to free the fragments anymore. -static void rxFragmentDestroyList(struct UdpardFragment* const head, const RxMemory memory) +// Compare deadlines by time then by transfer identity. +static int32_t tx_cavl_compare_deadline(const void* const user, const udpard_tree_t* const node) { - struct UdpardFragment* handle = head; - while (handle != NULL) - { - struct UdpardFragment* const next = handle->next; - memFreePayload(memory.payload, handle->origin); // May be NULL, is okay. 
- memFree(memory.fragment, sizeof(RxFragment), handle); - handle = next; - } + const tx_time_key_t* const key = (const tx_time_key_t*)user; + const tx_transfer_t* const tr = CAVL2_TO_OWNER(node, tx_transfer_t, index_deadline); // clang-format off + if (key->time < tr->deadline) { return -1; } + if (key->time > tr->deadline) { return +1; } + if (key->topic_hash < tr->topic_hash) { return -1; } + if (key->topic_hash > tr->topic_hash) { return +1; } + if (key->transfer_id < tr->transfer_id) { return -1; } + if (key->transfer_id > tr->transfer_id) { return +1; } + return 0; // clang-format on } - -// -------------------------------------------------- RX SLOT -------------------------------------------------- - -static void rxSlotFree(RxSlot* const self, const RxMemory memory) +static int32_t tx_cavl_compare_transfer(const void* const user, const udpard_tree_t* const node) { - UDPARD_ASSERT(self != NULL); - if (self->fragments != NULL) - { - rxFragmentDestroyTree(self->fragments->this, memory); - self->fragments = NULL; - } + const tx_transfer_key_t* const key = (const tx_transfer_key_t*)user; + const tx_transfer_t* const tr = CAVL2_TO_OWNER(node, tx_transfer_t, index_transfer); // clang-format off + if (key->topic_hash < tr->topic_hash) { return -1; } + if (key->topic_hash > tr->topic_hash) { return +1; } + if (key->transfer_id < tr->transfer_id) { return -1; } + if (key->transfer_id > tr->transfer_id) { return +1; } + return 0; // clang-format on } - -static void rxSlotRestart(RxSlot* const self, const UdpardTransferID transfer_id, const RxMemory memory) +static int32_t tx_cavl_compare_transfer_remote(const void* const user, const udpard_tree_t* const node) { - UDPARD_ASSERT(self != NULL); - rxSlotFree(self, memory); - self->ts_usec = TIMESTAMP_UNSET; // Will be assigned when the first frame of the transfer has arrived. - self->transfer_id = transfer_id; - self->max_index = 0; - self->eot_index = FRAME_INDEX_UNSET; - self->accepted_frames = 0; - self->payload_size = 0; + const tx_transfer_key_t* const key = (const tx_transfer_key_t*)user; + const tx_transfer_t* const tr = CAVL2_TO_OWNER(node, tx_transfer_t, index_transfer_ack); // clang-format off + if (key->topic_hash < tr->remote_topic_hash) { return -1; } + if (key->topic_hash > tr->remote_topic_hash) { return +1; } + if (key->transfer_id < tr->remote_transfer_id) { return -1; } + if (key->transfer_id > tr->remote_transfer_id) { return +1; } + return 0; // clang-format on } -/// This is a helper for rxSlotRestart that restarts the transfer for the next transfer-ID value. -/// The transfer-ID increment is necessary to weed out duplicate transfers. -static void rxSlotRestartAdvance(RxSlot* const self, const RxMemory memory) +static tx_transfer_t* tx_transfer_find(udpard_tx_t* const tx, const uint64_t topic_hash, const uint64_t transfer_id) { - rxSlotRestart(self, self->transfer_id + 1U, memory); + const tx_transfer_key_t key = { .topic_hash = topic_hash, .transfer_id = transfer_id }; + return CAVL2_TO_OWNER( + cavl2_find(tx->index_transfer, &key, &tx_cavl_compare_transfer), tx_transfer_t, index_transfer); } -typedef struct -{ - uint32_t frame_index; - bool accepted; - struct UdpardMemoryResource memory_fragment; -} RxSlotUpdateContext; - -static int_fast8_t rxSlotFragmentSearch(void* const user_reference, // NOSONAR Cavl API requires non-const. - const struct UdpardTreeNode* node) +/// True iff listed in at least one interface queue. 
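+/// In other words, at least one redundant interface still has frames of this transfer queued for ejection.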
+static bool tx_is_pending(const udpard_tx_t* const tx, const tx_transfer_t* const tr) { - UDPARD_ASSERT((user_reference != NULL) && (node != NULL)); - return compare32(((const RxSlotUpdateContext*) user_reference)->frame_index, - ((const RxFragmentTreeNode*) node)->this->frame_index); + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + if (is_listed(&tx->queue[i][tr->priority], &tr->queue[i])) { + return true; + } + } + return false; } -static struct UdpardTreeNode* rxSlotFragmentFactory(void* const user_reference) +/// Returns the head of the transfer chain; NULL on OOM. +static tx_frame_t* tx_spool(udpard_tx_t* const tx, + const udpard_mem_t memory, + const size_t mtu, + const meta_t meta, + const udpard_bytes_scattered_t payload) { - RxSlotUpdateContext* const ctx = (RxSlotUpdateContext*) user_reference; - UDPARD_ASSERT((ctx != NULL) && (ctx->memory_fragment.allocate != NULL) && - (ctx->memory_fragment.deallocate != NULL)); - struct UdpardTreeNode* out = NULL; - RxFragment* const frag = memAlloc(ctx->memory_fragment, sizeof(RxFragment)); - if (frag != NULL) - { - memZero(sizeof(RxFragment), frag); - out = &frag->tree.base; // this is not an escape bug, we retain the pointer via "this" - frag->frame_index = ctx->frame_index; - frag->tree.this = frag; // <-- right here, see? - ctx->accepted = true; + UDPARD_ASSERT(mtu > 0); + uint32_t prefix_crc = CRC_INITIAL; + tx_frame_t* head = NULL; + tx_frame_t* tail = NULL; + size_t frame_index = 0U; + size_t offset = 0U; + bytes_scattered_reader_t reader = { .cursor = &payload, .position = 0U }; + do { + // Compute the size of the next frame, allocate it and link it up in the chain. + const size_t progress = smaller(meta.transfer_payload_size - offset, mtu); + tx_frame_t* const item = tx_frame_new(tx, memory, progress + HEADER_SIZE_BYTES); + if (NULL == head) { + head = item; + } else { + tail->next = item; + } + tail = item; + // On OOM, deallocate the entire chain and quit. + if (NULL == tail) { + while (head != NULL) { + tx_frame_t* const next = head->next; + udpard_tx_refcount_dec(tx_frame_view(head)); + head = next; + } + break; + } + // Populate the frame contents. + byte_t* const payload_ptr = &tail->data[HEADER_SIZE_BYTES]; + bytes_scattered_read(&reader, progress, payload_ptr); + prefix_crc = crc_add(prefix_crc, progress, payload_ptr); + const byte_t* const end_of_header = + header_serialize(tail->data, meta, (uint32_t)frame_index, (uint32_t)offset, prefix_crc ^ CRC_OUTPUT_XOR); + UDPARD_ASSERT(end_of_header == payload_ptr); + (void)end_of_header; + // Advance the state. + ++frame_index; + offset += progress; + UDPARD_ASSERT(offset <= meta.transfer_payload_size); + } while (offset < meta.transfer_payload_size); + UDPARD_ASSERT((offset == meta.transfer_payload_size) || ((head == NULL) && (tail == NULL))); + return head; +} + +/// Derives the ack timeout for an outgoing transfer. +static udpard_us_t tx_ack_timeout(const udpard_us_t baseline, const udpard_prio_t prio, const size_t attempts) +{ + UDPARD_ASSERT(baseline > 0); + UDPARD_ASSERT(prio < UDPARD_PRIORITY_COUNT); + return baseline * (1LL << smaller((size_t)prio + attempts, 62)); // NOLINT(*-signed-bitwise) +} + +/// Updates the next attempt time and inserts the transfer into the staged index, unless the next scheduled +/// transmission time is too close to the deadline, in which case no further attempts will be made. +/// When invoking for the first time, staged_until must be set to the time of the first attempt (usually now). 
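+/// Illustrative schedule (not normative): with ack_baseline_timeout = 1 ms and priority value 0, the delay before
+/// each successive attempt doubles per tx_ack_timeout(): 1 ms, then 2 ms, then 4 ms, and so on;
+/// numerically greater priority values start with proportionally longer delays.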
+/// One can deduce whether further attempts are planned by checking if the transfer is in the staged index.
+///
+/// The idea is that retransmitting the transfer too close to the deadline is pointless, because
+/// the ack may arrive just after the deadline and the transfer would be considered failed anyway.
+/// The solution is to add a small margin before the deadline. The margin is derived using a simple heuristic,
+/// which is subject to review and improvement later on (this is not an API-visible trait).
+static void tx_stage_if(udpard_tx_t* const tx, tx_transfer_t* const tr)
+{
+    UDPARD_ASSERT(!cavl2_is_inserted(tx->index_staged, &tr->index_staged));
+    const uint_fast8_t epoch   = tr->epoch++;
+    const udpard_us_t  timeout = tx_ack_timeout(tx->ack_baseline_timeout, tr->priority, epoch);
+    tr->staged_until += timeout;
+    if ((tr->deadline - timeout) >= tr->staged_until) {
+        // Insert into staged index with deterministic tie-breaking.
+        const tx_time_key_t key = { .time        = tr->staged_until,
+                                    .topic_hash  = tr->topic_hash,
+                                    .transfer_id = tr->transfer_id };
+        // Ensure we didn't collide with another entry that should be unique.
+        const udpard_tree_t* const tree_staged = cavl2_find_or_insert(&tx->index_staged, //
+                                                                      &key,
+                                                                      tx_cavl_compare_staged,
+                                                                      &tr->index_staged,
+                                                                      cavl2_trivial_factory);
+        UDPARD_ASSERT(tree_staged == &tr->index_staged);
+        (void)tree_staged;
+    }
+}
+
+static void tx_purge_expired_transfers(udpard_tx_t* const self, const udpard_us_t now)
+{
+    while (true) { // we can use next_greater instead of doing min search every time
+        tx_transfer_t* const tr = CAVL2_TO_OWNER(cavl2_min(self->index_deadline), tx_transfer_t, index_deadline);
+        if ((tr != NULL) && (now > tr->deadline)) {
+            tx_transfer_retire(self, tr, false);
+            self->errors_expiration++;
+        } else {
+            break;
+        }
    }
-    return out; // OOM handled by the caller
 }

-/// States outliving each level of recursion while ejecting the transfer from the fragment tree.
-typedef struct
+static void tx_promote_staged_transfers(udpard_tx_t* const self, const udpard_us_t now)
 {
-    struct UdpardFragment* head; // Points to the first fragment in the list.
-    struct UdpardFragment* predecessor;
-    uint32_t crc;
-    size_t retain_size;
-    size_t offset;
-    RxMemory memory;
-} RxSlotEjectContext;
-
-/// See rxSlotEject() for details.
-/// The maximum recursion depth is ceil(1.44*log2(FRAME_INDEX_MAX+1)-0.328) = 45 levels.
-/// NOLINTNEXTLINE(misc-no-recursion) MISRA C:2012 rule 17.2
-static void rxSlotEjectFragment(RxFragment* const frag, RxSlotEjectContext* const ctx)
-{
-    UDPARD_ASSERT((frag != NULL) && (ctx != NULL));
-    if (frag->tree.base.lr[0] != NULL)
-    {
-        RxFragment* const child = ((RxFragmentTreeNode*) frag->tree.base.lr[0])->this;
-        UDPARD_ASSERT(child->frame_index < frag->frame_index);
-        UDPARD_ASSERT(child->tree.base.up == &frag->tree.base);
-        rxSlotEjectFragment(child, ctx); // NOSONAR recursion
-    }
-    const size_t fragment_size = frag->base.view.size;
-    frag->base.next = NULL; // Default state; may be overwritten.
-    ctx->crc = transferCRCAdd(ctx->crc, fragment_size, frag->base.view.data);
-    // Truncate unnecessary payload past the specified limit. This enforces the extent and removes the transfer CRC.
-    const bool retain = ctx->offset < ctx->retain_size;
-    if (retain)
-    {
-        UDPARD_ASSERT(ctx->retain_size >= ctx->offset);
-        ctx->head = (ctx->head == NULL) ?
&frag->base : ctx->head; - frag->base.view.size = smaller(frag->base.view.size, ctx->retain_size - ctx->offset); - if (ctx->predecessor != NULL) - { - ctx->predecessor->next = &frag->base; + while (true) { // we can use next_greater instead of doing min search every time + tx_transfer_t* const tr = CAVL2_TO_OWNER(cavl2_min(self->index_staged), tx_transfer_t, index_staged); + if ((tr != NULL) && (now >= tr->staged_until)) { + // Reinsert into the staged index at the new position, when the next attempt is due (if any). + cavl2_remove(&self->index_staged, &tr->index_staged); + tx_stage_if(self, tr); + // Enqueue for transmission unless it's been there since the last attempt (stalled interface?) + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + if (((tr->iface_bitmap & (1U << i)) != 0) && !is_listed(&self->queue[i][tr->priority], &tr->queue[i])) { + UDPARD_ASSERT(tr->head[i] != NULL); // cannot stage without payload, doesn't make sense + UDPARD_ASSERT(tr->cursor[i] == tr->head[i]); // must have been rewound after last attempt + enlist_head(&self->queue[i][tr->priority], &tr->queue[i]); + } + } + } else { + break; } - ctx->predecessor = &frag->base; - } - // Adjust the offset of the next fragment and descend into it. Keep the sub-tree alive for now even if not needed. - ctx->offset += fragment_size; - if (frag->tree.base.lr[1] != NULL) - { - RxFragment* const child = ((RxFragmentTreeNode*) frag->tree.base.lr[1])->this; - UDPARD_ASSERT(child->frame_index > frag->frame_index); - UDPARD_ASSERT(child->tree.base.up == &frag->tree.base); - rxSlotEjectFragment(child, ctx); // NOSONAR recursion - } - // Drop the unneeded fragments and their handles after the sub-tree is fully traversed. - if (!retain) - { - memFreePayload(ctx->memory.payload, frag->base.origin); - memFree(ctx->memory.fragment, sizeof(RxFragment), frag); } } -/// This function finalizes the fragmented transfer payload by doing multiple things in one pass through the tree: -/// -/// - Compute the transfer-CRC. The caller should verify the result. -/// - Build a linked list of fragments ordered by frame index, as the application would expect it. -/// - Truncate the payload according to the specified size limit. -/// - Free the tree nodes and their payload buffers past the size limit. -/// -/// It is guaranteed that the output list is sorted by frame index. It may be empty. -/// After this function is invoked, the tree will be destroyed and cannot be used anymore; -/// hence, in the event of invalid transfer being received (bad CRC), the fragments will have to be freed -/// by traversing the linked list instead of the tree. -/// -/// The payload shall contain at least the transfer CRC, so the minimum size is TRANSFER_CRC_SIZE_BYTES. -/// There shall be at least one fragment (because a Cyphal transfer contains at least one frame). -/// -/// The return value indicates whether the transfer is valid (CRC is correct). -static bool rxSlotEject(size_t* const out_payload_size, - struct UdpardFragment* const out_payload_head, - RxFragmentTreeNode* const fragment_tree, - const size_t received_total_size, // With CRC. 
- const size_t extent, - const RxMemory memory) -{ - UDPARD_ASSERT((received_total_size >= TRANSFER_CRC_SIZE_BYTES) && (fragment_tree != NULL) && - (out_payload_size != NULL) && (out_payload_head != NULL)); - bool result = false; - RxSlotEjectContext eject_ctx = { - .head = NULL, - .predecessor = NULL, - .crc = TRANSFER_CRC_INITIAL, - .retain_size = smaller(received_total_size - TRANSFER_CRC_SIZE_BYTES, extent), - .offset = 0, - .memory = memory, - }; - rxSlotEjectFragment(fragment_tree->this, &eject_ctx); - UDPARD_ASSERT(eject_ctx.offset == received_total_size); // Ensure we have traversed the entire tree. - if (TRANSFER_CRC_RESIDUE_BEFORE_OUTPUT_XOR == eject_ctx.crc) - { - result = true; - *out_payload_size = eject_ctx.retain_size; - if (eject_ctx.head != NULL) - { - // This is the single-frame transfer optimization suggested by Scott: we free the first fragment handle - // early by moving the contents into the rx_transfer structure by value. - // No need to free the payload buffer because it has been transferred to the transfer. - *out_payload_head = *eject_ctx.head; // Slice off the derived type fields as they are not needed. - memFree(memory.fragment, sizeof(RxFragment), eject_ctx.head); - } - else - { - *out_payload_head = (struct UdpardFragment) {.next = NULL, .view = {0, NULL}, .origin = {0, NULL}}; +/// A transfer can use the same fragments between two interfaces if +/// (both have the same MTU OR the transfer fits in both MTU) AND both use the same allocator. +/// Either they will share the same spool, or there is only a single frame so the MTU difference does not matter. +/// The allocator requirement is important because it is possible that distinct NICs may not be able to reach the +/// same memory region via DMA. +static bool tx_spool_shareable(const size_t mtu_a, + const udpard_mem_t mem_a, + const size_t mtu_b, + const udpard_mem_t mem_b, + const size_t payload_size) +{ + return ((mtu_a == mtu_b) || (payload_size <= smaller(mtu_a, mtu_b))) && mem_same(mem_a, mem_b); +} + +/// The prediction takes into account that some interfaces may share the same frame spool. +static size_t tx_predict_frame_count(const size_t mtu[UDPARD_IFACE_COUNT_MAX], + const udpard_mem_t memory[UDPARD_IFACE_COUNT_MAX], + const uint16_t iface_bitmap, + const size_t payload_size) +{ + UDPARD_ASSERT((iface_bitmap & UDPARD_IFACE_BITMAP_ALL) != 0); // The caller ensures this + size_t n_frames_total = 0; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + UDPARD_ASSERT(mtu[i] > 0); + if ((iface_bitmap & (1U << i)) != 0) { + bool shared = false; + for (size_t j = 0; j < i; j++) { + shared = shared || (((iface_bitmap & (1U << j)) != 0) && + tx_spool_shareable(mtu[i], memory[i], mtu[j], memory[j], payload_size)); + } + if (!shared) { + n_frames_total += larger(1, (payload_size + mtu[i] - 1U) / mtu[i]); + } } } - else // The transfer turned out to be invalid. We have to free the fragments. Can't use the tree anymore. - { - rxFragmentDestroyList(eject_ctx.head, memory); - } - return result; + UDPARD_ASSERT(n_frames_total > 0); // The caller ensures that at least one endpoint is valid. + return n_frames_total; } -/// Update the frame count discovery state in this transfer. -/// Returns true on success, false if inconsistencies are detected and the slot should be restarted. 
-static bool rxSlotAccept_UpdateFrameCount(RxSlot* const self, const RxFrameBase frame) +static bool tx_push(udpard_tx_t* const tx, + const udpard_us_t now, + const udpard_us_t deadline, + const meta_t meta, + const uint16_t iface_bitmap, + const udpard_udpip_ep_t p2p_destination[UDPARD_IFACE_COUNT_MAX], + const udpard_bytes_scattered_t payload, + void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), + const udpard_user_context_t user, + tx_transfer_t** const out_transfer) { - UDPARD_ASSERT((self != NULL) && (frame.payload.size > 0)); - bool ok = true; - self->max_index = max32(self->max_index, frame.index); - if (frame.end_of_transfer) - { - if ((self->eot_index != FRAME_INDEX_UNSET) && (self->eot_index != frame.index)) - { - ok = false; // Inconsistent EOT flag, could be a node-ID conflict. + UDPARD_ASSERT(now <= deadline); + UDPARD_ASSERT(tx != NULL); + UDPARD_ASSERT((iface_bitmap & UDPARD_IFACE_BITMAP_ALL) != 0); + UDPARD_ASSERT((iface_bitmap & UDPARD_IFACE_BITMAP_ALL) == iface_bitmap); + + // Purge expired transfers before accepting a new one to make room in the queue. + tx_purge_expired_transfers(tx, now); + + // Promote staged transfers that are now eligible for retransmission to ensure fairness: + // if they have the same priority as the new transfer, they should get a chance to go first. + tx_promote_staged_transfers(tx, now); + + // Construct the empty transfer object, without the frames for now. The frame spools will be constructed next. + tx_transfer_t* const tr = mem_alloc(tx->memory.transfer, sizeof(tx_transfer_t)); + if (tr == NULL) { + tx->errors_oom++; + return false; + } + mem_zero(sizeof(*tr), tr); + tr->epoch = 0; + tr->staged_until = now; + tr->topic_hash = meta.topic_hash; + tr->transfer_id = meta.transfer_id; + tr->remote_topic_hash = meta.topic_hash; + tr->remote_transfer_id = meta.transfer_id; + tr->deadline = deadline; + tr->reliable = meta.flag_reliable; + tr->priority = meta.priority; + tr->iface_bitmap = iface_bitmap; + tr->user = user; + tr->feedback = feedback; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + tr->p2p_destination[i] = p2p_destination[i]; + tr->head[i] = tr->cursor[i] = NULL; + } + + // Ensure the queue has enough space. + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + tx->mtu[i] = larger(tx->mtu[i], UDPARD_MTU_MIN); // enforce minimum MTU + } + const size_t n_frames = + tx_predict_frame_count(tx->mtu, tx->memory.payload, iface_bitmap, meta.transfer_payload_size); + UDPARD_ASSERT(n_frames > 0); + if (!tx_ensure_queue_space(tx, n_frames)) { + mem_free(tx->memory.transfer, sizeof(tx_transfer_t), tr); + tx->errors_capacity++; + return false; + } + + // Spool the frames for each interface, with deduplication where possible to conserve memory and queue space. + const size_t enqueued_frames_before = tx->enqueued_frames_count; + bool oom = false; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + if ((tr->iface_bitmap & (1U << i)) != 0) { + if (tr->head[i] == NULL) { + tr->head[i] = tx_spool(tx, tx->memory.payload[i], tx->mtu[i], meta, payload); + tr->cursor[i] = tr->head[i]; + if (tr->head[i] == NULL) { + oom = true; + break; + } + // Detect which interfaces can use the same spool to conserve memory. 
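+                // (Sharing is tracked by reference counting: every frame of the shared chain receives one
+                //  extra reference per additional interface, so the spool is freed only by the last release.)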
+ for (size_t j = i + 1; j < UDPARD_IFACE_COUNT_MAX; j++) { + if (((tr->iface_bitmap & (1U << j)) != 0) && tx_spool_shareable(tx->mtu[i], + tx->memory.payload[i], + tx->mtu[j], + tx->memory.payload[j], + meta.transfer_payload_size)) { + tr->head[j] = tr->head[i]; + tr->cursor[j] = tr->cursor[i]; + tx_frame_t* frame = tr->head[j]; + while (frame != NULL) { + frame->refcount++; + frame = frame->next; + } + } + } + } } - self->eot_index = frame.index; } - UDPARD_ASSERT(frame.index <= self->max_index); - if (self->max_index > self->eot_index) - { - ok = false; // Frames past EOT found, discard the entire transfer because we don't trust it anymore. + if (oom) { + tx_transfer_free_payload(tr); + mem_free(tx->memory.transfer, sizeof(tx_transfer_t), tr); + tx->errors_oom++; + return false; } - return ok; -} + UDPARD_ASSERT((tx->enqueued_frames_count - enqueued_frames_before) == n_frames); + UDPARD_ASSERT(tx->enqueued_frames_count <= tx->enqueued_frames_limit); + (void)enqueued_frames_before; -/// Insert the fragment into the fragment tree. If it already exists, drop and free the duplicate. -/// Returns 0 if the fragment is not needed, 1 if it is needed, negative on error. -/// The fragment shall be deallocated unless the return value is 1. -static int_fast8_t rxSlotAccept_InsertFragment(RxSlot* const self, const RxFrameBase frame, const RxMemory memory) -{ - UDPARD_ASSERT((self != NULL) && (frame.payload.size > 0) && (self->max_index <= self->eot_index) && - (self->accepted_frames <= self->eot_index)); - RxSlotUpdateContext update_ctx = {.frame_index = frame.index, - .accepted = false, - .memory_fragment = memory.fragment}; - RxFragmentTreeNode* const frag = (RxFragmentTreeNode*) cavlSearch((struct UdpardTreeNode**) &self->fragments, // - &update_ctx, - &rxSlotFragmentSearch, - &rxSlotFragmentFactory); - int_fast8_t result = update_ctx.accepted ? 1 : 0; - if (frag == NULL) - { - UDPARD_ASSERT(!update_ctx.accepted); - result = -UDPARD_ERROR_MEMORY; - // No restart because there is hope that there will be enough memory when we receive a duplicate. - } - UDPARD_ASSERT(self->max_index <= self->eot_index); - if (update_ctx.accepted) - { - UDPARD_ASSERT((result > 0) && (frag->this->frame_index == frame.index)); - frag->this->base.view = frame.payload; - frag->this->base.origin = frame.origin; - self->payload_size += frame.payload.size; - self->accepted_frames++; - } - return result; -} - -/// Detect transfer completion. If complete, eject the payload from the fragment tree and check its CRC. -/// The return value is passed over from rxSlotEject. -static int_fast8_t rxSlotAccept_FinalizeMaybe(RxSlot* const self, - size_t* const out_transfer_payload_size, - struct UdpardFragment* const out_transfer_payload_head, - const size_t extent, - const RxMemory memory) -{ - UDPARD_ASSERT((self != NULL) && (out_transfer_payload_size != NULL) && (out_transfer_payload_head != NULL) && - (self->fragments != NULL)); - int_fast8_t result = 0; - if (self->accepted_frames > self->eot_index) // Mind the off-by-one: cardinal vs. ordinal. - { - if (self->payload_size >= TRANSFER_CRC_SIZE_BYTES) - { - result = rxSlotEject(out_transfer_payload_size, - out_transfer_payload_head, - self->fragments, - self->payload_size, - extent, - memory) - ? 1 - : 0; - // The tree is now unusable and the data is moved into rx_transfer. - self->fragments = NULL; - } - rxSlotRestartAdvance(self, memory); // Restart needed even if invalid. 
- } - return result; -} - -/// This function will either move the frame payload into the session, or free it if it can't be used. -/// Upon return, certain state fields may be overwritten, so the caller should not rely on them. -/// Returns: 1 -- transfer available, payload written; 0 -- transfer not yet available; <0 -- error. -static int_fast8_t rxSlotAccept(RxSlot* const self, - size_t* const out_transfer_payload_size, - struct UdpardFragment* const out_transfer_payload_head, - const RxFrameBase frame, - const size_t extent, - const RxMemory memory) -{ - UDPARD_ASSERT((self != NULL) && (frame.payload.size > 0) && (out_transfer_payload_size != NULL) && - (out_transfer_payload_head != NULL)); - int_fast8_t result = 0; - bool release = true; - if (rxSlotAccept_UpdateFrameCount(self, frame)) - { - result = rxSlotAccept_InsertFragment(self, frame, memory); - UDPARD_ASSERT(result <= 1); - if (result > 0) - { - release = false; - result = rxSlotAccept_FinalizeMaybe(self, // - out_transfer_payload_size, - out_transfer_payload_head, - extent, - memory); + // Enqueue for transmission immediately. + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + if ((tr->iface_bitmap & (1U << i)) != 0) { + enlist_head(&tx->queue[i][tr->priority], &tr->queue[i]); } } - else - { - rxSlotRestartAdvance(self, memory); - } - if (release) - { - memFreePayload(memory.payload, frame.origin); + // Add to the staged index so that it is repeatedly re-enqueued later until acknowledged or expired. + if (meta.flag_reliable) { + tx_stage_if(tx, tr); + } + // Add to the deadline index for expiration management. + // Insert into deadline index with deterministic tie-breaking. + const tx_time_key_t deadline_key = { .time = tr->deadline, + .topic_hash = tr->topic_hash, + .transfer_id = tr->transfer_id }; + // Ensure we didn't collide with another entry that should be unique. + const udpard_tree_t* const tree_deadline = cavl2_find_or_insert( + &tx->index_deadline, &deadline_key, tx_cavl_compare_deadline, &tr->index_deadline, cavl2_trivial_factory); + UDPARD_ASSERT(tree_deadline == &tr->index_deadline); + (void)tree_deadline; + // Add to the transfer index for incoming ack management. + const tx_transfer_key_t transfer_key = { .topic_hash = tr->topic_hash, .transfer_id = tr->transfer_id }; + const udpard_tree_t* const tree_transfer = cavl2_find_or_insert( + &tx->index_transfer, &transfer_key, tx_cavl_compare_transfer, &tr->index_transfer, cavl2_trivial_factory); + UDPARD_ASSERT(tree_transfer == &tr->index_transfer); // ensure no duplicates; checked at the API level + (void)tree_transfer; + // Add to the agewise list for sacrifice management on queue exhaustion. + enlist_head(&tx->agewise, &tr->agewise); + + // Finalize. + if (out_transfer != NULL) { + *out_transfer = tr; + } + return true; +} + +/// Handle an ACK received from a remote node. +static void tx_receive_ack(udpard_rx_t* const rx, const uint64_t topic_hash, const uint64_t transfer_id) +{ + if (rx->tx != NULL) { + tx_transfer_t* const tr = tx_transfer_find(rx->tx, topic_hash, transfer_id); + if ((tr != NULL) && tr->reliable) { + tx_transfer_retire(rx->tx, tr, true); + } } - UDPARD_ASSERT(result <= 1); - return result; } -// -------------------------------------------------- RX IFACE -------------------------------------------------- +/// Generate an ack transfer for the specified remote transfer. +/// Do nothing if an ack for the same transfer is already enqueued with equal or better endpoint coverage. 
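+/// The ack payload is just the (topic_hash, transfer_id) pair of the acknowledged transfer, serialized as two
+/// u64 values (hence ACK_SIZE_BYTES == 16); it is sent best-effort as a P2P transfer whose topic hash is the
+/// UID of the remote node, as can be seen from the meta_t initialization below.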
+static void tx_send_ack(udpard_rx_t* const rx,
+                        const udpard_us_t now,
+                        const udpard_prio_t priority,
+                        const uint64_t topic_hash,
+                        const uint64_t transfer_id,
+                        const udpard_remote_t remote)
+{
+    udpard_tx_t* const tx = rx->tx;
+    if (tx != NULL) {
+        // Check if an ack for this transfer is already enqueued.
+        const tx_transfer_key_t key = { .topic_hash = topic_hash, .transfer_id = transfer_id };
+        tx_transfer_t* const prior =
+            CAVL2_TO_OWNER(cavl2_find(tx->index_transfer_ack, &key, &tx_cavl_compare_transfer_remote),
+                           tx_transfer_t,
+                           index_transfer_ack);
+        const uint16_t prior_ep_bitmap = (prior != NULL) ? valid_ep_bitmap(prior->p2p_destination) : 0U;
+        UDPARD_ASSERT((prior == NULL) || (prior_ep_bitmap == prior->iface_bitmap));
+        const uint16_t new_ep_bitmap = valid_ep_bitmap(remote.endpoints);
+        const bool new_better = (new_ep_bitmap & (uint16_t)(~prior_ep_bitmap)) != 0U;
+        if (!new_better) {
+            return; // The already-enqueued ack covers these endpoints at least as well; nothing to gain.
+        }
+        if (prior != NULL) { // avoid redundant acks for the same transfer -- replace with better one
+            UDPARD_ASSERT(prior->feedback == NULL);
+            tx_transfer_retire(tx, prior, false); // this will free up a queue slot and some memory
+        }
+        // Even if the new, better ack fails to enqueue for some reason, it's no big deal -- we will send the next one.
+        // The only reason it might fail is an OOM but we just freed a slot so it should be fine.
+
+        // Serialize the ACK payload.
+        byte_t message[ACK_SIZE_BYTES];
+        byte_t* ptr = message;
+        ptr = serialize_u64(ptr, topic_hash);
+        ptr = serialize_u64(ptr, transfer_id);
+        UDPARD_ASSERT((ptr - message) == ACK_SIZE_BYTES);
+        (void)ptr;
+
+        // Enqueue the transfer.
+        const udpard_bytes_t payload = { .size = ACK_SIZE_BYTES, .data = message };
+        const meta_t meta = {
+            .priority = priority,
+            .flag_reliable = false,
+            .flag_acknowledgement = true,
+            .transfer_payload_size = (uint32_t)payload.size,
+            .transfer_id = tx->p2p_transfer_id++,
+            .sender_uid = tx->local_uid,
+            .topic_hash = remote.uid,
+        };
+        tx_transfer_t* tr = NULL;
+        const bool pushed = tx_push(tx, // tx_push() reports success as a bool, not a frame count.
+                                    now,
+                                    now + ACK_TX_DEADLINE,
+                                    meta,
+                                    new_ep_bitmap,
+                                    remote.endpoints,
+                                    (udpard_bytes_scattered_t){ .bytes = payload, .next = NULL },
+                                    NULL,
+                                    UDPARD_USER_CONTEXT_NULL,
+                                    &tr);
+        if (pushed) { // An ack always fits into a single frame, so the push either fully succeeds or fails.
+            UDPARD_ASSERT(tr != NULL);
+            tr->remote_topic_hash = topic_hash;
+            tr->remote_transfer_id = transfer_id;
+            (void)cavl2_find_or_insert(&tx->index_transfer_ack,
+                                       &key,
+                                       tx_cavl_compare_transfer_remote,
+                                       &tr->index_transfer_ack,
+                                       cavl2_trivial_factory);
+        } else {
+            rx->errors_ack_tx++;
+        }
+    } else {
+        rx->errors_ack_tx++;
+    }
+}
+
+bool udpard_tx_new(udpard_tx_t* const self,
+                   const uint64_t local_uid,
+                   const uint64_t p2p_transfer_id_initial,
+                   const size_t enqueued_frames_limit,
+                   const udpard_tx_mem_resources_t memory,
+                   const udpard_tx_vtable_t* const vtable)
+{
+    const bool ok = (NULL != self) && (local_uid != 0) && tx_validate_mem_resources(memory) && (vtable != NULL) &&
+                    (vtable->eject_subject != NULL) && (vtable->eject_p2p != NULL);
+    if (ok) {
+        mem_zero(sizeof(*self), self);
+        self->vtable = vtable;
+        self->local_uid = local_uid;
+        self->p2p_transfer_id = p2p_transfer_id_initial;
+        self->ack_baseline_timeout = UDPARD_TX_ACK_BASELINE_TIMEOUT_DEFAULT_us;
+        self->enqueued_frames_limit = enqueued_frames_limit;
+        self->enqueued_frames_count = 0;
+        self->memory = memory;
+        self->index_staged = NULL;
+        self->index_deadline = NULL;
+
self->index_transfer = NULL; + self->user = NULL; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + self->mtu[i] = UDPARD_MTU_DEFAULT; + for (size_t p = 0; p < UDPARD_PRIORITY_COUNT; p++) { + self->queue[i][p].head = NULL; + self->queue[i][p].tail = NULL; + } + } + } + return ok; +} -/// Whether the supplied transfer-ID is greater than all transfer-IDs in the RX slots. -/// This indicates that the new transfer is not a duplicate and should be accepted. -static bool rxIfaceIsFutureTransferID(const RxIface* const self, const UdpardTransferID transfer_id) -{ - bool is_future_tid = true; - for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++) // Expected to be unrolled by the compiler. - { - is_future_tid = is_future_tid && ((self->slots[i].transfer_id < transfer_id) || - (self->slots[i].transfer_id == TRANSFER_ID_UNSET)); +bool udpard_tx_push(udpard_tx_t* const self, + const udpard_us_t now, + const udpard_us_t deadline, + const uint16_t iface_bitmap, + const udpard_prio_t priority, + const uint64_t topic_hash, + const uint64_t transfer_id, + const udpard_bytes_scattered_t payload, + void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort. + const udpard_user_context_t user) +{ + bool ok = (self != NULL) && (deadline >= now) && (now >= 0) && (self->local_uid != 0) && + ((iface_bitmap & UDPARD_IFACE_BITMAP_ALL) != 0) && (priority < UDPARD_PRIORITY_COUNT) && + ((payload.bytes.data != NULL) || (payload.bytes.size == 0U)) && + (tx_transfer_find(self, topic_hash, transfer_id) == NULL); + if (ok) { + const meta_t meta = { + .priority = priority, + .flag_reliable = feedback != NULL, + .transfer_payload_size = (uint32_t)bytes_scattered_size(payload), + .transfer_id = transfer_id, + .sender_uid = self->local_uid, + .topic_hash = topic_hash, + }; + const udpard_udpip_ep_t blank_ep[UDPARD_IFACE_COUNT_MAX] = { 0 }; + ok = tx_push(self, // -------------------------------------- + now, + deadline, + meta, + iface_bitmap & UDPARD_IFACE_BITMAP_ALL, + blank_ep, + payload, + feedback, + user, + NULL); } - return is_future_tid; + return ok; } -/// Whether the time that has passed since the last accepted first frame of a transfer exceeds the TID timeout. -/// This indicates that the transfer should be accepted even if its transfer-ID is not greater than all transfer-IDs -/// in the RX slots. -static bool rxIfaceCheckTransferIDTimeout(const RxIface* const self, - const UdpardMicrosecond ts_usec, - const UdpardMicrosecond transfer_id_timeout_usec) -{ - // We use the RxIface state here because the RxSlot state is reset between transfers. - // If there is reassembly in progress, we want to use the timestamps from these in-progress transfers, - // as that eliminates the risk of a false-positive TID-timeout detection. - UdpardMicrosecond most_recent_ts_usec = self->ts_usec; - for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++) // Expected to be unrolled by the compiler. - { - if ((most_recent_ts_usec == TIMESTAMP_UNSET) || - ((self->slots[i].ts_usec != TIMESTAMP_UNSET) && (self->slots[i].ts_usec > most_recent_ts_usec))) - { - most_recent_ts_usec = self->slots[i].ts_usec; +bool udpard_tx_push_p2p(udpard_tx_t* const self, + const udpard_us_t now, + const udpard_us_t deadline, + const udpard_prio_t priority, + const udpard_remote_t remote, + const udpard_bytes_scattered_t payload, + void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort. 
+ const udpard_user_context_t user, + uint64_t* const out_transfer_id) +{ + const uint16_t iface_bitmap = valid_ep_bitmap(remote.endpoints); + bool ok = (self != NULL) && (deadline >= now) && (now >= 0) && (self->local_uid != 0) && (iface_bitmap != 0) && + (priority < UDPARD_PRIORITY_COUNT) && ((payload.bytes.data != NULL) || (payload.bytes.size == 0U)); + if (ok) { + const meta_t meta = { + .priority = priority, + .flag_reliable = feedback != NULL, + .transfer_payload_size = (uint32_t)bytes_scattered_size(payload), + .transfer_id = self->p2p_transfer_id++, + .sender_uid = self->local_uid, + .topic_hash = remote.uid, + }; + tx_transfer_t* tr = NULL; + ok = tx_push(self, now, deadline, meta, iface_bitmap, remote.endpoints, payload, feedback, user, &tr); + UDPARD_ASSERT((!ok) || (tr->transfer_id == meta.transfer_id)); + if (ok && (out_transfer_id != NULL)) { + *out_transfer_id = tr->transfer_id; } } - return (most_recent_ts_usec == TIMESTAMP_UNSET) || - ((ts_usec >= most_recent_ts_usec) && ((ts_usec - most_recent_ts_usec) >= transfer_id_timeout_usec)); + return ok; } -/// Traverses the list of slots trying to find a slot with a matching transfer-ID that is already IN PROGRESS. -/// If there is no such slot, tries again without the IN PROGRESS requirement. -/// The purpose of this complicated dual check is to support the case where multiple slots have the same -/// transfer-ID, which may occur with interleaved transfers. -static RxSlot* rxIfaceFindMatchingSlot(RxSlot slots[RX_SLOT_COUNT], const UdpardTransferID transfer_id) +static void tx_eject_pending_frames(udpard_tx_t* const self, const udpard_us_t now, const uint_fast8_t ifindex) { - RxSlot* slot = NULL; - for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++) - { - if ((slots[i].transfer_id == transfer_id) && (slots[i].ts_usec != TIMESTAMP_UNSET)) - { - slot = &slots[i]; - break; + while (true) { + // Find the highest-priority pending transfer. + tx_transfer_t* tr = NULL; + for (size_t prio = 0; prio < UDPARD_PRIORITY_COUNT; prio++) { + tx_transfer_t* const candidate = // This pointer arithmetic is ugly and perhaps should be improved + ptr_unbias(self->queue[ifindex][prio].tail, + offsetof(tx_transfer_t, queue) + (sizeof(udpard_listed_t) * ifindex)); + if (candidate != NULL) { + tr = candidate; + break; + } } - } - if (slot == NULL) - { - for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++) + if (tr == NULL) { + break; // No pending transfers at the moment. Find something else to do. + } + UDPARD_ASSERT(tr->cursor[ifindex] != NULL); // cannot be pending without payload, doesn't make sense + UDPARD_ASSERT(tr->priority < UDPARD_PRIORITY_COUNT); + + // Eject the frame. + const tx_frame_t* const frame = tr->cursor[ifindex]; + tx_frame_t* const frame_next = frame->next; + const bool last_attempt = !cavl2_is_inserted(self->index_staged, &tr->index_staged); + const bool last_frame = frame_next == NULL; // if not last attempt we will have to rewind to head. { - if (slots[i].transfer_id == transfer_id) - { - slot = &slots[i]; - break; + udpard_tx_ejection_t ejection = { .now = now, + .deadline = tr->deadline, + .iface_index = ifindex, + .dscp = self->dscp_value_per_priority[tr->priority], + .datagram = tx_frame_view(frame), + .user = tr->user }; + const bool ep_valid = udpard_is_valid_endpoint(tr->p2p_destination[ifindex]); + UDPARD_ASSERT((!ep_valid) || ((tr->iface_bitmap & (1U << ifindex)) != 0U)); + const bool ejected = ep_valid ? 
self->vtable->eject_p2p(self, &ejection, tr->p2p_destination[ifindex]) + : self->vtable->eject_subject(self, &ejection); + if (!ejected) { // The easy case -- no progress was made at this time; + break; // don't change anything, just try again later as-is + } + } + + // Frame ejected successfully. Update the transfer state to get ready for the next frame. + if (last_attempt) { // no need to keep frames that we will no longer use; free early to reduce pressure + UDPARD_ASSERT(tr->head[ifindex] == tr->cursor[ifindex]); + tr->head[ifindex] = frame_next; + udpard_tx_refcount_dec(tx_frame_view(frame)); + } + tr->cursor[ifindex] = frame_next; + + // Finalize the transmission if this was the last frame of the transfer. + if (last_frame) { + tr->cursor[ifindex] = tr->head[ifindex]; + delist(&self->queue[ifindex][tr->priority], &tr->queue[ifindex]); // no longer pending for transmission + UDPARD_ASSERT(!last_attempt || (tr->head[ifindex] == NULL)); // this iface is done with the payload + if (last_attempt && !tr->reliable && !tx_is_pending(self, tr)) { // remove early once all ifaces are done + UDPARD_ASSERT(tr->feedback == NULL); // non-reliable transfers have no feedback callback + tx_transfer_retire(self, tr, true); } } } - return slot; } -/// This function is invoked when a new datagram pertaining to a certain session is received on an interface. -/// This function will either move the frame payload into the session, or free it if it cannot be made use of. -/// Returns: 1 -- transfer available; 0 -- transfer not yet available; <0 -- error. -static int_fast8_t rxIfaceAccept(RxIface* const self, - const UdpardMicrosecond ts_usec, - const RxFrame frame, - const size_t extent, - const UdpardMicrosecond transfer_id_timeout_usec, - const RxMemory memory, - struct UdpardRxTransfer* const out_transfer) -{ - UDPARD_ASSERT((self != NULL) && (frame.base.payload.size > 0) && (out_transfer != NULL)); - RxSlot* slot = rxIfaceFindMatchingSlot(self->slots, frame.meta.transfer_id); - // If there is no suitable slot, we should check if the transfer is a future one (high transfer-ID), - // or a transfer-ID timeout has occurred. In this case we sacrifice the oldest slot. - if (slot == NULL) - { - // The timestamp is UNSET when the slot is waiting for the next transfer. - // Such slots are the best candidates for replacement because reusing them does not cause loss of - // transfers that are in the process of being reassembled. If there are no such slots, we must - // sacrifice the one whose first frame has arrived the longest time ago. - RxSlot* victim = &self->slots[0]; - for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++) // Expected to be unrolled by the compiler. - { - if ((self->slots[i].ts_usec == TIMESTAMP_UNSET) || - ((victim->ts_usec != TIMESTAMP_UNSET) && (self->slots[i].ts_usec < victim->ts_usec))) - { - victim = &self->slots[i]; +void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now, const uint16_t iface_bitmap) +{ + if ((self != NULL) && (now >= 0)) { // This is the main scheduler state machine update tick. + tx_purge_expired_transfers(self, now); // This may free up some memory and some queue slots. + tx_promote_staged_transfers(self, now); // This may add some new transfers to the queue. 
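+        // Drain each selected interface independently, highest priority first; ejection stops as soon as
+        // the driver declines a frame, so a stalled interface cannot hold back the other interfaces.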
+ for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + if ((iface_bitmap & (1U << i)) != 0U) { + tx_eject_pending_frames(self, now, i); } } - if (rxIfaceIsFutureTransferID(self, frame.meta.transfer_id) || - rxIfaceCheckTransferIDTimeout(self, ts_usec, transfer_id_timeout_usec)) - { - rxSlotRestart(victim, frame.meta.transfer_id, memory); - slot = victim; - UDPARD_ASSERT(slot != NULL); - } } - // If there is a suitable slot (perhaps a newly created one for this frame), update it. - // If there is neither a suitable slot nor a new one was created, the frame cannot be used. - int_fast8_t result = 0; - if (slot != NULL) - { - if (slot->ts_usec == TIMESTAMP_UNSET) - { - slot->ts_usec = ts_usec; // Transfer timestamp is the timestamp of the earliest frame. - } - const UdpardMicrosecond ts = slot->ts_usec; - UDPARD_ASSERT(slot->transfer_id == frame.meta.transfer_id); - result = rxSlotAccept(slot, // May invalidate state variables such as timestamp or transfer-ID. - &out_transfer->payload_size, - &out_transfer->payload, - frame.base, - extent, - memory); - if (result > 0) // Transfer successfully received, populate the transfer descriptor for the client. - { - self->ts_usec = ts; // Update the last valid transfer timestamp on this iface. - out_transfer->timestamp_usec = ts; - out_transfer->priority = frame.meta.priority; - out_transfer->source_node_id = frame.meta.src_node_id; - out_transfer->transfer_id = frame.meta.transfer_id; +} + +bool udpard_tx_cancel(udpard_tx_t* const self, const uint64_t topic_hash, const uint64_t transfer_id) +{ + bool cancelled = false; + if (self != NULL) { + tx_transfer_t* const tr = tx_transfer_find(self, topic_hash, transfer_id); + if (tr != NULL) { + tx_transfer_retire(self, tr, false); + cancelled = true; } } - else - { - memFreePayload(memory.payload, frame.base.origin); - } - return result; + return cancelled; } -static void rxIfaceInit(RxIface* const self, const RxMemory memory) +size_t udpard_tx_cancel_all(udpard_tx_t* const self, const uint64_t topic_hash) { - UDPARD_ASSERT(self != NULL); - memZero(sizeof(*self), self); - self->ts_usec = TIMESTAMP_UNSET; - for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++) - { - self->slots[i].fragments = NULL; - rxSlotRestart(&self->slots[i], TRANSFER_ID_UNSET, memory); + size_t count = 0; + if (self != NULL) { + // Find the first transfer with matching topic_hash using transfer_id=0 as lower bound. + const tx_transfer_key_t key = { .topic_hash = topic_hash, .transfer_id = 0 }; + tx_transfer_t* tr = CAVL2_TO_OWNER( + cavl2_lower_bound(self->index_transfer, &key, &tx_cavl_compare_transfer), tx_transfer_t, index_transfer); + // Iterate through all transfers with the same topic_hash. + while ((tr != NULL) && (tr->topic_hash == topic_hash)) { + tx_transfer_t* const next = + CAVL2_TO_OWNER(cavl2_next_greater(&tr->index_transfer), tx_transfer_t, index_transfer); + tx_transfer_retire(self, tr, false); + count++; + tr = next; + } } + return count; } -/// Frees the iface and all slots in it. The iface instance itself is not freed. -static void rxIfaceFree(RxIface* const self, const RxMemory memory) +uint16_t udpard_tx_pending_ifaces(const udpard_tx_t* const self) { - UDPARD_ASSERT(self != NULL); - for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++) - { - rxSlotFree(&self->slots[i], memory); + uint16_t bitmap = 0; + if (self != NULL) { + // Even though it's constant-time, I still mildly dislike this loop. Shall it become a bottleneck, + // we could modify the TX state to keep a bitmap of pending interfaces updated incrementally. 
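+        // E.g., a non-empty queue[2][p] at any priority p sets bit 2 of the result; the returned bitmap uses the
+        // same layout as the iface_bitmap parameter of udpard_tx_poll(), so the two can be chained directly.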
+ for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + for (size_t p = 0; p < UDPARD_PRIORITY_COUNT; p++) { + if (self->queue[i][p].head != NULL) { + bitmap |= (1U << i); + break; + } + } + } } + return bitmap; } -// -------------------------------------------------- RX SESSION -------------------------------------------------- +void udpard_tx_refcount_inc(const udpard_bytes_t tx_payload_view) +{ + if (tx_payload_view.data != NULL) { + tx_frame_t* const frame = tx_frame_from_view(tx_payload_view); + UDPARD_ASSERT(frame->refcount > 0); // NOLINT(*ArrayBound) + // TODO: if C11 is enabled, use stdatomic here + frame->refcount++; + } +} -/// Checks if the given transfer should be accepted. If not, the transfer is freed. -/// Internal states are updated. -static bool rxSessionDeduplicate(struct UdpardInternalRxSession* const self, - const UdpardMicrosecond transfer_id_timeout_usec, - struct UdpardRxTransfer* const transfer, - const RxMemory memory) +void udpard_tx_refcount_dec(const udpard_bytes_t tx_payload_view) { - UDPARD_ASSERT((self != NULL) && (transfer != NULL)); - const bool future_tid = (self->last_transfer_id == TRANSFER_ID_UNSET) || // - (transfer->transfer_id > self->last_transfer_id); - const bool tid_timeout = (self->last_ts_usec == TIMESTAMP_UNSET) || - ((transfer->timestamp_usec >= self->last_ts_usec) && - ((transfer->timestamp_usec - self->last_ts_usec) >= transfer_id_timeout_usec)); - const bool accept = future_tid || tid_timeout; - if (accept) - { - self->last_ts_usec = transfer->timestamp_usec; - self->last_transfer_id = transfer->transfer_id; + if (tx_payload_view.data != NULL) { + tx_frame_t* const frame = tx_frame_from_view(tx_payload_view); + UDPARD_ASSERT(frame->refcount > 0); // NOLINT(*ArrayBound) + // TODO: if C11 is enabled, use stdatomic here + frame->refcount--; + if (frame->refcount == 0U) { + --*frame->objcount; + frame->deleter.vtable->free(frame->deleter.context, sizeof(tx_frame_t) + tx_payload_view.size, frame); + } } - else // This is a duplicate: received from another interface, a FEC retransmission, or a network glitch. - { - memFreePayload(memory.payload, transfer->payload.origin); - rxFragmentDestroyList(transfer->payload.next, memory); - transfer->payload_size = 0; - transfer->payload = (struct UdpardFragment) {.next = NULL, - .view = {.size = 0, .data = NULL}, - .origin = {.size = 0, .data = NULL}}; - } - return accept; -} - -/// Takes ownership of the frame payload buffer. -static int_fast8_t rxSessionAccept(struct UdpardInternalRxSession* const self, - const uint_fast8_t redundant_iface_index, - const UdpardMicrosecond ts_usec, - const RxFrame frame, - const size_t extent, - const UdpardMicrosecond transfer_id_timeout_usec, - const RxMemory memory, - struct UdpardRxTransfer* const out_transfer) -{ - UDPARD_ASSERT((self != NULL) && (redundant_iface_index < UDPARD_NETWORK_INTERFACE_COUNT_MAX) && - (out_transfer != NULL)); - int_fast8_t result = rxIfaceAccept(&self->ifaces[redundant_iface_index], - ts_usec, - frame, - extent, - transfer_id_timeout_usec, - memory, - out_transfer); - UDPARD_ASSERT(result <= 1); - if (result > 0) - { - result = rxSessionDeduplicate(self, transfer_id_timeout_usec, out_transfer, memory) ? 
1 : 0;
+}
+
+void udpard_tx_free(udpard_tx_t* const self)
+{
+    if (self != NULL) {
+        while (self->index_transfer != NULL) {
+            tx_transfer_t* tr = CAVL2_TO_OWNER(self->index_transfer, tx_transfer_t, index_transfer);
+            tx_transfer_retire(self, tr, false);
+        }
     }
-    return result;
 }
 
-static void rxSessionInit(struct UdpardInternalRxSession* const self, const RxMemory memory)
+// ---------------------------------------------------------------------------------------------------------------------
+// --------------------------------------------- RX PIPELINE ---------------------------------------------
+// ---------------------------------------------------------------------------------------------------------------------
+//
+// The RX pipeline is a layered solution: PORT -> SESSION -> SLOT -> FRAGMENT TREE.
+//
+// Ports are created by the application per subject to subscribe to. There are various parameters defined per port,
+// such as the extent (max payload size to accept) and the reassembly mode (ORDERED, UNORDERED, STATELESS).
+//
+// Each port dynamically creates a dedicated session per remote node that publishes on that subject
+// (unless the STATELESS mode is used, which is simple and limited). Sessions are automatically cleaned up and
+// removed when the remote node ceases to publish for a certain (large) timeout period.
+//
+// Each session holds RX_SLOT_COUNT slots for concurrent transfers from the same remote node on the same subject;
+// concurrent transfers may occur due to spontaneous datagram reordering or when the sender needs to emit a higher-
+// priority transfer while a lower-priority transfer is still ongoing (this is why there need to be at least as many
+// slots as there are priority levels). Each slot accepts frames from all redundant network interfaces at once and
+// runs an efficient fragment tree reassembler to reconstruct the original transfer payload with automatic deduplication
+// and defragmentation; since all interfaces are pooled together, the reassembler is completely insensitive to
+// permanent or transient failure of any of the redundant interfaces; as long as at least one of them is able to
+// deliver frames, the link will function; further, transient packet loss in one of the interfaces does not affect
+// the overall reliability. The message reception machine always operates at the throughput and latency of the
+// best-performing interface at any given time with seamless failover.
+//
+// Each session keeps track of recently received/seen transfers, which is used for ack retransmission
+// if the remote end attempts to retransmit a transfer that was already fully received, and is also used for duplicate
+// rejection. In the ORDERED mode, late transfers (those arriving out of order past the reordering window closure)
+// are never acked, but they may still be received and acked by some other nodes in the network that were able to
+// accept them.
+//
+// Acks are transmitted immediately upon successful reception of a transfer. If the remote end retransmits the transfer
+// (e.g., if the first ack was lost or due to a spurious duplication), repeat acks are only retransmitted
+// for the first frame of the transfer because we don't want to flood the network with duplicate ACKs for every
+// frame of the retransmitted transfer.
+//
+// The redundant interfaces may have distinct MTUs, so the fragment offsets and sizes may vary significantly.
+// The reassembler decides if a newly arrived fragment is needed based on gap/overlap detection in the fragment tree.
+// An accepted fragment may overlap with neighboring fragments; however, the reassembler guarantees that no fragment is
+// fully contained within another fragment; this also implies that there are no fragments sharing the same offset,
+// and that fragments ordered by offset are also ordered by their ends.
+// The reassembler prefers to keep fewer large fragments over many small fragments to reduce the overhead of
+// managing the fragment tree and the amount of auxiliary memory required for it.
+//
+// The code here does a lot of linear lookups. This is intentional and is not expected to bring any performance issues
+// because all loops are tightly bounded with a compile-time known maximum number of iterations that is very small
+// in practice (e.g., number of slots per session, number of priority levels, number of interfaces). For small
+// numbers of iterations this is much faster than more sophisticated lookup structures.
+
+/// All but the transfer metadata: fields that change from frame to frame within the same transfer.
+typedef struct
 {
-    UDPARD_ASSERT(self != NULL);
-    memZero(sizeof(*self), self);
-    self->remote_node_id = UDPARD_NODE_ID_UNSET;
-    self->last_ts_usec = TIMESTAMP_UNSET;
-    self->last_transfer_id = TRANSFER_ID_UNSET;
-    for (uint_fast8_t i = 0; i < UDPARD_NETWORK_INTERFACE_COUNT_MAX; i++)
-    {
-        rxIfaceInit(&self->ifaces[i], memory);
+    size_t offset;              ///< Offset of this fragment's payload within the full transfer payload.
+    udpard_bytes_t payload;     ///< Does not include the header, just pure payload.
+    udpard_bytes_mut_t origin;  ///< The entirety of the free-able buffer passed from the application.
+    uint32_t crc;               ///< CRC of all preceding payload bytes in the transfer plus this fragment's payload.
+} rx_frame_base_t;
+
+/// Full frame state.
+typedef struct rx_frame_t
+{
+    rx_frame_base_t base;
+    meta_t meta;
+} rx_frame_t;
+
+// --------------------------------------------- FRAGMENT TREE ---------------------------------------------
+
+/// Finds the number of contiguous payload bytes received from offset zero after accepting a new fragment.
+/// The transfer is considered fully received when covered_prefix >= min(extent, transfer_payload_size).
+/// This should be invoked after the fragment tree accepted a new fragment at frag_offset with frag_size.
+/// The complexity is amortized-logarithmic, worst case is linear in the number of frames in the transfer.
+static size_t rx_fragment_tree_update_covered_prefix(udpard_tree_t* const root,
+                                                     const size_t old_prefix,
+                                                     const size_t frag_offset,
+                                                     const size_t frag_size)
+{
+    const size_t end = frag_offset + frag_size;
+    if ((frag_offset > old_prefix) || (end <= old_prefix)) {
+        return old_prefix; // The new fragment does not cross the frontier, so it cannot affect the prefix.
+    }
+    udpard_fragment_t* fr = (udpard_fragment_t*)cavl2_predecessor(root, &old_prefix, &cavl_compare_fragment_offset);
+    UDPARD_ASSERT(fr != NULL);
+    size_t out = old_prefix;
+    while ((fr != NULL) && (fr->offset <= out)) {
+        out = larger(out, fr->offset + fr->view.size);
+        fr = (udpard_fragment_t*)cavl2_next_greater(&fr->index_offset);
    }
+    return out;
}
 
-/// Frees all ifaces in the session, all children in the session tree recursively, and destroys the session itself.
-/// The maximum recursion depth is ceil(1.44*log2(UDPARD_NODE_ID_MAX+1)-0.328) = 23 levels.
-// NOLINTNEXTLINE(*-no-recursion) MISRA C:2012 rule 17.2 -static void rxSessionDestroyTree(struct UdpardInternalRxSession* const self, - const struct UdpardRxMemoryResources memory) +/// If NULL, the payload ownership could not be transferred due to OOM. The caller still owns the payload. +static udpard_fragment_t* rx_fragment_new(const udpard_mem_t memory, + const udpard_deleter_t payload_deleter, + const rx_frame_base_t frame) +{ + udpard_fragment_t* const mew = mem_alloc(memory, sizeof(udpard_fragment_t)); + if (mew != NULL) { + mem_zero(sizeof(*mew), mew); + mew->index_offset = (udpard_tree_t){ NULL, { NULL, NULL }, 0 }; + mew->offset = frame.offset; + mew->view.data = frame.payload.data; + mew->view.size = frame.payload.size; + mew->origin.data = frame.origin.data; + mew->origin.size = frame.origin.size; + mew->payload_deleter = payload_deleter; + } + return mew; +} + +typedef enum +{ + rx_fragment_tree_rejected, ///< The newly received fragment was not needed for the tree and was freed. + rx_fragment_tree_accepted, ///< The newly received fragment was accepted into the tree, possibly replacing another. + rx_fragment_tree_done, ///< The newly received fragment completed the transfer; the caller must extract payload. + rx_fragment_tree_oom, ///< The fragment could not be accepted, but a possible future duplicate may work. +} rx_fragment_tree_update_result_t; + +/// Takes ownership of the frame payload; either a new fragment is inserted or the payload is freed. +static rx_fragment_tree_update_result_t rx_fragment_tree_update(udpard_tree_t** const root, + const udpard_mem_t fragment_memory, + const udpard_deleter_t payload_deleter, + const rx_frame_base_t frame, + const size_t transfer_payload_size, + const size_t extent, + size_t* const covered_prefix_io) +{ + const size_t left = frame.offset; + const size_t right = frame.offset + frame.payload.size; + + // Ignore frames beyond the extent. Zero extent requires special handling because from the reassembler's + // view such transfers are useless, but we still want them. + if (((extent > 0) && (left >= extent)) || ((extent == 0) && (left > extent))) { + mem_free_payload(payload_deleter, frame.origin); + return rx_fragment_tree_rejected; // New fragment is beyond the extent, discard. + } + + // Check if the new fragment is fully contained within an existing fragment, or is an exact replica of one. + // We discard those early to maintain an essential invariant of the fragment tree: no fully-contained fragments. + { + const udpard_fragment_t* const frag = + (udpard_fragment_t*)cavl2_predecessor(*root, &left, &cavl_compare_fragment_offset); + if ((frag != NULL) && ((frag->offset + frag->view.size) >= right)) { + mem_free_payload(payload_deleter, frame.origin); + return rx_fragment_tree_rejected; // New fragment is fully contained within an existing one, discard. + } + } + + // Find the left and right neighbors, if any, with possible (likely) overlap. Consider new fragment X with A, B, C: + // |----X----| + // |--A--| + // |--B--| + // |--C--| + // Here, only A is the left neighbor, and only C is the right neighbor. B is a victim. + // If A.right >= C.left, then there is neither a gap nor a victim to remove. + // + // To find the left neighbor, we need to find the fragment crossing the left boundary whose offset is the smallest. + // To do that, we simply need to find the fragment with the smallest right boundary that is on the right of our + // left boundary. 
This works because by construction we guarantee that our tree has no fully-contained fragments,
+    // implying that ordering by left is also ordering by right.
+    //
+    // The right neighbor is found by analogy: find the fragment with the largest left boundary that is on the left
+    // of our right boundary. This guarantees that the new virtual right boundary will max out to the right.
+    const udpard_fragment_t* n_left = (udpard_fragment_t*)cavl2_lower_bound(*root, &left, &cavl_compare_fragment_end);
+    if ((n_left != NULL) && (n_left->offset >= left)) {
+        n_left = NULL; // There is no left neighbor.
+    }
+    const udpard_fragment_t* n_right =
+        (udpard_fragment_t*)cavl2_predecessor(*root, &right, &cavl_compare_fragment_offset);
+    if ((n_right != NULL) && ((n_right->offset + n_right->view.size) <= right)) {
+        n_right = NULL; // There is no right neighbor.
+    }
+    const size_t n_left_size = (n_left != NULL) ? n_left->view.size : 0U;
+    const size_t n_right_size = (n_right != NULL) ? n_right->view.size : 0U;
+
+    // Simple acceptance heuristic -- if the new fragment adds new payload, allows us to eliminate a smaller fragment,
+    // or is larger than either neighbor, we accept it. The 'larger' condition is intended to allow
+    // eventual replacement of many small fragments with fewer large fragments.
+    // Consider the following scenario:
+    //   |--A--|--B--|--C--|--D--|   <-- small MTU set
+    //   |---X---|---Y---|---Z---|   <-- large MTU set
+    // Suppose we already have A..D received. Arrival of either X or Z allows eviction of A/D immediately.
+    // Arrival of Y does not allow an immediate eviction of any fragment, but if we had rejected it because it added
+    // no new coverage, we would miss the opportunity to evict B/C when X or Z arrive later. By this logic alone,
+    // we would also have to accept B and C if they were to arrive after X/Y/Z, which is however unnecessary because
+    // these fragments add no new information AND are smaller than the existing fragments, meaning that they offer
+    // no prospect of eventual defragmentation, so we reject them immediately.
+    const bool accept = (n_left == NULL) || (n_right == NULL) ||
+                        ((n_left->offset + n_left->view.size) < n_right->offset) ||
+                        (frame.payload.size > smaller(n_left_size, n_right_size));
+    if (!accept) {
+        mem_free_payload(payload_deleter, frame.origin);
+        return rx_fragment_tree_rejected; // New fragment is not expected to be useful.
+    }
+
+    // Ensure we can allocate the fragment header for the new frame before pruning the tree to avoid data loss.
+    udpard_fragment_t* const mew = rx_fragment_new(fragment_memory, payload_deleter, frame);
+    if (mew == NULL) {
+        mem_free_payload(payload_deleter, frame.origin);
+        return rx_fragment_tree_oom; // Cannot allocate fragment header. Maybe we will succeed later.
+    }
+
+    // The addition of a new fragment that joins adjacent fragments together into a larger contiguous block may
+    // render smaller fragments crossing its boundaries redundant.
+    // To check for that, we create a new virtual fragment that represents the new fragment together with those
+    // that join it on either end, if any, and then look for fragments contained within the virtual one.
+    // The virtual boundaries are adjusted by 1 to ensure that the neighbors themselves are not marked for eviction.
+    // Example:
+    //   |--A--|--B--|
+    //      |--X--|
+    // The addition of fragment A or B will render X redundant, even though it is not contained within either.
+    // This algorithm will detect that and mark X for removal.
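+    // Worked example (hypothetical sizes): n_left=[0,10), new=[8,20), n_right=[15,30). Then v_left=min(8,0+1)=1
+    // and v_right=max(20,30-1)=29, so the eviction scan below spares both neighbors (offset 0 < 1; end 30 > 29)
+    // while any other fragment falling entirely within [1,29) is removed.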
+ const size_t v_left = smaller(left, (n_left == NULL) ? SIZE_MAX : (n_left->offset + 1U)); + const size_t v_right = + larger(right, (n_right == NULL) ? 0 : (larger(n_right->offset + n_right->view.size, 1U) - 1U)); + UDPARD_ASSERT((v_left <= left) && (right <= v_right)); + + // Remove all redundant fragments before inserting the new one. + // No need to repeat tree lookup at every iteration, we just step through the nodes using the next_greater lookup. + udpard_fragment_t* victim = (udpard_fragment_t*)cavl2_lower_bound(*root, &v_left, &cavl_compare_fragment_offset); + while ((victim != NULL) && (victim->offset >= v_left) && ((victim->offset + victim->view.size) <= v_right)) { + udpard_fragment_t* const next = (udpard_fragment_t*)cavl2_next_greater(&victim->index_offset); + cavl2_remove(root, &victim->index_offset); + mem_free_payload(victim->payload_deleter, victim->origin); + mem_free(fragment_memory, sizeof(udpard_fragment_t), victim); + victim = next; + } + // Insert the new fragment. + const udpard_tree_t* const res = cavl2_find_or_insert(root, // + &mew->offset, + &cavl_compare_fragment_offset, + &mew->index_offset, + &cavl2_trivial_factory); + UDPARD_ASSERT(res == &mew->index_offset); + (void)res; + // Update the covered prefix. This requires only a single full scan across all iterations! + *covered_prefix_io = rx_fragment_tree_update_covered_prefix(*root, // + *covered_prefix_io, + frame.offset, + frame.payload.size); + return (*covered_prefix_io >= smaller(extent, transfer_payload_size)) ? rx_fragment_tree_done + : rx_fragment_tree_accepted; +} + +/// 1. Eliminates payload overlaps. They may appear if redundant interfaces with different MTU settings are used. +/// 2. Verifies the end-to-end CRC of the full reassembled payload. +/// Returns true iff the transfer is valid and safe to deliver to the application. +/// Observe that this function alters the tree ordering keys, but it does not alter the tree topology, +/// because each fragment's offset is changed within the bounds that preserve the ordering. +static bool rx_fragment_tree_finalize(udpard_tree_t* const root, const uint32_t crc_expected) +{ + uint32_t crc_computed = CRC_INITIAL; + size_t offset = 0; + for (udpard_tree_t* p = cavl2_min(root); p != NULL; p = cavl2_next_greater(p)) { + udpard_fragment_t* const frag = (udpard_fragment_t*)p; + UDPARD_ASSERT(frag->offset <= offset); // The tree reassembler cannot leave gaps. + const size_t trim = offset - frag->offset; + // The tree reassembler evicts redundant fragments, so there must be some payload, unless the transfer is empty. + UDPARD_ASSERT((trim < frag->view.size) || ((frag->view.size == 0) && (trim == 0) && (offset == 0))); + frag->offset += trim; + frag->view.data = (const byte_t*)frag->view.data + trim; + frag->view.size -= trim; + offset += frag->view.size; + crc_computed = crc_add(crc_computed, frag->view.size, frag->view.data); + } + return (crc_computed ^ CRC_OUTPUT_XOR) == crc_expected; +} + +// --------------------------------------------- SLOT --------------------------------------------- + +typedef enum +{ + rx_slot_idle = 0, + rx_slot_busy = 1, + rx_slot_done = 2, +} rx_slot_state_t; + +/// Frames from all redundant interfaces are pooled into the same reassembly slot per transfer-ID. +/// The redundant interfaces may use distinct MTU, which requires special fragment tree handling. 
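+/// For example, a 3000-byte transfer may arrive as fragments [0,1024), [1024,2048), [2048,3000) via a small-MTU
+/// interface and as a single fragment [0,3000) via a large-MTU interface; the tree reconciles such overlaps.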
+typedef struct { - if (self != NULL) - { - for (uint_fast8_t i = 0; i < UDPARD_NETWORK_INTERFACE_COUNT_MAX; i++) - { - rxIfaceFree(&self->ifaces[i], (RxMemory) {.fragment = memory.fragment, .payload = memory.payload}); + rx_slot_state_t state; + + uint64_t transfer_id; ///< Which transfer we're reassembling here. + + udpard_us_t ts_min; ///< Earliest frame timestamp, aka transfer reception timestamp. + udpard_us_t ts_max; ///< Latest frame timestamp, aka transfer completion timestamp. + + size_t covered_prefix; ///< Number of bytes received contiguously from offset zero. + size_t total_size; ///< The total size of the transfer payload being transmitted (we may only use part of it). + + size_t crc_end; ///< The end offset of the frame whose CRC is stored in `crc`. + uint32_t crc; ///< Once the reassembly is done, holds the CRC of the entire transfer. + + udpard_prio_t priority; + + udpard_tree_t* fragments; +} rx_slot_t; + +static void rx_slot_reset(rx_slot_t* const slot, const udpard_mem_t fragment_memory) +{ + udpard_fragment_free_all((udpard_fragment_t*)slot->fragments, udpard_make_deleter(fragment_memory)); + slot->fragments = NULL; + slot->state = rx_slot_idle; + slot->covered_prefix = 0U; + slot->crc_end = 0U; + slot->crc = CRC_INITIAL; +} + +/// The caller will accept the ownership of the fragments iff the resulting state is done. +static void rx_slot_update(rx_slot_t* const slot, + const udpard_us_t ts, + const udpard_mem_t fragment_memory, + const udpard_deleter_t payload_deleter, + rx_frame_t* const frame, + const size_t extent, + uint64_t* const errors_oom, + uint64_t* const errors_transfer_malformed) +{ + if (slot->state != rx_slot_busy) { + rx_slot_reset(slot, fragment_memory); + slot->state = rx_slot_busy; + slot->transfer_id = frame->meta.transfer_id; + slot->ts_min = ts; + slot->ts_max = ts; + // Some metadata is only needed to pass it over to the application once the transfer is done. + slot->total_size = frame->meta.transfer_payload_size; + slot->priority = frame->meta.priority; + } + // Enforce consistent per-frame values throughout the transfer. + if ((slot->total_size != frame->meta.transfer_payload_size) || (slot->priority != frame->meta.priority)) { + ++*errors_transfer_malformed; + mem_free_payload(payload_deleter, frame->base.origin); + rx_slot_reset(slot, fragment_memory); + return; + } + const rx_fragment_tree_update_result_t tree_res = rx_fragment_tree_update(&slot->fragments, + fragment_memory, + payload_deleter, + frame->base, + frame->meta.transfer_payload_size, + extent, + &slot->covered_prefix); + if ((tree_res == rx_fragment_tree_accepted) || (tree_res == rx_fragment_tree_done)) { + slot->ts_max = later(slot->ts_max, ts); + slot->ts_min = earlier(slot->ts_min, ts); + const size_t crc_end = frame->base.offset + frame->base.payload.size; + if (crc_end >= slot->crc_end) { + slot->crc_end = crc_end; + slot->crc = frame->base.crc; } - for (uint_fast8_t i = 0; i < 2; i++) - { - struct UdpardInternalRxSession* const child = (struct UdpardInternalRxSession*) (void*) self->base.lr[i]; - if (child != NULL) - { - UDPARD_ASSERT(child->base.up == &self->base); - rxSessionDestroyTree(child, memory); // NOSONAR recursion - } + } + if (tree_res == rx_fragment_tree_oom) { + ++*errors_oom; + } + if (tree_res == rx_fragment_tree_done) { + if (rx_fragment_tree_finalize(slot->fragments, slot->crc)) { + slot->state = rx_slot_done; // The caller will handle the completed transfer. 
+ } else { + ++*errors_transfer_malformed; + rx_slot_reset(slot, fragment_memory); } - memFree(memory.session, sizeof(struct UdpardInternalRxSession), self); } } -// -------------------------------------------------- RX PORT -------------------------------------------------- +// --------------------------------------------- SESSION & PORT --------------------------------------------- -typedef struct +/// The number of times `from` must be incremented (modulo 2^64) to reach `to`. +static uint64_t rx_transfer_id_forward_distance(const uint64_t from, const uint64_t to) { return to - from; } + +/// Keep in mind that we have a dedicated session object per remote node per port; this means that the states +/// kept here are specific per remote node, as it should be. +typedef struct rx_session_t { - UdpardNodeID remote_node_id; - struct UdpardRxMemoryResources memory; -} RxPortSessionSearchContext; + udpard_tree_t index_remote_uid; ///< Must be the first member. + udpard_remote_t remote; ///< Most recent discovered reverse path for P2P to the sender. + + udpard_rx_port_t* port; + + /// Sessions interned for the reordering window closure. + udpard_tree_t index_reordering_window; + udpard_us_t reordering_window_deadline; + + /// LRU last animated list for automatic retirement of stale sessions. + udpard_listed_t list_by_animation; + udpard_us_t last_animated_ts; -static int_fast8_t rxPortSessionSearch(void* const user_reference, // NOSONAR non-const API - const struct UdpardTreeNode* node) + /// Most recently received transfer-IDs, used for duplicate detection and ACK retransmission. + /// The index is always in [0,RX_TRANSFER_HISTORY_COUNT), pointing to the last added (newest) entry. + uint64_t history[RX_TRANSFER_HISTORY_COUNT]; + uint_fast8_t history_current; + + bool initialized; ///< Set after the first frame is seen. + + rx_slot_t slots[RX_SLOT_COUNT]; +} rx_session_t; + +/// The reassembly strategy is composed once at initialization time by choosing a vtable with the desired behavior. +typedef struct udpard_rx_port_vtable_private_t { - UDPARD_ASSERT((user_reference != NULL) && (node != NULL)); - return compare32(((const RxPortSessionSearchContext*) user_reference)->remote_node_id, - ((const struct UdpardInternalRxSession*) (const void*) node)->remote_node_id); -} + /// Takes ownership of the frame payload. + void (*accept)(udpard_rx_t*, + udpard_rx_port_t*, + udpard_us_t, + udpard_udpip_ep_t, + rx_frame_t*, + udpard_deleter_t, + uint_fast8_t); + /// Takes ownership of the frame payload. + void (*update_session)(rx_session_t*, udpard_rx_t*, udpard_us_t, rx_frame_t*, udpard_deleter_t); +} udpard_rx_port_vtable_private_t; -static struct UdpardTreeNode* rxPortSessionFactory(void* const user_reference) // NOSONAR non-const API +/// True iff the given transfer-ID was recently ejected. 
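+/// This is a bounded linear scan over the small history ring; e.g., with a history holding {7, 8, 9, 6},
+/// any of the transfer-IDs 6 through 9 is reported as ejected (the ring order is irrelevant here).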
+static bool rx_session_is_transfer_ejected(const rx_session_t* const self, const uint64_t transfer_id) { - const RxPortSessionSearchContext* const ctx = (const RxPortSessionSearchContext*) user_reference; - UDPARD_ASSERT((ctx != NULL) && (ctx->remote_node_id <= UDPARD_NODE_ID_MAX)); - struct UdpardTreeNode* out = NULL; - struct UdpardInternalRxSession* const session = - memAlloc(ctx->memory.session, sizeof(struct UdpardInternalRxSession)); - if (session != NULL) - { - rxSessionInit(session, (RxMemory) {.payload = ctx->memory.payload, .fragment = ctx->memory.fragment}); - session->remote_node_id = ctx->remote_node_id; - out = &session->base; - } - return out; // OOM handled by the caller -} - -/// Accepts a frame into a port, possibly creating a new session along the way. -/// The frame shall not be anonymous. Takes ownership of the frame payload buffer. -static int_fast8_t rxPortAccept(struct UdpardRxPort* const self, - const uint_fast8_t redundant_iface_index, - const UdpardMicrosecond ts_usec, - const RxFrame frame, - const struct UdpardRxMemoryResources memory, - struct UdpardRxTransfer* const out_transfer) -{ - UDPARD_ASSERT((self != NULL) && (redundant_iface_index < UDPARD_NETWORK_INTERFACE_COUNT_MAX) && - (out_transfer != NULL) && (frame.meta.src_node_id != UDPARD_NODE_ID_UNSET)); - int_fast8_t result = 0; - struct UdpardInternalRxSession* const session = (struct UdpardInternalRxSession*) (void*) - cavlSearch((struct UdpardTreeNode**) &self->sessions, - &(RxPortSessionSearchContext) {.remote_node_id = frame.meta.src_node_id, .memory = memory}, - &rxPortSessionSearch, - &rxPortSessionFactory); - if (session != NULL) - { - UDPARD_ASSERT(session->remote_node_id == frame.meta.src_node_id); - result = rxSessionAccept(session, // The callee takes ownership of the memory. - redundant_iface_index, - ts_usec, - frame, - self->extent, - self->transfer_id_timeout_usec, - (RxMemory) {.payload = memory.payload, .fragment = memory.fragment}, - out_transfer); - } - else // Failed to allocate a new session. - { - result = -UDPARD_ERROR_MEMORY; - memFreePayload(memory.payload, frame.base.origin); + for (size_t i = 0; i < RX_TRANSFER_HISTORY_COUNT; i++) { // dear compiler, please unroll this loop + if (transfer_id == self->history[i]) { + return true; + } } - return result; + return false; } -/// A special case of rxPortAccept() for anonymous transfers. Accepts all transfers unconditionally. -/// Does not allocate new memory. Takes ownership of the frame payload buffer. -static int_fast8_t rxPortAcceptAnonymous(const UdpardMicrosecond ts_usec, - const RxFrame frame, - const struct UdpardMemoryDeleter memory, - struct UdpardRxTransfer* const out_transfer) +/// True iff the given transfer-ID is shortly before one of the recently ejected ones or equals one. +/// In the ORDERED mode, this indicates that the transfer is late and can no longer be ejected. +static bool rx_session_is_transfer_late_or_ejected(const rx_session_t* const self, const uint64_t transfer_id) { - UDPARD_ASSERT((out_transfer != NULL) && (frame.meta.src_node_id == UDPARD_NODE_ID_UNSET)); - int_fast8_t result = 0; - const bool size_ok = frame.base.payload.size >= TRANSFER_CRC_SIZE_BYTES; - const bool crc_ok = - transferCRCCompute(frame.base.payload.size, frame.base.payload.data) == TRANSFER_CRC_RESIDUE_AFTER_OUTPUT_XOR; - if (size_ok && crc_ok) - { - result = 1; - memZero(sizeof(*out_transfer), out_transfer); - // Copy relevant metadata from the frame. Remember that anonymous transfers are always single-frame. 
- out_transfer->timestamp_usec = ts_usec; - out_transfer->priority = frame.meta.priority; - out_transfer->source_node_id = frame.meta.src_node_id; - out_transfer->transfer_id = frame.meta.transfer_id; - // Manually set up the transfer payload to point to the relevant slice inside the frame payload. - out_transfer->payload.next = NULL; - out_transfer->payload.view.size = frame.base.payload.size - TRANSFER_CRC_SIZE_BYTES; - out_transfer->payload.view.data = frame.base.payload.data; - out_transfer->payload.origin = frame.base.origin; - out_transfer->payload_size = out_transfer->payload.view.size; - } - else - { - memFreePayload(memory, frame.base.origin); + for (size_t i = 0; i < RX_TRANSFER_HISTORY_COUNT; i++) { + if (rx_transfer_id_forward_distance(transfer_id, self->history[i]) < RX_TRANSFER_ORDERING_WINDOW) { + return true; + } } - return result; + return false; } -/// Accepts a raw frame and, if valid, passes it on to rxPortAccept() for further processing. -/// Takes ownership of the frame payload buffer. -static int_fast8_t rxPortAcceptFrame(struct UdpardRxPort* const self, - const uint_fast8_t redundant_iface_index, - const UdpardMicrosecond ts_usec, - const struct UdpardMutablePayload datagram_payload, - const struct UdpardRxMemoryResources memory, - struct UdpardRxTransfer* const out_transfer) +/// True iff the transfer is already received but is not yet ejected to maintain ordering. Only useful for ORDERED mode. +static bool rx_session_is_transfer_interned(const rx_session_t* const self, const uint64_t transfer_id) { - int_fast8_t result = 0; - RxFrame frame = {0}; - if (rxParseFrame(datagram_payload, &frame)) - { - if (frame.meta.src_node_id != UDPARD_NODE_ID_UNSET) - { - result = rxPortAccept(self, redundant_iface_index, ts_usec, frame, memory, out_transfer); - } - else - { - result = rxPortAcceptAnonymous(ts_usec, frame, memory.payload, out_transfer); + for (size_t i = 0; i < RX_SLOT_COUNT; i++) { + if ((self->slots[i].state == rx_slot_done) && (self->slots[i].transfer_id == transfer_id)) { + return true; } } - else // Malformed datagram or unsupported header version, drop. - { - memFreePayload(memory.payload, datagram_payload); - } - return result; + return false; } -static void rxPortInit(struct UdpardRxPort* const self) +static int32_t cavl_compare_rx_session_by_remote_uid(const void* const user, const udpard_tree_t* const node) { - memZero(sizeof(*self), self); - self->extent = SIZE_MAX; // Unlimited extent by default. - self->transfer_id_timeout_usec = UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC; - self->sessions = NULL; + const uint64_t uid_a = *(const uint64_t*)user; + const uint64_t uid_b = ((const rx_session_t*)(const void*)node)->remote.uid; // clang-format off + if (uid_a < uid_b) { return -1; } + if (uid_a > uid_b) { return +1; } + return 0; // clang-format on } -static void rxPortFree(struct UdpardRxPort* const self, const struct UdpardRxMemoryResources memory) +// Key for reordering deadline ordering with stable tiebreaking. +typedef struct { - rxSessionDestroyTree(self->sessions, memory); - self->sessions = NULL; -} + udpard_us_t deadline; + uint64_t remote_uid; +} rx_reordering_key_t; -static int_fast8_t rxRPCSearch(void* const user_reference, // NOSONAR Cavl API requires non-const. - const struct UdpardTreeNode* node) +// Compare sessions by reordering deadline then by remote UID. 
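+// The UID tiebreak makes this ordering total: e.g., (deadline=100, uid=5) < (deadline=100, uid=9) <
+// (deadline=200, uid=1), so two sessions whose windows close at the same microsecond still occupy
+// distinct positions in the tree.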
+static int32_t cavl_compare_rx_session_by_reordering_deadline(const void* const user, const udpard_tree_t* const node) { - UDPARD_ASSERT((user_reference != NULL) && (node != NULL)); - return compare32(((const struct UdpardRxRPCPort*) user_reference)->service_id, - ((const struct UdpardRxRPCPort*) (const void*) node)->service_id); + const rx_reordering_key_t* const key = (const rx_reordering_key_t*)user; + const rx_session_t* const ses = CAVL2_TO_OWNER(node, rx_session_t, index_reordering_window); // clang-format off + if (key->deadline < ses->reordering_window_deadline) { return -1; } + if (key->deadline > ses->reordering_window_deadline) { return +1; } + if (key->remote_uid < ses->remote.uid) { return -1; } + if (key->remote_uid > ses->remote.uid) { return +1; } + return 0; // clang-format on } -static int_fast8_t rxRPCSearchByServiceID(void* const user_reference, // NOSONAR Cavl API requires non-const. - const struct UdpardTreeNode* node) +typedef struct { - UDPARD_ASSERT((user_reference != NULL) && (node != NULL)); - return compare32(*(const UdpardPortID*) user_reference, - ((const struct UdpardRxRPCPort*) (const void*) node)->service_id); + udpard_rx_port_t* owner; + udpard_list_t* sessions_by_animation; + uint64_t remote_uid; + udpard_us_t now; +} rx_session_factory_args_t; + +static udpard_tree_t* cavl_factory_rx_session_by_remote_uid(void* const user) +{ + const rx_session_factory_args_t* const args = (const rx_session_factory_args_t*)user; + rx_session_t* const out = mem_alloc(args->owner->memory.session, sizeof(rx_session_t)); + if (out != NULL) { + mem_zero(sizeof(*out), out); + out->index_remote_uid = (udpard_tree_t){ NULL, { NULL, NULL }, 0 }; + out->index_reordering_window = (udpard_tree_t){ NULL, { NULL, NULL }, 0 }; + out->reordering_window_deadline = BIG_BANG; + out->list_by_animation = (udpard_listed_t){ NULL, NULL }; + for (size_t i = 0; i < RX_SLOT_COUNT; i++) { + out->slots[i].fragments = NULL; + rx_slot_reset(&out->slots[i], args->owner->memory.fragment); + } + out->remote.uid = args->remote_uid; + out->port = args->owner; + out->last_animated_ts = args->now; + out->history_current = 0; + out->initialized = false; + enlist_head(args->sessions_by_animation, &out->list_by_animation); + } + return (udpard_tree_t*)out; } -// -------------------------------------------------- RX API -------------------------------------------------- - -void udpardRxFragmentFree(const struct UdpardFragment head, - const struct UdpardMemoryResource memory_fragment, - const struct UdpardMemoryDeleter memory_payload) +/// Removes the instance from all indexes and frees all associated memory. +static void rx_session_free(rx_session_t* const self, + udpard_list_t* const sessions_by_animation, + udpard_tree_t** const sessions_by_reordering) { - // The head is not heap-allocated so not freed. - memFreePayload(memory_payload, head.origin); // May be NULL, is okay. 
- rxFragmentDestroyList(head.next, (RxMemory) {.fragment = memory_fragment, .payload = memory_payload}); + for (size_t i = 0; i < RX_SLOT_COUNT; i++) { + rx_slot_reset(&self->slots[i], self->port->memory.fragment); + } + cavl2_remove(&self->port->index_session_by_remote_uid, &self->index_remote_uid); + (void)cavl2_remove_if(sessions_by_reordering, &self->index_reordering_window); + delist(sessions_by_animation, &self->list_by_animation); + mem_free(self->port->memory.session, sizeof(rx_session_t), self); } -int_fast8_t udpardRxSubscriptionInit(struct UdpardRxSubscription* const self, - const UdpardPortID subject_id, - const size_t extent, - const struct UdpardRxMemoryResources memory) +/// The payload ownership is transferred to the application. The history log and the window will be updated. +static void rx_session_eject(rx_session_t* const self, udpard_rx_t* const rx, rx_slot_t* const slot) { - int_fast8_t result = -UDPARD_ERROR_ARGUMENT; - if ((self != NULL) && (subject_id <= UDPARD_SUBJECT_ID_MAX) && rxValidateMemoryResources(memory)) - { - memZero(sizeof(*self), self); - rxPortInit(&self->port); - self->port.extent = extent; - self->udp_ip_endpoint = makeSubjectUDPIPEndpoint(subject_id); - self->memory = memory; - result = 0; + UDPARD_ASSERT(slot->state == rx_slot_done); + + // Update the history -- overwrite the oldest entry. + self->history_current = (self->history_current + 1U) % RX_TRANSFER_HISTORY_COUNT; + self->history[self->history_current] = slot->transfer_id; + + // Construct the arguments and invoke the callback. + const udpard_rx_transfer_t transfer = { + .timestamp = slot->ts_min, + .priority = slot->priority, + .transfer_id = slot->transfer_id, + .remote = self->remote, + .payload_size_stored = slot->covered_prefix, + .payload_size_wire = slot->total_size, + .payload = (udpard_fragment_t*)slot->fragments, + }; + self->port->vtable->on_message(rx, self->port, transfer); + + // Finally, reset the slot. + slot->fragments = NULL; // Transfer ownership to the application. + rx_slot_reset(slot, self->port->memory.fragment); +} + +/// In the ORDERED mode, checks which slots can be ejected or interned in the reordering window. +/// This is only useful for the ORDERED mode. This mode is much more complex and CPU-heavy than the UNORDERED mode. +/// Should be invoked whenever a slot MAY or MUST be ejected (i.e., on completion or when an empty slot is required). +/// If the force flag is set, at least one DONE slot will be ejected even if its reordering window is still open; +/// this is used to forcibly free up at least one slot when no slot is idle and a new transfer arrives. +static void rx_session_ordered_scan_slots(rx_session_t* const self, + udpard_rx_t* const rx, + const udpard_us_t ts, + const bool force_one) +{ + // Reset the reordering window timer because we will either eject everything or arm it again later. + if (cavl2_remove_if(&rx->index_session_by_reordering, &self->index_reordering_window)) { + self->reordering_window_deadline = BIG_BANG; + } + // We need to repeat the scan because each ejection may open up the window for the next in-sequence transfer. + for (size_t iter = 0; iter < RX_SLOT_COUNT; iter++) { + // Find the slot closest to the next in-sequence transfer-ID. 
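+        // E.g., if the newest history entry is 41 then tid_expected=42; DONE slots holding {44, 42, 47} yield
+        // forward distances {2, 0, 5}, so the slot with 42 wins (distance 0 takes the fast path below).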
+        const uint64_t tid_expected = self->history[self->history_current] + 1U;
+        uint64_t min_tid_dist = UINT64_MAX;
+        rx_slot_t* slot = NULL;
+        for (size_t i = 0; i < RX_SLOT_COUNT; i++) {
+            const uint64_t dist = rx_transfer_id_forward_distance(tid_expected, self->slots[i].transfer_id);
+            if ((self->slots[i].state == rx_slot_done) && (dist < min_tid_dist)) {
+                min_tid_dist = dist;
+                slot = &self->slots[i];
+                if (dist == 0) {
+                    break; // Fast path for a common case.
+                }
+            }
+        }
+        // The slot needs to be ejected if it's in-sequence, if its reordering window is closed, or if we're
+        // asked to force an ejection and we haven't done so yet.
+        // The reordering window timeout implies that earlier transfers will be dropped if ORDERED mode is used.
+        const bool eject =
+            (slot != NULL) && ((slot->transfer_id == tid_expected) ||
+                               (ts >= (slot->ts_min + self->port->reordering_window)) || (force_one && (iter == 0)));
+        if (!eject) {
+            // The slot is done but cannot be ejected yet; arm the reordering window timer.
+            // There may be transfers with future (more distant) transfer-IDs with an earlier reordering window
+            // closure deadline, but we ignore them because the nearest transfer overrides the more distant ones.
+            if (slot != NULL) {
+                self->reordering_window_deadline = slot->ts_min + self->port->reordering_window;
+                // Insert into reordering index with deterministic tie-breaking.
+                const rx_reordering_key_t key = { .deadline = self->reordering_window_deadline,
+                                                  .remote_uid = self->remote.uid };
+                const udpard_tree_t* res = cavl2_find_or_insert(&rx->index_session_by_reordering, //----------------
+                                                                &key,
+                                                                &cavl_compare_rx_session_by_reordering_deadline,
+                                                                &self->index_reordering_window,
+                                                                &cavl2_trivial_factory);
+                UDPARD_ASSERT(res == &self->index_reordering_window);
+                (void)res;
+            }
+            break; // No more slots can be ejected at this time.
+        }
+        // We always pick the next transfer to eject with the nearest transfer-ID, which guarantees that the other
+        // DONE transfers will not end up being late.
+        // Some of the in-progress slots may be obsoleted by this move, which will be taken care of later.
+        UDPARD_ASSERT((slot != NULL) && (slot->state == rx_slot_done));
+        rx_session_eject(self, rx, slot);
+    }
+    // Ensure that in-progress slots, if any, have not ended up within the accepted window after the update.
+    // We can release them early to avoid holding the payload buffers that won't be used anyway.
+    for (size_t i = 0; i < RX_SLOT_COUNT; i++) {
+        rx_slot_t* const slot = &self->slots[i];
+        if ((slot->state == rx_slot_busy) && rx_session_is_transfer_late_or_ejected(self, slot->transfer_id)) {
+            rx_slot_reset(slot, self->port->memory.fragment);
+        }
+    }
 }
 
-void udpardRxSubscriptionFree(struct UdpardRxSubscription* const self)
+/// Finds an existing in-progress slot with the specified transfer-ID, or allocates a new one.
+/// Allocation always succeeds so the result is never NULL, but it may cause early ejection of an interned DONE slot.
+/// THIS IS POTENTIALLY DESTRUCTIVE IN THE ORDERED MODE because it may force an early reordering window closure.
+static rx_slot_t* rx_session_get_slot(rx_session_t* const self,
+                                      udpard_rx_t* const rx,
+                                      const udpard_us_t ts,
+                                      const uint64_t transfer_id)
 {
-    if (self != NULL)
-    {
-        rxPortFree(&self->port, self->memory);
+    // First, check if one is in progress already; resume it if so.
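+    // Failing that, the ladder below: reset timed-out BUSY slots, then take any IDLE slot, and as a last resort
+    // sacrifice the oldest occupied slot (forcing an early reordering ejection if that slot is DONE).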
+ for (size_t i = 0; i < RX_SLOT_COUNT; i++) { + if ((self->slots[i].state == rx_slot_busy) && (self->slots[i].transfer_id == transfer_id)) { + return &self->slots[i]; + } + } + // Use this opportunity to check for timed-out in-progress slots. This may free up a slot for the search below. + for (size_t i = 0; i < RX_SLOT_COUNT; i++) { + if ((self->slots[i].state == rx_slot_busy) && (ts >= (self->slots[i].ts_max + SESSION_LIFETIME))) { + rx_slot_reset(&self->slots[i], self->port->memory.fragment); + } } + // This appears to be a new transfer, so we will need to allocate a new slot for it. + for (size_t i = 0; i < RX_SLOT_COUNT; i++) { + if (self->slots[i].state == rx_slot_idle) { + return &self->slots[i]; + } + } + // All slots are currently occupied; find the oldest slot to sacrifice, which may be busy or done. + rx_slot_t* slot = NULL; + udpard_us_t oldest_ts = HEAT_DEATH; + for (size_t i = 0; i < RX_SLOT_COUNT; i++) { + UDPARD_ASSERT(self->slots[i].state != rx_slot_idle); // Checked this already. + if (self->slots[i].ts_max < oldest_ts) { + oldest_ts = self->slots[i].ts_max; + slot = &self->slots[i]; + } + } + UDPARD_ASSERT((slot != NULL) && ((slot->state == rx_slot_busy) || (slot->state == rx_slot_done))); + // If it's busy, it is probably just a stale transfer, so it's a no-brainer to evict it. + // If it's done, we have to force the reordering window to close early to free up a slot without transfer loss. + if (slot->state == rx_slot_busy) { + rx_slot_reset(slot, self->port->memory.fragment); // Just a stale transfer, it's probably dead anyway. + } else { + UDPARD_ASSERT(slot->state == rx_slot_done); + // The oldest slot is DONE; we cannot just reset it, we must force an early ejection. + // The slot to eject will be chosen based on the transfer-ID, which may not be the oldest slot. + // Then we repeat the search looking for any IDLE slot, which must succeed now. + rx_session_ordered_scan_slots(self, rx, ts, true); // A slot will be ejected (we don't know which one). + slot = NULL; + for (size_t i = 0; i < RX_SLOT_COUNT; i++) { + if (self->slots[i].state == rx_slot_idle) { + slot = &self->slots[i]; + break; + } + } + } + UDPARD_ASSERT((slot != NULL) && (slot->state == rx_slot_idle)); + return slot; } -int_fast8_t udpardRxSubscriptionReceive(struct UdpardRxSubscription* const self, - const UdpardMicrosecond timestamp_usec, - const struct UdpardMutablePayload datagram_payload, - const uint_fast8_t redundant_iface_index, - struct UdpardRxTransfer* const out_transfer) -{ - int_fast8_t result = -UDPARD_ERROR_ARGUMENT; - if ((self != NULL) && (timestamp_usec != TIMESTAMP_UNSET) && (datagram_payload.data != NULL) && - (redundant_iface_index < UDPARD_NETWORK_INTERFACE_COUNT_MAX) && (out_transfer != NULL)) - { - result = rxPortAcceptFrame(&self->port, - redundant_iface_index, - timestamp_usec, - datagram_payload, - self->memory, - out_transfer); - } - else if (self != NULL) - { - memFreePayload(self->memory.payload, datagram_payload); +static void rx_session_update(rx_session_t* const self, + udpard_rx_t* const rx, + const udpard_us_t ts, + const udpard_udpip_ep_t src_ep, + rx_frame_t* const frame, + const udpard_deleter_t payload_deleter, + const uint_fast8_t ifindex) +{ + UDPARD_ASSERT(self->remote.uid == frame->meta.sender_uid); + UDPARD_ASSERT(frame->meta.topic_hash == self->port->topic_hash); // must be checked by the caller beforehand + + // Animate the session to prevent it from being retired. 
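+    // Re-enlisting at the head keeps the list ordered by recency of activity, so stale sessions accumulate at
+    // the tail where the retirement logic can find them without touching the recently animated ones.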
+ enlist_head(&rx->list_session_by_animation, &self->list_by_animation); + self->last_animated_ts = ts; + + // Update the return path discovery state. + // We identify nodes by their UID, allowing them to migrate across interfaces and IP addresses. + UDPARD_ASSERT(ifindex < UDPARD_IFACE_COUNT_MAX); + self->remote.endpoints[ifindex] = src_ep; + + // Do-once initialization to ensure we don't lose any transfers by choosing the initial transfer-ID poorly. + // Any transfers with prior transfer-ID values arriving later will be rejected, which is acceptable. + if (!self->initialized) { + self->initialized = true; + self->history_current = 0; + for (size_t i = 0; i < RX_TRANSFER_HISTORY_COUNT; i++) { + self->history[i] = frame->meta.transfer_id - 1U; + } } - else - { - (void) 0; + self->port->vtable_private->update_session(self, rx, ts, frame, payload_deleter); +} + +/// The ORDERED mode implementation. May delay incoming transfers to maintain strict transfer-ID ordering. +/// The ORDERED mode is much more complex and CPU-heavy. +static void rx_session_update_ordered(rx_session_t* const self, + udpard_rx_t* const rx, + const udpard_us_t ts, + rx_frame_t* const frame, + const udpard_deleter_t payload_deleter) +{ + // The queries here may be a bit time-consuming. If this becomes a problem, there are many ways to optimize this. + const bool is_ejected = rx_session_is_transfer_ejected(self, frame->meta.transfer_id); + const bool is_late_or_ejected = rx_session_is_transfer_late_or_ejected(self, frame->meta.transfer_id); + const bool is_interned = rx_session_is_transfer_interned(self, frame->meta.transfer_id); + const bool is_new = !is_late_or_ejected && !is_interned; + if (is_new) { + rx_slot_t* const slot = rx_session_get_slot(self, rx, ts, frame->meta.transfer_id); + UDPARD_ASSERT((slot != NULL) && (slot->state != rx_slot_done)); + UDPARD_ASSERT((slot->state == rx_slot_idle) || + ((slot->state == rx_slot_busy) && (slot->transfer_id == frame->meta.transfer_id))); + rx_slot_update(slot, + ts, + self->port->memory.fragment, + payload_deleter, + frame, + self->port->extent, + &rx->errors_oom, + &rx->errors_transfer_malformed); + if (slot->state == rx_slot_done) { + UDPARD_ASSERT(rx_session_is_transfer_interned(self, slot->transfer_id)); + if (frame->meta.flag_reliable) { + // Payload view: ((udpard_fragment_t*)cavl2_min(slot->fragments))->view + tx_send_ack(rx, ts, slot->priority, self->port->topic_hash, slot->transfer_id, self->remote); + } + rx_session_ordered_scan_slots(self, rx, ts, false); + } + } else { // retransmit ACK if needed + // Note: transfers that are no longer retained in the history will not solicit an ACK response, + // meaning that the sender will not get a confirmation if the retransmitted transfer is too old. + // We assume that RX_TRANSFER_HISTORY_COUNT is enough to cover all sensible use cases. + if ((is_interned || is_ejected) && frame->meta.flag_reliable && (frame->base.offset == 0U)) { + // Payload view: frame->base.payload + tx_send_ack(rx, ts, frame->meta.priority, self->port->topic_hash, frame->meta.transfer_id, self->remote); + } + mem_free_payload(payload_deleter, frame->base.origin); + } +} + +/// The UNORDERED mode implementation. Ejects every transfer immediately upon completion without delay. +/// The reordering timer is not used. 
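+/// For example, if transfers 5, 7, 6 complete in that order, this mode delivers them as 5, 7, 6 immediately,
+/// whereas the ORDERED mode above would intern 7 until 6 arrives or the reordering window closes.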
+static void rx_session_update_unordered(rx_session_t* const self,
+                                        udpard_rx_t* const rx,
+                                        const udpard_us_t ts,
+                                        rx_frame_t* const frame,
+                                        const udpard_deleter_t payload_deleter)
+{
+    UDPARD_ASSERT(self->port->mode == udpard_rx_unordered);
+    UDPARD_ASSERT(self->port->reordering_window == 0);
+    // We do not check interned transfers because in the UNORDERED mode they are never interned, always ejected ASAP.
+    // We don't care about the ordering, either; we just accept anything that looks new.
+    if (!rx_session_is_transfer_ejected(self, frame->meta.transfer_id)) {
+        rx_slot_t* const slot = rx_session_get_slot(self, rx, ts, frame->meta.transfer_id); // new or continuation
+        UDPARD_ASSERT((slot != NULL) && (slot->state != rx_slot_done));
+        UDPARD_ASSERT((slot->state == rx_slot_idle) ||
+                      ((slot->state == rx_slot_busy) && (slot->transfer_id == frame->meta.transfer_id)));
+        rx_slot_update(slot,
+                       ts,
+                       self->port->memory.fragment,
+                       payload_deleter,
+                       frame,
+                       self->port->extent,
+                       &rx->errors_oom,
+                       &rx->errors_transfer_malformed);
+        if (slot->state == rx_slot_done) {
+            if (frame->meta.flag_reliable) { // Payload view: ((udpard_fragment_t*)cavl2_min(slot->fragments))->view
+                tx_send_ack(rx, ts, slot->priority, self->port->topic_hash, slot->transfer_id, self->remote);
+            }
+            rx_session_eject(self, rx, slot);
+        }
+    } else { // retransmit ACK if needed
+        if (frame->meta.flag_reliable && (frame->base.offset == 0U)) { // Payload view: frame->base.payload
+            UDPARD_ASSERT(rx_session_is_transfer_ejected(self, frame->meta.transfer_id));
+            tx_send_ack(rx, ts, frame->meta.priority, self->port->topic_hash, frame->meta.transfer_id, self->remote);
+        }
+        mem_free_payload(payload_deleter, frame->base.origin);
+    }
+}
+
+/// The stateful strategy maintains a dedicated session per remote node, indexed in a fast AVL tree.
+static void rx_port_accept_stateful(udpard_rx_t* const rx,
+                                    udpard_rx_port_t* const port,
+                                    const udpard_us_t timestamp,
+                                    const udpard_udpip_ep_t source_ep,
+                                    rx_frame_t* const frame,
+                                    const udpard_deleter_t payload_deleter,
+                                    const uint_fast8_t iface_index)
+{
+    rx_session_factory_args_t fac_args = { .owner = port,
+                                           .sessions_by_animation = &rx->list_session_by_animation,
+                                           .remote_uid = frame->meta.sender_uid,
+                                           .now = timestamp };
+    rx_session_t* const ses = // Will find an existing one or create a new one.
+        (rx_session_t*)(void*)cavl2_find_or_insert(&port->index_session_by_remote_uid,
+                                                   &frame->meta.sender_uid,
+                                                   &cavl_compare_rx_session_by_remote_uid,
+                                                   &fac_args,
+                                                   &cavl_factory_rx_session_by_remote_uid);
+    if (ses != NULL) {
+        rx_session_update(ses, rx, timestamp, source_ep, frame, payload_deleter, iface_index);
+    } else {
+        mem_free_payload(payload_deleter, frame->base.origin);
+        ++rx->errors_oom;
+    }
+}
+
+/// The stateless strategy accepts only single-frame transfers and does not maintain any session state.
+/// It could be trivially extended to fall back to UNORDERED when multi-frame transfers are detected.
+static void rx_port_accept_stateless(udpard_rx_t* const rx,
+                                     udpard_rx_port_t* const port,
+                                     const udpard_us_t timestamp,
+                                     const udpard_udpip_ep_t source_ep,
+                                     rx_frame_t* const frame,
+                                     const udpard_deleter_t payload_deleter,
+                                     const uint_fast8_t iface_index)
+{
+    const size_t required_size = smaller(port->extent, frame->meta.transfer_payload_size);
+    const bool full_transfer = (frame->base.offset == 0) && (frame->base.payload.size >= required_size);
+    if (full_transfer) {
+        // The fragment allocation is only needed to uphold the callback protocol.
+ // Maybe we could do something about it in the future to avoid this allocation. + udpard_fragment_t* const frag = rx_fragment_new(port->memory.fragment, payload_deleter, frame->base); + if (frag != NULL) { + udpard_remote_t remote = { .uid = frame->meta.sender_uid }; + remote.endpoints[iface_index] = source_ep; + // The CRC is validated by the frame parser for the first frame of any transfer. It is certainly correct. + UDPARD_ASSERT(frame->base.crc == crc_full(frame->base.payload.size, frame->base.payload.data)); + const udpard_rx_transfer_t transfer = { + .timestamp = timestamp, + .priority = frame->meta.priority, + .transfer_id = frame->meta.transfer_id, + .remote = remote, + .payload_size_stored = required_size, + .payload_size_wire = frame->meta.transfer_payload_size, + .payload = frag, + }; + port->vtable->on_message(rx, port, transfer); + } else { + mem_free_payload(payload_deleter, frame->base.origin); + ++rx->errors_oom; + } + } else { + mem_free_payload(payload_deleter, frame->base.origin); + ++rx->errors_transfer_malformed; // The stateless mode expects only single-frame transfers. } - return result; } -int_fast8_t udpardRxRPCDispatcherInit(struct UdpardRxRPCDispatcher* const self, - const struct UdpardRxMemoryResources memory) +static const udpard_rx_port_vtable_private_t rx_port_vtb_ordered = { .accept = rx_port_accept_stateful, + .update_session = rx_session_update_ordered }; +static const udpard_rx_port_vtable_private_t rx_port_vtb_unordered = { .accept = rx_port_accept_stateful, + .update_session = rx_session_update_unordered }; +static const udpard_rx_port_vtable_private_t rx_port_vtb_stateless = { .accept = rx_port_accept_stateless, + .update_session = NULL }; + +// --------------------------------------------- RX PUBLIC API --------------------------------------------- + +static bool rx_validate_mem_resources(const udpard_rx_mem_resources_t memory) { - int_fast8_t result = -UDPARD_ERROR_ARGUMENT; - if ((self != NULL) && rxValidateMemoryResources(memory)) - { - memZero(sizeof(*self), self); - self->local_node_id = UDPARD_NODE_ID_UNSET; - self->memory = memory; - self->request_ports = NULL; - self->response_ports = NULL; - result = 0; - } - return result; + return (memory.session.vtable != NULL) && (memory.session.vtable->base.free != NULL) && + (memory.session.vtable->alloc != NULL) && // + (memory.fragment.vtable != NULL) && (memory.fragment.vtable->base.free != NULL) && + (memory.fragment.vtable->alloc != NULL); } -int_fast8_t udpardRxRPCDispatcherStart(struct UdpardRxRPCDispatcher* const self, - const UdpardNodeID local_node_id, - struct UdpardUDPIPEndpoint* const out_udp_ip_endpoint) +void udpard_rx_new(udpard_rx_t* const self, udpard_tx_t* const tx) { - int_fast8_t result = -UDPARD_ERROR_ARGUMENT; - if ((self != NULL) && (out_udp_ip_endpoint != NULL) && (local_node_id <= UDPARD_NODE_ID_MAX) && - (self->local_node_id > UDPARD_NODE_ID_MAX)) - { - self->local_node_id = local_node_id; - *out_udp_ip_endpoint = makeServiceUDPIPEndpoint(local_node_id); - result = 0; - } - return result; + UDPARD_ASSERT(self != NULL); + mem_zero(sizeof(*self), self); + self->list_session_by_animation = (udpard_list_t){ NULL, NULL }; + self->index_session_by_reordering = NULL; + self->errors_oom = 0; + self->errors_frame_malformed = 0; + self->errors_transfer_malformed = 0; + self->tx = tx; + self->user = NULL; } -int_fast8_t udpardRxRPCDispatcherListen(struct UdpardRxRPCDispatcher* const self, - struct UdpardRxRPCPort* const port, - const UdpardPortID service_id, - const bool is_request, - 
const size_t extent) +void udpard_rx_poll(udpard_rx_t* const self, const udpard_us_t now) { - int_fast8_t result = -UDPARD_ERROR_ARGUMENT; - if ((self != NULL) && (port != NULL) && (service_id <= UDPARD_SERVICE_ID_MAX)) - { - const int_fast8_t cancel_result = udpardRxRPCDispatcherCancel(self, service_id, is_request); - UDPARD_ASSERT((cancel_result == 0) || (cancel_result == 1)); // We already checked the arguments. - memZero(sizeof(*port), port); - port->service_id = service_id; - rxPortInit(&port->port); - port->port.extent = extent; - port->user_reference = NULL; - // Insert the newly initialized service into the tree. - const struct UdpardTreeNode* const item = cavlSearch(is_request ? &self->request_ports : &self->response_ports, - port, - &rxRPCSearch, - &avlTrivialFactory); - UDPARD_ASSERT((item != NULL) && (item == &port->base)); - (void) item; - result = (cancel_result > 0) ? 0 : 1; - } - return result; -} - -int_fast8_t udpardRxRPCDispatcherCancel(struct UdpardRxRPCDispatcher* const self, - const UdpardPortID service_id, - const bool is_request) -{ - int_fast8_t result = -UDPARD_ERROR_ARGUMENT; - if ((self != NULL) && (service_id <= UDPARD_SERVICE_ID_MAX)) + // Retire timed out sessions. We retire at most one per poll to avoid burstiness -- session retirement + // may potentially free up a lot of memory at once. { - UdpardPortID service_id_mutable = service_id; - struct UdpardTreeNode** const root = is_request ? &self->request_ports : &self->response_ports; - struct UdpardRxRPCPort* const item = - (struct UdpardRxRPCPort*) (void*) cavlSearch(root, &service_id_mutable, &rxRPCSearchByServiceID, NULL); - if (item != NULL) - { - cavlRemove(root, &item->base); - rxPortFree(&item->port, self->memory); + rx_session_t* const ses = LIST_TAIL(self->list_session_by_animation, rx_session_t, list_by_animation); + if ((ses != NULL) && (now >= (ses->last_animated_ts + SESSION_LIFETIME))) { + rx_session_free(ses, &self->list_session_by_animation, &self->index_session_by_reordering); } - result = (item == NULL) ? 0 : 1; } - return result; + // Process reordering window timeouts. + // We may process more than one to minimize transfer delays; this is also expected to be quick. 
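+    // Sessions awaiting reordering are indexed by their reordering deadline, so cavl2_min() yields the most
+    // urgent session first; the loop stops as soon as the nearest deadline is still in the future.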
+ while (true) { + rx_session_t* const ses = + CAVL2_TO_OWNER(cavl2_min(self->index_session_by_reordering), rx_session_t, index_reordering_window); + if ((ses == NULL) || (now < ses->reordering_window_deadline)) { + break; + } + rx_session_ordered_scan_slots(ses, self, now, false); + } +} + +bool udpard_rx_port_new(udpard_rx_port_t* const self, + const uint64_t topic_hash, + const size_t extent, + const udpard_rx_mode_t mode, + const udpard_us_t reordering_window, + const udpard_rx_mem_resources_t memory, + const udpard_rx_port_vtable_t* const vtable) +{ + bool ok = (self != NULL) && rx_validate_mem_resources(memory) && (reordering_window >= 0) && (vtable != NULL) && + (vtable->on_message != NULL); + if (ok) { + mem_zero(sizeof(*self), self); + self->topic_hash = topic_hash; + self->extent = extent; + self->mode = mode; + self->memory = memory; + self->index_session_by_remote_uid = NULL; + self->vtable = vtable; + self->user = NULL; + switch (mode) { + case udpard_rx_stateless: + self->vtable_private = &rx_port_vtb_stateless; + self->reordering_window = 0; + break; + case udpard_rx_unordered: + self->vtable_private = &rx_port_vtb_unordered; + self->reordering_window = 0; + break; + case udpard_rx_ordered: + self->vtable_private = &rx_port_vtb_ordered; + self->reordering_window = reordering_window; + UDPARD_ASSERT(self->reordering_window >= 0); + break; + default: + ok = false; + } + } + return ok; } -int_fast8_t udpardRxRPCDispatcherReceive(struct UdpardRxRPCDispatcher* const self, - const UdpardMicrosecond timestamp_usec, - const struct UdpardMutablePayload datagram_payload, - const uint_fast8_t redundant_iface_index, - struct UdpardRxRPCPort** const out_port, - struct UdpardRxRPCTransfer* const out_transfer) +void udpard_rx_port_free(udpard_rx_t* const rx, udpard_rx_port_t* const port) { - bool release = true; - int_fast8_t result = -UDPARD_ERROR_ARGUMENT; - if ((self != NULL) && (timestamp_usec != TIMESTAMP_UNSET) && (datagram_payload.data != NULL) && - (redundant_iface_index < UDPARD_NETWORK_INTERFACE_COUNT_MAX) && (out_transfer != NULL)) - { - result = 0; // Invalid frames cannot complete a transfer, so zero is the new default. - RxFrame frame = {0}; - const bool accept = rxParseFrame(datagram_payload, &frame) && - ((frame.meta.data_specifier & DATA_SPECIFIER_SERVICE_NOT_MESSAGE_MASK) != 0) && - (frame.meta.dst_node_id == self->local_node_id); - if (accept) - { - // Service transfers cannot be anonymous. This is enforced by the rxParseFrame function; we re-check this. - UDPARD_ASSERT(frame.meta.src_node_id != UDPARD_NODE_ID_UNSET); - // Parse the data specifier in the frame. - out_transfer->is_request = - (frame.meta.data_specifier & DATA_SPECIFIER_SERVICE_REQUEST_NOT_RESPONSE_MASK) != 0; - out_transfer->service_id = frame.meta.data_specifier & DATA_SPECIFIER_SERVICE_ID_MASK; - // Search for the RPC-port that is registered for this service transfer in the tree. - struct UdpardRxRPCPort* const item = - (struct UdpardRxRPCPort*) (void*) cavlSearch(out_transfer->is_request ? &self->request_ports - : &self->response_ports, - &out_transfer->service_id, - &rxRPCSearchByServiceID, - NULL); - // If such a port is found, accept the frame on it. - if (item != NULL) - { - result = rxPortAccept(&item->port, - redundant_iface_index, - timestamp_usec, - frame, - self->memory, - &out_transfer->base); - release = false; - } // else, the application is not interested in this service-ID (does not know how to handle it). - // Expose the port instance to the caller if requested. 
- if (out_port != NULL) - { - *out_port = item; - } - } // else, we didn't accept so we just ignore this frame - } - if ((self != NULL) && release) - { - memFreePayload(self->memory.payload, datagram_payload); + if ((rx != NULL) && (port != NULL)) { + while (port->index_session_by_remote_uid != NULL) { + rx_session_free((rx_session_t*)(void*)port->index_session_by_remote_uid, + &rx->list_session_by_animation, + &rx->index_session_by_reordering); + } } - return result; } -// ===================================================================================================================== -// ==================================================== MISC ===================================================== -// ===================================================================================================================== - -size_t udpardGather(const struct UdpardFragment head, const size_t destination_size_bytes, void* const destination) -{ - size_t offset = 0; - if (NULL != destination) - { - const struct UdpardFragment* frag = &head; - while ((frag != NULL) && (offset < destination_size_bytes)) - { - UDPARD_ASSERT(frag->view.data != NULL); - const size_t frag_size = smaller(frag->view.size, destination_size_bytes - offset); - // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling) - (void) memmove(((byte_t*) destination) + offset, frag->view.data, frag_size); - offset += frag_size; - UDPARD_ASSERT(offset <= destination_size_bytes); - frag = frag->next; +static void rx_accept_ack(udpard_rx_t* const rx, const udpard_bytes_t message) +{ + if (message.size >= ACK_SIZE_BYTES) { + uint64_t topic_hash = 0; + uint64_t transfer_id = 0; + (void)deserialize_u64(((const byte_t*)message.data) + 0U, &topic_hash); + (void)deserialize_u64(((const byte_t*)message.data) + 8U, &transfer_id); + tx_receive_ack(rx, topic_hash, transfer_id); + } +} + +bool udpard_rx_port_push(udpard_rx_t* const rx, + udpard_rx_port_t* const port, + const udpard_us_t timestamp, + const udpard_udpip_ep_t source_ep, + const udpard_bytes_mut_t datagram_payload, + const udpard_deleter_t payload_deleter, + const uint_fast8_t iface_index) +{ + const bool ok = (rx != NULL) && (port != NULL) && (timestamp >= 0) && udpard_is_valid_endpoint(source_ep) && + (datagram_payload.data != NULL) && (iface_index < UDPARD_IFACE_COUNT_MAX) && + (payload_deleter.vtable != NULL) && (payload_deleter.vtable->free != NULL); + if (ok) { + rx_frame_t frame = { 0 }; + uint32_t frame_index = 0; + uint32_t offset_32 = 0; + const bool frame_valid = header_deserialize( + datagram_payload, &frame.meta, &frame_index, &offset_32, &frame.base.crc, &frame.base.payload); + frame.base.offset = (size_t)offset_32; + (void)frame_index; // currently not used by this reassembler implementation. + frame.base.origin = datagram_payload; // Take ownership of the payload. + if (frame_valid) { + if (frame.meta.topic_hash == port->topic_hash) { + if (!frame.meta.flag_acknowledgement) { + port->vtable_private->accept(rx, port, timestamp, source_ep, &frame, payload_deleter, iface_index); + } else { + UDPARD_ASSERT(frame.base.offset == 0); // checked by the frame parser + rx_accept_ack(rx, frame.base.payload); + mem_free_payload(payload_deleter, frame.base.origin); + } + } else { // Collisions are discovered early so that we don't attempt to allocate sessions for them. 
+ mem_free_payload(payload_deleter, frame.base.origin); + udpard_remote_t remote = { .uid = frame.meta.sender_uid }; + remote.endpoints[iface_index] = source_ep; + if (port->vtable->on_collision != NULL) { + port->vtable->on_collision(rx, port, remote); + } + } + } else { + mem_free_payload(payload_deleter, frame.base.origin); + ++rx->errors_frame_malformed; } } - return offset; + return ok; } diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 2c8a5e1..4e2ce06 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -5,189 +5,44 @@ /// `____/ .___/`___/_/ /_/`____/`__, / .___/_/ /_/`__,_/_/ /// /_/ /____/_/ /// -/// LibUDPard is a compact implementation of the Cyphal/UDP protocol for high-integrity real-time embedded systems. -/// It is designed for use in robust deterministic embedded systems equipped with at least 64K ROM and RAM. -/// The codebase is compliant with a large subset of MISRA C, has full test coverage, and is validated by at least -/// two static analyzers. The library is designed to be compatible with any conventional target platform and -/// instruction set architecture, from 8 to 64 bit, little- and big-endian, RTOS-based or baremetal, -/// as long as there is a standards-compliant ISO C99 compiler available. +/// LibUDPard is a compact implementation of the Cyphal/UDP transport for high-integrity real-time embedded systems. +/// It is designed for use in robust deterministic embedded systems equipped with at least ~100K ROM and RAM, +/// as well as in general-purpose software. /// -/// The library offers a very low-level API that may be cumbersome to use in many applications. -/// Users seeking a higher-level API are encouraged to use LibCyphal instead, which builds on top of LibUDPard et al. +/// The codebase is compliant with a large subset of MISRA C and is fully covered by unit and end-to-end tests. +/// The library is designed to be compatible with any conventional target platform, from 8 to 64 bit, little- and +/// big-endian, RTOS-based or baremetal, as long as there is a standards-compliant ISO C99 or C11 compiler available. /// -/// -/// INTEGRATION -/// -/// The library is intended to be integrated into the end application by simply copying its source files into the +/// The library is intended to be integrated into the end application by simply copying udpard.c/.h into the /// source tree of the project; it does not require any special compilation options and should work out of the box. /// There are build-time configuration parameters defined near the top of udpard.c, but they are optional to use. /// -/// As explained in this documentation, the library requires a deterministic constant-time dynamic memory allocator. -/// If your target platform does not provide a deterministic memory manager, it is recommended to use O1Heap -/// (MIT licensed): https://github.com/pavel-kirienko/o1heap. -/// -/// To use the library, the application needs to provide an implementation of the UDP/IP stack with IGMP support. +/// To use the library, the application needs to provide a minimal UDP/IPv4 stack supporting IGMP v2 and passive ARP. /// POSIX-based systems may use the standard Berkeley sockets API, while more constrained embedded systems may choose -/// to rely either on a third-party solution like LwIP or a custom UDP/IP stack implementation. -/// -/// Cyphal/UDP leverages only multicast IP addresses; the underlying UDP/IP stack does not need to support ARP or ICMP. 
-/// -/// -/// ARCHITECTURE -/// -/// In the following description, the terms "local Cyphal node" and "application" are used interchangeably. -/// Some applications may require more than one logical Cyphal node to operate side-by-side. -/// Each logical node may utilize more than one network interface for redundancy. -/// -/// This library implements the Cyphal/UDP transport protocol. Resembling the conventional OSI model, the Cyphal -/// protocol stack --- when implemented with the help of this library --- consists of the following layers: -/// -/// LAYER | DESCRIPTION -/// ----------------|--------------------------------------------------------------------------------------- -/// APPLICATION | User-defined and Cyphal-standard application logic -/// PRESENTATION | Autogenerated code for DSDL serialization/deserialization (see Nunavut) -/// +-> TRANSPORT | THIS LIBRARY -/// | NETWORK | The UDP/IP stack provided by the application (LwIP, custom, Berkeley sockets, etc). -/// | -/// +------ you are here -/// -/// The library consists of three independent parts: -/// -/// - The transmission pipeline (TX pipeline) for publishing messages and sending RPC-service requests & responses. -/// - The reception pipeline (RX pipeline), which in turn is built from two sub-pipelines: -/// - subscriptions -- for subscribing to subjects (aka topics); -/// - service dispatcher -- for receiving service requests and responses; both clients and servers need this. -/// -/// As these components share no resources within the library, they can be used in different threads, -/// provided that there are no thread-unsafe resources shared between them in the application (such as heaps). -/// -/// The library supports at most UDPARD_NETWORK_INTERFACE_COUNT_MAX redundant network interfaces. -/// Transfers received from each interface are reassembled independently and the first interface to complete a -/// transfer is always chosen to deliver the transfer to the application, while the transfers from the slower -/// interface are discarded as duplicates. The application must assign each of the redundant interface a -/// unique integer ID in the range [0, UDPARD_NETWORK_INTERFACE_COUNT_MAX) to allow the library to distinguish -/// between them. -/// -/// As will be shown below, a typical application with R redundant network interfaces and S topic subscriptions needs -/// R*(S+2) sockets (or equivalent abstractions provided by the underlying UDP/IP stack). -/// -/// As a matter of convention, resource disposal functions are named "free" if the memory of the resource itself is -/// not deallocated, and "destroy" if the memory is deallocated. -/// -/// -/// Transmission pipeline -/// -/// The transmission pipeline is used to publish messages and send RPC-service requests and responses to the network -/// through a particular redundant interface. A Cyphal node with R redundant network interfaces needs to instantiate -/// R transmission pipelines, one per interface, unless the application is not interested in sending data at all. -/// The transmission pipeline contains a prioritized queue of UDP datagrams scheduled for transmission via its -/// network interface. 
The typical usage pattern is to enqueue Cyphal transfers using dedicated functions (see -/// udpardTxPublish, udpardTxRequest, udpardTxRespond) into all instances of transmission pipelines -/// (i.e., once per redundant interface) and periodically check the network interface for readiness to accept writes; -/// once the interface is ready, pop the next datagram scheduled for transmission from the queue and send it. -/// -/// Each transmission pipeline instance requires one socket (or a similar abstraction provided by the underlying -/// UDP/IP stack) that is not connected to any specific remote endpoint (i.e., usable with sendto(), -/// speaking in terms of Berkeley sockets). In the case of redundant interfaces, each socket may need to be configured -/// to emit data through its specific interface. -/// -/// Graphically, the transmission pipeline is arranged as follows: -/// -/// +---> TX PIPELINE ---> UDP SOCKET ---> REDUNDANT INTERFACE A -/// | -/// SERIALIZED TRANSFERS ---+---> TX PIPELINE ---> UDP SOCKET ---> REDUNDANT INTERFACE B -/// | -/// +---> ... -/// -/// The library supports configurable DSCP marking of the outgoing UDP datagrams as a function of Cyphal transfer -/// priority level. This is configured separately per TX pipeline instance (i.e., per network interface). -/// -/// The maximum transmission unit (MTU) can also be configured separately per TX pipeline instance. -/// Applications that are interested in maximizing their wire compatibility should not change the default MTU setting. -/// -/// -/// Reception pipeline -/// -/// The reception pipelines are used to subscribe to subjects (aka topics) and to receive RPC-service requests and -/// responses. The former are handled by "subscriptions" and the latter two are managed by a "service dispatcher". -/// Said pipelines are entirely independent from each other and can be operated from different threads, -/// as they share no resources. -/// -/// The reception pipeline is able to accept datagrams with arbitrary MTU, frames delivered out-of-order (OOO) with -/// arbitrary duplication, including duplication of non-adjacent frames, and/or frames interleaved between adjacent -/// transfers. The support for OOO reassembly is particularly interesting when simple repetition coding FEC is used. -/// -/// The application should instantiate one subscription instance per subject it needs to receive messages from, -/// irrespective of the number of redundant interfaces. There needs to be one socket (or a similar abstraction -/// provided by the underlying UDP/IP stack) per subscription instance per redundant interface, -/// each socket bound to the same UDP/IP endpoint (IP address and UDP port) which is selected by the library when -/// the subscription is created. -/// The application needs to listen to all these sockets simultaneously and pass the received UDP datagrams to the -/// corresponding subscription instance as they arrive, thus unifying the datagrams received from all redundant -/// interface sockets into a single stream. -/// At the output, subscription instances provide reassembled and deduplicated stream of Cyphal transfers ready for -/// deserialization. -/// -/// Graphically, the subscription pipeline is arranged as shown below. -/// Remember that the application with S topic subscriptions would have S such pipelines, one per subscription. -/// -/// REDUNDANT INTERFACE A ---> UDP SOCKET ---+ -/// | -/// REDUNDANT INTERFACE B ---> UDP SOCKET ---+---> SUBSCRIPTION ---> SERIALIZED TRANSFERS -/// | -/// ... 
---+ -/// -/// The application should instantiate a single service dispatcher instance irrespective of the number of redundant -/// interfaces or the set of RPC-services it is interested in (unless it is not interested in RPC-services at all). -/// The service dispatcher instance requires a single socket (or a similar abstraction provided by the underlying -/// UDP/IP stack) per redundant interface, each socket bound to the same UDP/IP endpoint (IP address and UDP port) -/// which is selected by the library when the service dispatcher is created. -/// The application needs to listen to all these sockets simultaneously and pass the received UDP datagrams to -/// the service dispatcher instance as they arrive, thus unifying the datagrams received from all redundant -/// interface sockets into a single stream. -/// -/// The service dispatcher by itself is not useful; it needs to be configured with the set of RPC-services -/// that the application is interested in. This is done by creating RPC-service RX ports and registering them -/// with the service dispatcher. The service dispatcher will then forward the received requests and responses -/// to the corresponding RPC-service RX ports; the application can then deserialize and process them. -/// -/// Graphically, the service dispatcher pipeline is arranged as shown below. -/// -/// REDUNDANT INTERFACE A ---> UDP SOCKET ---+ +---> RPC PORT X ---> SERIALIZED TRANSFERS -/// | | -/// REDUNDANT INTERFACE B ---> UDP SOCKET ---+---> SERVICE DISPATCHER ---+---> RPC PORT Y ---> SERIALIZED TRANSFERS -/// | | -/// ... ---+ +---> ... -/// -/// In summary, to make a service request, the application needs a TX pipeline to transmit the request and -/// a service dispatcher with a registered RPC-service RX port to receive the response. Same holds if the -/// application needs to handle a service request, except that the RX port will be used to accept the request -/// and the TX pipeline will be used to transmit the response. -/// -/// -/// Memory management +/// to rely either on a third-party solution like LwIP or a custom minimal UDP/IP stack. /// /// The library can be used either with a regular heap (preferably constant-time) or with a collection of fixed-size -/// block pool allocators (in safety-certified systems). It is up to the application to choose the desired memory -/// management strategy; the library is interfaced with the memory managers via a special memory resource abstraction. -/// -/// Typically, if block pool allocators are used, the following block sizes should be served: -/// -/// - MTU sized blocks for the TX and RX pipelines (usually less than 2048 bytes); -/// - TX fragment item sized blocks for the TX pipeline (less than 128 bytes). -/// - RX session object sized blocks for the RX pipeline (less than 512 bytes); -/// - RX fragment handle sized blocks for the RX pipeline (less than 128 bytes). -/// -/// The detailed information is given in the API documentation. +/// block pool allocators (may be preferable in safety-certified systems). +/// If block pool allocators are used, the following block sizes should be served: +/// - MTU-sized blocks for the TX and RX pipelines (typically at most 1.5 KB unless jumbo frames are used). +/// The TX pipeline adds a small overhead of sizeof(tx_frame_t). +/// - sizeof(tx_transfer_t) blocks for the TX pipeline to store outgoing transfer metadata. +/// - sizeof(rx_session_t) blocks for the RX pipeline to store incoming transfer session metadata. 
+/// - sizeof(udpard_fragment_t) blocks for the RX pipeline to store received data fragments.
+///
+/// Suitable memory allocators may be found here:
+/// - Constant-time ultrafast deterministic heap: https://github.com/pavel-kirienko/o1heap
+/// - Single-header fixed-size block pool: https://gist.github.com/pavel-kirienko/daf89e0481e6eac0f1fa8a7614667f59
///
/// --------------------------------------------------------------------------------------------------------------------
-///
/// This software is distributed under the terms of the MIT License.
/// Copyright (C) OpenCyphal Development Team
/// Copyright Amazon.com Inc. or its affiliates.
/// SPDX-License-Identifier: MIT
/// Author: Pavel Kirienko

+// ReSharper disable CppUnusedIncludeDirective
+
#ifndef UDPARD_H_INCLUDED
#define UDPARD_H_INCLUDED

@@ -196,594 +51,754 @@
#include <stdint.h>

#ifdef __cplusplus
-extern "C" {
+extern "C"
+{
#endif

/// Semantic version of this library (not the Cyphal specification).
/// API will be backward compatible within the same major version.
-#define UDPARD_VERSION_MAJOR 2
+#define UDPARD_VERSION_MAJOR 3
#define UDPARD_VERSION_MINOR 0

/// The version number of the Cyphal specification implemented by this library.
-#define UDPARD_CYPHAL_SPECIFICATION_VERSION_MAJOR 1
-#define UDPARD_CYPHAL_SPECIFICATION_VERSION_MINOR 0
-
-/// These error codes may be returned from the library API calls whose return type is a signed integer in the negated
-/// form (e.g., error code 2 returned as -2). A non-negative return value represents success.
-/// API calls whose return type is not a signed integer cannot fail by contract.
-/// No other error states may occur in the library.
-/// By contract, a well-characterized application with properly sized memory pools will never encounter errors.
-/// The error code 1 is not used because -1 is often used as a generic error code in 3rd-party code.
-#define UDPARD_ERROR_ARGUMENT 2
-#define UDPARD_ERROR_MEMORY 3
-#define UDPARD_ERROR_CAPACITY 4
-#define UDPARD_ERROR_ANONYMOUS 5
+#define UDPARD_CYPHAL_VERSION_MAJOR 1
+#define UDPARD_CYPHAL_VERSION_MINOR 1

/// RFC 791 states that hosts must be prepared to accept datagrams of up to 576 octets, and it is expected that this
/// library will receive non-IP-fragmented datagrams; thus, the minimum MTU should be larger than 576.
-/// That being said, the MTU here is set to 1408 which is derived as:
-///     1500B Ethernet MTU (RFC 894) - 60B IPv4 max header - 8B UDP Header - 24B Cyphal header
-#define UDPARD_MTU_DEFAULT 1408U
-/// To guarantee a single frame transfer, the maximum payload size shall be 4 bytes less to accommodate for the CRC.
-#define UDPARD_MTU_DEFAULT_MAX_SINGLE_FRAME (UDPARD_MTU_DEFAULT - 4U)
-
-/// Parameter ranges are inclusive; the lower bound is zero for all. See Cyphal/UDP Specification for background.
-#define UDPARD_SUBJECT_ID_MAX 8191U
-#define UDPARD_SERVICE_ID_MAX 511U
-#define UDPARD_NODE_ID_MAX 0xFFFEU /// 2**16-1 is reserved for the anonymous/broadcast ID.
-#define UDPARD_PRIORITY_MAX 7U
-
-/// This value represents an undefined node-ID: broadcast destination or anonymous source.
-#define UDPARD_NODE_ID_UNSET 0xFFFFU
-
-/// This is the recommended transfer-ID timeout value given in the Cyphal Specification. The application may choose
-/// different values per subscription (i.e., per data specifier) depending on its timing requirements.
-#define UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC 2000000UL
-
-/// The library supports at most this many redundant network interfaces per Cyphal node.
-#define UDPARD_NETWORK_INTERFACE_COUNT_MAX 3U - -typedef uint64_t UdpardMicrosecond; ///< UINT64_MAX is not a valid timestamp value. -typedef uint16_t UdpardPortID; -typedef uint16_t UdpardNodeID; -typedef uint64_t UdpardTransferID; ///< UINT64_MAX is not a valid transfer-ID value. - -/// Transfer priority level mnemonics per the recommendations given in the Cyphal Specification. -/// For outgoing transfers they are mapped to DSCP values as configured per redundant interface (per UdpardTx instance). -enum UdpardPriority +/// That being said, the MTU here is set to a larger value that is derived as: +/// 1500B Ethernet MTU (RFC 894) - 60B IPv4 max header - 8B UDP Header - 48B Cyphal header +/// This is also the default maximum size of a single-frame transfer. +/// The application can change this value at runtime as needed. +#define UDPARD_MTU_DEFAULT 1384U + +/// MTU less than this should not be used. This value may be increased in a future version of the library. +#define UDPARD_MTU_MIN 460U + +/// The library supports at most this many local redundant network interfaces. +#define UDPARD_IFACE_COUNT_MAX 3U + +#define UDPARD_IFACE_BITMAP_ALL ((1U << UDPARD_IFACE_COUNT_MAX) - 1U) + +/// Timestamps supplied by the application must be non-negative monotonically increasing counts of microseconds. +typedef int64_t udpard_us_t; + +/// See udpard_tx_t::ack_baseline_timeout. +/// This default value might be a good starting point for many applications running over a local network. +/// The baseline timeout should be greater than the expected round-trip time (RTT) between the most distant +/// nodes in the network for a message at the highest priority level. +#define UDPARD_TX_ACK_BASELINE_TIMEOUT_DEFAULT_us 16000LL + +/// The subject-ID only affects the formation of the multicast UDP/IP endpoint address. +/// In IPv4 networks, it is limited to 23 bits only due to the limited MAC multicast address space. +/// In IPv6 networks, 32 bits are supported. +#define UDPARD_IPv4_SUBJECT_ID_MAX 0x7FFFFFUL + +typedef enum udpard_prio_t { - UdpardPriorityExceptional = 0, - UdpardPriorityImmediate = 1, - UdpardPriorityFast = 2, - UdpardPriorityHigh = 3, - UdpardPriorityNominal = 4, ///< Nominal priority level should be the default. - UdpardPriorityLow = 5, - UdpardPrioritySlow = 6, - UdpardPriorityOptional = 7, -}; + udpard_prio_exceptional = 0, + udpard_prio_immediate = 1, + udpard_prio_fast = 2, + udpard_prio_high = 3, + udpard_prio_nominal = 4, ///< Nominal priority level should be the default. + udpard_prio_low = 5, + udpard_prio_slow = 6, + udpard_prio_optional = 7, +} udpard_prio_t; +#define UDPARD_PRIORITY_COUNT 8U + +typedef struct udpard_tree_t +{ + struct udpard_tree_t* up; + struct udpard_tree_t* lr[2]; + int_fast8_t bf; +} udpard_tree_t; -/// The AVL tree node structure is exposed here to avoid pointer casting/arithmetics inside the library. -/// The user code is not expected to interact with this type except if advanced introspection is required. -struct UdpardTreeNode +typedef struct udpard_listed_t { - struct UdpardTreeNode* up; ///< Do not access this field. - struct UdpardTreeNode* lr[2]; ///< Left and right children of this node may be accessed for tree traversal. - int_fast8_t bf; ///< Do not access this field. 
-};
+    struct udpard_listed_t* next;
+    struct udpard_listed_t* prev;
+} udpard_listed_t;

-struct UdpardMutablePayload
+typedef struct udpard_list_t
{
-    size_t size;
-    void* data;
-};
+    udpard_listed_t* head; ///< NULL if list empty
+    udpard_listed_t* tail; ///< NULL if list empty
+} udpard_list_t;

-struct UdpardPayload
+typedef struct udpard_bytes_t
{
    size_t size;
    const void* data;
-};
+} udpard_bytes_t;

-/// This type represents payload as an ordered sequence of its fragments to eliminate data copying.
-/// To free a fragmented payload buffer, the application needs to traverse the list and free each fragment's payload
-/// as well as the payload structure itself, assuming that it is also heap-allocated.
-/// The model is as follows:
-///
-///     (payload header) ---> UdpardFragment:
-///                               next   ---> UdpardFragment...
-///                               origin ---> (the free()able payload data buffer)
-///                               view   ---> (somewhere inside the payload data buffer)
-///
-/// Payloads of received transfers are represented using this type, where each fragment corresponds to a frame.
-/// The application can either consume them directly or to copy the data into a contiguous buffer beforehand
-/// at the expense of extra time and memory utilization.
-struct UdpardFragment
+typedef struct udpard_bytes_scattered_t
{
-    /// Points to the next fragment in the fragmented buffer; NULL if this is the last fragment.
-    struct UdpardFragment* next;
+    udpard_bytes_t bytes;
+    const struct udpard_bytes_scattered_t* next; ///< NULL in the last fragment.
+} udpard_bytes_scattered_t;

-    /// Contains the actual data to be used by the application.
-    /// The memory pointed to by this fragment shall not be freed by the application.
-    struct UdpardPayload view;
+typedef struct udpard_bytes_mut_t
+{
+    size_t size;
+    void* data;
+} udpard_bytes_mut_t;

-    /// This entity points to the base buffer that contains this fragment.
-    /// The application can use this pointer to free the outer buffer after the payload has been consumed.
-    /// In the most simple case this field is identical to the "view" field above, but it is not always the case.
-    struct UdpardMutablePayload origin;
+/// The size can be changed arbitrarily. This value is a compromise between copy cost, footprint, and utility.
+#ifndef UDPARD_USER_CONTEXT_PTR_COUNT
+#define UDPARD_USER_CONTEXT_PTR_COUNT 4
+#endif
+
+/// The library carries the user-provided context from inputs to outputs without interpreting it,
+/// allowing the application to associate its own data with various entities inside the library.
+typedef union udpard_user_context_t
+{
+    void* ptr[UDPARD_USER_CONTEXT_PTR_COUNT];
+    unsigned char bytes[sizeof(void*) * UDPARD_USER_CONTEXT_PTR_COUNT];
+} udpard_user_context_t;
+#ifdef __cplusplus
+#define UDPARD_USER_CONTEXT_NULL \
+    udpard_user_context_t {}
+#else
+#define UDPARD_USER_CONTEXT_NULL ((udpard_user_context_t){ .ptr = { NULL } })
+#endif
+
+/// Zeros if invalid/unset/unavailable.
+typedef struct udpard_udpip_ep_t
+{
+    uint32_t ip;
+    uint16_t port;
+} udpard_udpip_ep_t;
+
+/// The remote information can be used for sending P2P responses back to the sender, if needed.
+/// The RX pipeline will attempt to discover the sender's UDP/IP endpoint per redundant interface
+/// based on the source address of the received UDP datagrams.
If the sender's endpoint could not be discovered
+/// for a certain interface (e.g., if the sender is not connected to that interface), the corresponding entry in
+/// the endpoints array will be zeroed and udpard_is_valid_endpoint() will return false for that entry.
+///
+/// Cyphal/UDP thus allows nodes to change their network interface addresses dynamically.
+/// The library does not make any assumptions about the specific values and their uniqueness;
+/// as such, multiple remote nodes can even share the same endpoint.
+typedef struct udpard_remote_t
+{
+    uint64_t uid;
+    udpard_udpip_ep_t endpoints[UDPARD_IFACE_COUNT_MAX]; ///< Zeros in unavailable ifaces.
+} udpard_remote_t;
+
+/// Returns true if the given UDP/IP endpoint appears to be valid. Zero IP/port are considered invalid.
+bool udpard_is_valid_endpoint(const udpard_udpip_ep_t ep);
+
+/// Returns the destination multicast UDP/IP endpoint for the given subject-ID.
+/// The application should use this function when setting up subscription sockets or sending datagrams in
+/// udpard_tx_vtable_t::eject_subject().
+/// If the subject-ID exceeds UDPARD_IPv4_SUBJECT_ID_MAX, the excessive bits are masked out.
+/// For P2P, use the unicast node address directly instead, as provided by the RX pipeline per received transfer.
+udpard_udpip_ep_t udpard_make_subject_endpoint(const uint32_t subject_id);
+
+/// The memory resource semantics are similar to malloc/free.
+/// Consider using O1Heap: https://github.com/pavel-kirienko/o1heap.
+/// The API documentation is written on the assumption that the memory management functions are O(1).
+typedef struct udpard_deleter_t udpard_deleter_t;
+typedef struct udpard_mem_t udpard_mem_t;
+
+typedef struct udpard_deleter_vtable_t
+{
+    void (*free)(void* context, size_t size, void* pointer);
+} udpard_deleter_vtable_t;
+
+struct udpard_deleter_t
+{
+    const udpard_deleter_vtable_t* vtable;
+    void* context;
};

-/// Cyphal/UDP uses only multicast traffic.
-/// Unicast support is not required; one consequence is that ARP tables are not needed.
-struct UdpardUDPIPEndpoint
+typedef struct udpard_mem_vtable_t
+{
+    udpard_deleter_vtable_t base;
+    void* (*alloc)(void* context, size_t size);
+} udpard_mem_vtable_t;
+
+struct udpard_mem_t
{
-    uint32_t ip_address;
-    uint16_t udp_port;
+    const udpard_mem_vtable_t* vtable;
+    void* context;
};

+/// A helper that upcasts a memory resource into a deleter.
+udpard_deleter_t udpard_make_deleter(const udpard_mem_t memory);
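+
+/// For example, an adapter over the standard heap might look like this (a non-normative sketch; hard real-time
+/// applications should prefer a deterministic allocator such as O1Heap; the names here are illustrative only):
+///
+///     static void* heap_alloc(void* const context, const size_t size) { (void)context; return malloc(size); }
+///     static void heap_free(void* const context, const size_t size, void* const pointer)
+///     {
+///         (void)context;
+///         (void)size;
+///         free(pointer);
+///     }
+///     static const udpard_mem_vtable_t heap_vtable = { { heap_free }, heap_alloc };
+///     const udpard_mem_t heap_mem = { &heap_vtable, NULL };  // udpard_make_deleter(heap_mem) yields a deleter
+
+/// This type represents payload as a binary tree of its fragments ordered by offset to eliminate data copying.
+/// The fragments are guaranteed to be non-redundant and non-overlapping; therefore, they are also ordered by their
+/// end offsets. See the helper functions below for managing the fragment tree.
+typedef struct udpard_fragment_t
+{
+    /// The index_offset BST orders fragments by their offset (and also end=offset+size) within the transfer payload.
+    /// It must be the first member.
+    udpard_tree_t index_offset;
+
+    /// Offset of this fragment's payload within the full payload buffer. The ordering key for the index_offset tree.
+    size_t offset;
+
+    /// Contains the actual data to be used by the application.
+    /// The memory pointed to by this fragment shall not be freed nor mutated by the application.
+    udpard_bytes_t view;
+
+    /// Points to the base buffer that contains this fragment.
+    /// The application can use this pointer to free the outer buffer after the payload has been consumed.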
+    /// This memory must not be accessed by the application for any purpose other than freeing it.
+    udpard_bytes_mut_t origin;
+
+    /// When the fragment is no longer needed, this deleter shall be used to free the origin buffer.
+    /// We provide a dedicated deleter per fragment to allow NIC drivers to manage the memory directly,
+    /// which allows DMA access to the fragment data without copying.
+    /// See https://github.com/OpenCyphal-Garage/libcyphal/issues/352#issuecomment-2163056622
+    udpard_deleter_t payload_deleter;
+} udpard_fragment_t;
+
+/// Frees the memory allocated for the payload and its fragment headers using the correct deleters: the fragment
+/// deleter is given explicitly (use udpard_make_deleter() to obtain it from a memory resource), and the payload is
+/// freed using the payload_deleter per fragment.
+/// All fragments in the tree will be freed and invalidated.
+/// The passed fragment can be any fragment inside the tree (not necessarily the root).
+/// If the fragment argument is NULL, the function has no effect. The complexity is linear in the number of fragments.
+void udpard_fragment_free_all(udpard_fragment_t* const frag, const udpard_deleter_t fragment_deleter);
+
+/// Given any fragment in a transfer, returns the fragment that contains the given payload offset.
+/// Returns NULL if the offset points beyond the stored payload, or if frag is NULL.
+/// This is also the idiomatic way to find the head of the fragment list when invoked with offset zero.
+/// This function accepts any node in the fragment tree, not necessarily the head or the root, and
+/// has a logarithmic complexity in the number of fragments, which makes it very efficient.
+udpard_fragment_t* udpard_fragment_seek(const udpard_fragment_t* frag, const size_t offset);
+
+/// Given any fragment in a transfer, returns the next fragment in strictly ascending order of offsets.
+/// The offset of the next fragment always equals the sum of the offset and size of the current fragment.
+/// Returns NULL if there is no next fragment or if the given fragment is NULL.
+/// The complexity is amortized-constant.
+udpard_fragment_t* udpard_fragment_next(const udpard_fragment_t* frag);
+
+/// A convenience function built on top of udpard_fragment_seek() and udpard_fragment_next().
+/// Copies `size` bytes of payload stored in a fragment tree starting from `offset` into `destination`.
+/// The cursor pointer is an iterator updated to the last fragment touched, enabling very efficient sequential
+/// access without repeated searches; it is never set to NULL.
+/// Returns the number of bytes copied into the contiguous destination buffer, which equals `size` unless
+/// `offset+size` exceeds the amount of data stored in the fragments.
+/// The function has no effect and returns zero if the destination buffer or iterator pointer is NULL.
+size_t udpard_fragment_gather(const udpard_fragment_t** cursor,
+                              const size_t offset,
+                              const size_t size,
+                              void* const destination);
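+
+/// For example, sequential consumption of a received transfer payload might look like this (a non-normative sketch;
+/// `head`, `total`, `process()`, and `mem_fragment` are illustrative application-side names):
+///
+///     const udpard_fragment_t* cursor = head;  // head is the payload fragment delivered with the transfer
+///     uint8_t buf[64];
+///     for (size_t off = 0; off < total; off += sizeof(buf)) {
+///         const size_t n = udpard_fragment_gather(&cursor, off, sizeof(buf), buf);  // copies up to 64 bytes
+///         process(buf, n);
+///     }
+///     udpard_fragment_free_all(head, udpard_make_deleter(mem_fragment));  // release payload and fragment headers
+
// =====================================================================================================================
-// =================================================  MEMORY RESOURCE  =================================================
+// =================================================    TX PIPELINE    =================================================
// =====================================================================================================================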
The semantics are similar to malloc(): -/// - The returned pointer shall point to an uninitialized block of memory that is at least "size" bytes large. -/// - If there is not enough memory, the returned pointer shall be NULL. -/// - The memory shall be aligned at least at max_align_t. -/// - The execution time should be constant (O(1)). -/// - The worst-case memory consumption (worst fragmentation) should be understood by the developer. +/// Graphically, the transmission pipeline is arranged as shown below. +/// There is a single pipeline instance that serves all topics, P2P, and all network interfaces. /// -/// If the standard dynamic memory manager of the target platform does not satisfy the above requirements, -/// consider using O1Heap: https://github.com/pavel-kirienko/o1heap. Alternatively, some applications may prefer to -/// use a set of fixed-size block pool allocators (see the high-level overview for details). +/// +---> REDUNDANT INTERFACE A +/// | +/// TRANSFERS ---> udpard_tx_t ---+---> REDUNDANT INTERFACE B +/// | +/// +---> ... /// -/// The API documentation is written on the assumption that the memory management functions have constant -/// complexity and are non-blocking. +/// The RX pipeline is linked with the TX pipeline for reliable message management: the RX pipeline notifies +/// the TX when acknowledgments are received, and also enqueues outgoing acknowledgments to confirm received messages. +/// Thus the transmission pipeline is inherently remote-controlled by other nodes and one needs to keep in mind +/// that new frames may appear in the TX pipeline even while the application is idle. /// -/// The value of the user reference is taken from the corresponding field of the memory resource structure. -typedef void* (*UdpardMemoryAllocate)(void* const user_reference, const size_t size); - -/// The counterpart of the above -- this function is invoked to return previously allocated memory to the allocator. -/// The size argument contains the amount of memory that was originally requested via the allocation function; -/// its value is undefined if the pointer is NULL. -/// The semantics are similar to free(): -/// - The pointer was previously returned by the allocation function. -/// - The pointer may be NULL, in which case the function shall have no effect. -/// - The execution time should be constant (O(1)). +/// The reliable delivery mechanism informs the application about the number of remote subscribers that confirmed the +/// reception of each reliable message. The library uses heuristics to determine the number of attempts needed to +/// deliver the message, but it is guaranteed to cease attempts by the specified deadline. +/// Rudimentary congestion control is implemented by exponential backoff of retransmission intervals. +/// The reliability is chosen by the publisher on a per-message basis; as such, the same topic may carry both +/// reliable and unreliable messages depending on who is publishing at any given time. /// -/// The value of the user reference is taken from the corresponding field of the memory resource structure. -typedef void (*UdpardMemoryDeallocate)(void* const user_reference, const size_t size, void* const pointer); +/// Reliable messages published over high-fanout topics will generate a large amount of feedback acknowledgments, +/// which must be kept in mind when designing the network. 
+/// +/// Subscribers operating in the ORDERED mode do not acknowledge messages that have been designated as lost +/// (arriving too late, after the reordering window has passed). No negative acknowledgments are sent either +/// because there may be other subscribers on the same topic who might still be able to receive the message. +typedef struct udpard_tx_t udpard_tx_t; -/// A kind of memory resource that can only be used to free memory previously allocated by the user. -/// Instances are mostly intended to be passed by value. -struct UdpardMemoryDeleter +typedef struct udpard_tx_mem_resources_t { - void* user_reference; ///< Passed as the first argument. - UdpardMemoryDeallocate deallocate; ///< Shall be a valid pointer. -}; + /// The queue bookkeeping structures are allocated per outgoing transfer, i.e., one per udpard_tx_push(). + /// Each allocation is sizeof(tx_transfer_t). + udpard_mem_t transfer; -/// A memory resource encapsulates the dynamic memory allocation and deallocation facilities. -/// Note that the library allocates a large amount of small fixed-size objects for bookkeeping purposes; -/// allocators for them can be implemented using fixed-size block pools to eliminate extrinsic memory fragmentation. -/// Instances are mostly intended to be passed by value. -struct UdpardMemoryResource + /// The UDP datagram payload buffers are allocated per frame, each at most HEADER_SIZE+MTU+sizeof(tx_frame_t). + /// These may be distinct per interface to allow each interface to draw buffers from a specific memory region + /// or a specific DMA-compatible memory pool. + /// + /// IMPORTANT: distinct memory resources increase tx memory usage and data copying. + /// If possible, it is recommended to use the same memory resource for all interfaces, because the library will be + /// able to avoid frame duplication and instead reuse each frame across all interfaces when the MTUs are identical. + udpard_mem_t payload[UDPARD_IFACE_COUNT_MAX]; +} udpard_tx_mem_resources_t; + +/// Outcome notification for a reliable transfer previously scheduled for transmission. +typedef struct udpard_tx_feedback_t { - void* user_reference; ///< Passed as the first argument. - UdpardMemoryDeallocate deallocate; ///< Shall be a valid pointer. - UdpardMemoryAllocate allocate; ///< Shall be a valid pointer. -}; + udpard_user_context_t user; ///< Same value that was passed to udpard_tx_push(). -// ===================================================================================================================== -// ================================================= TX PIPELINE ================================================= -// ===================================================================================================================== + /// The number of remote nodes that acknowledged the reception of the transfer. + /// For P2P transfers, this value is either 0 (failure) or 1 (success). + uint16_t acknowledgements; +} udpard_tx_feedback_t; -/// The set of memory resources is used per a TX pipeline instance. -/// These are used to serve the memory needs of the library to keep state while assembling outgoing frames. -/// Several memory resources are provided to enable fine control over the allocated memory. -/// -/// A TX queue uses these memory resources for allocating the enqueued items (UDP datagrams). 
-/// There are exactly two allocations per enqueued item:
-/// - the first for bookkeeping purposes (UdpardTxItem)
-/// - second for payload storage (the frame data)
-/// In a simple application, there would be just one memory resource shared by all parts of the library.
-/// If the application knows its MTU, it can use block allocation to avoid extrinsic fragmentation.
-///
-struct UdpardTxMemoryResources
+/// Request to transmit a UDP datagram over the specified interface.
+/// Which interface indexes are available is determined by the user when pushing the transfer.
+/// If Berkeley sockets or similar API is used, the application should use a dedicated socket per redundant interface.
+typedef struct udpard_tx_ejection_t
{
-    /// The fragment handles are allocated per payload fragment; each handle contains a pointer to its fragment.
-    /// Each instance is of a very small fixed size, so a trivial zero-fragmentation block allocator is enough.
-    struct UdpardMemoryResource fragment;
+    /// The current time carried over from the API function that initiated the ejection.
+    udpard_us_t now;

-    /// The payload fragments are allocated per payload frame; each payload fragment is at most MTU-sized buffer,
-    /// so a trivial zero-fragmentation MTU-sized block allocator is enough if MTU is known in advance.
-    struct UdpardMemoryResource payload;
-};
+    /// Specifies when the frame should be considered expired and dropped if not yet transmitted by then;
+    /// it is optional to use depending on the implementation of the NIC driver (most traditional drivers ignore it).
+    /// The library guarantees that deadline >= now at the time of ejection -- expired frames are purged beforehand.
+    udpard_us_t deadline;

-/// The transmission pipeline is a prioritized transmission queue that keeps UDP datagrams (aka transport frames)
-/// destined for transmission via one network interface.
-/// Applications with redundant network interfaces are expected to have one instance of this type per interface.
-/// Applications that are not interested in transmission may have zero such instances.
-///
-/// All operations are logarithmic in complexity on the number of enqueued items.
-///
-/// Once initialized, instances cannot be copied.
-///
-/// API functions that work with this type are named "udpardTx*()", find them below.
-///
-/// FUTURE: Eventually we might consider adding another way of arranging the transmission pipeline where the UDP
-/// datagrams ready for transmission are not enqueued into the local prioritized queue but instead are sent directly
-/// to the network interface driver using a dedicated callback. The callback would accept not just a single
-/// chunk of data but a list of three chunks to avoid copying the source transfer payload: the datagram header,
-/// the payload, and (only for the last frame) the CRC. The driver would then use some form of vectorized IO or
-/// MSG_MORE/UDP_CORK to transmit the data; the advantage of this approach is that up to two data copy operations are
-/// eliminated from the stack and the memory allocator is not used at all. The disadvantage is that if the driver
-/// callback is blocking, the application thread will be blocked as well; plus the driver will be responsible
-/// for the correct prioritization of the outgoing datagrams according to the DSCP value.
-struct UdpardTx
-{
-    /// Pointer to the node-ID of the local node, which is used to populate the source node-ID field of outgoing
-    /// transfers.
-    /// This is made a pointer to allow the user to easily change the node-ID after a plug-and-play node-ID allocation
-    /// across multiple instances (remember there is a separate instance per redundant interface).
-    /// The node-ID value should be set to UDPARD_NODE_ID_UNSET if the local node is anonymous
-    /// (e.g., during PnP allocation or if no transmission is needed).
-    const UdpardNodeID* local_node_id;
-
-    /// The maximum number of UDP datagrams this instance is allowed to enqueue.
-    /// An attempt to push more will fail with UDPARD_ERROR_CAPACITY.
-    /// The purpose of this limitation is to ensure that a blocked queue does not exhaust the memory.
-    size_t queue_capacity;
-
-    /// The maximum number of Cyphal transfer payload bytes per UDP datagram.
-    /// The Cyphal/UDP header and the final CRC are added to this value to obtain the total UDP datagram payload size.
-    /// See UDPARD_MTU_*.
-    /// The value can be changed arbitrarily at any time between enqueue operations.
-    /// The value is constrained by the library to be positive.
-    size_t mtu;
-
-    /// The mapping from the Cyphal priority level in [0,7], where the highest priority is at index 0
-    /// and the lowest priority is at the last element of the array, to the IP DSCP field value.
-    /// See UdpardPriority.
-    /// By default, the mapping is initialized per the recommendations given in the Cyphal/UDP specification.
-    /// The value can be changed arbitrarily at any time between enqueue operations.
-    uint_least8_t dscp_value_per_priority[UDPARD_PRIORITY_MAX + 1U];
-
-    /// Refer to UdpardTxMemoryResources.
-    struct UdpardTxMemoryResources memory;
-
-    /// The number of frames that are currently contained in the queue, initially zero.
-    /// READ-ONLY
-    size_t queue_size;
-
-    /// Internal use only.
-    /// READ-ONLY
-    struct UdpardTreeNode* root;
-};
+    uint_fast8_t iface_index; ///< The interface index on which the datagram is to be transmitted.
+    uint_fast8_t dscp;        ///< Set the DSCP field of the outgoing UDP packet to this.
+
+    /// If the datagram pointer is retained by the application, udpard_tx_refcount_inc() must be invoked on it
+    /// to prevent it from being garbage collected. When no longer needed (e.g., upon transmission),
+    /// udpard_tx_refcount_dec() must be invoked to release the reference.
+    udpard_bytes_t datagram;

-/// One transport frame (UDP datagram) stored in the UdpardTx transmission queue along with its metadata.
-/// The datagram should be sent to the indicated UDP/IP endpoint with the specified DSCP value.
-/// The datagram should be discarded (transmission aborted) if the deadline has expired.
-/// All fields are READ-ONLY except the mutable `datagram_payload` field, which could be nullified to indicate
-/// a transfer of the payload memory ownership to somewhere else.
-struct UdpardTxItem
+    /// This is the same value that was passed to udpard_tx_push().
+    udpard_user_context_t user;
+} udpard_tx_ejection_t;
+
+/// Virtual function table for the TX pipeline, to be provided by the application.
+typedef struct udpard_tx_vtable_t
+{
+    /// Invoked from udpard_tx_poll() to push outgoing UDP datagrams into the socket/NIC driver.
+    /// It is GUARANTEED that ONLY udpard_tx_poll() can invoke this function; in particular, pushing new transfers
+    /// will not trigger ejection callbacks.
+    /// The callback must not mutate the TX pipeline (no udpard_tx_push/cancel/free).
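+    ///
+    /// A non-normative sketch of eject_subject() over Berkeley sockets is shown below; my_topic_t,
+    /// my_socket_for_iface(), and the use of user.ptr[0] to reach the topic context are application-side
+    /// assumptions (requires <sys/socket.h>, <netinet/in.h>, <arpa/inet.h>):
+    ///
+    ///     bool my_eject_subject(udpard_tx_t* const tx, udpard_tx_ejection_t* const ej)
+    ///     {
+    ///         const my_topic_t* const topic = (const my_topic_t*)ej->user.ptr[0];
+    ///         const udpard_udpip_ep_t ep = udpard_make_subject_endpoint(topic->subject_id);
+    ///         struct sockaddr_in addr = { 0 };
+    ///         addr.sin_family      = AF_INET;
+    ///         addr.sin_addr.s_addr = htonl(ep.ip);
+    ///         addr.sin_port        = htons(ep.port);
+    ///         // ej->dscp could be applied via setsockopt(IP_TOS, ej->dscp << 2); omitted for brevity.
+    ///         const ssize_t n = sendto(my_socket_for_iface(tx, ej->iface_index),
+    ///                                  ej->datagram.data, ej->datagram.size, 0,
+    ///                                  (const struct sockaddr*)&addr, sizeof(addr));
+    ///         return n == (ssize_t)ej->datagram.size;  // the return value is assumed to report success
+    ///     }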
+    ///
+    /// The destination endpoint is provided only for P2P transfers; for multicast transfers, the application
+    /// must compute the endpoint using udpard_make_subject_endpoint() based on the subject-ID. This is because
+    /// the subject-ID may be changed by the consensus algorithm at any time if a collision/divergence is detected.
+    /// The application is expected to rely on the user context to access the topic context for subject-ID derivation.
+    bool (*eject_subject)(udpard_tx_t*, udpard_tx_ejection_t*);
+    bool (*eject_p2p)(udpard_tx_t*, udpard_tx_ejection_t*, udpard_udpip_ep_t destination);
+} udpard_tx_vtable_t;
+
+/// The application must create a single instance of this struct to manage the TX pipeline.
+/// A single instance manages all redundant interfaces.
+struct udpard_tx_t
 {
-    /// Internal use only; do not access this field.
-    struct UdpardTreeNode base;
-
-    /// Points to the next frame in this transfer or NULL. This field is mostly intended for own needs of the library.
-    /// Normally, the application would not use it because transfer frame ordering is orthogonal to global TX ordering.
-    /// It can be useful though for pulling pending frames from the TX queue if at least one frame of their transfer
-    /// failed to transmit; the idea is that if at least one frame is missing, the transfer will not be received by
-    /// remote nodes anyway, so all its remaining frames can be dropped from the queue at once using udpardTxPop().
-    struct UdpardTxItem* next_in_transfer;
-
-    /// This is the same value that is passed to udpardTxPublish/Request/Respond.
-    /// Frames whose transmission deadline is in the past should be dropped (transmission aborted).
-    UdpardMicrosecond deadline_usec;
-
-    /// The IP differentiated services code point (DSCP) is used to prioritize UDP frames on the network.
-    /// LibUDPard selects the DSCP value based on the transfer priority level and the configured DSCP mapping.
-    uint_least8_t dscp;
-
-    /// Holds the original transfer priority level (before DSCP mapping, see above `dscp`).
-    enum UdpardPriority priority;
-
-    /// This UDP/IP datagram compiled by libudpard should be sent to this endpoint.
-    /// The endpoint is always at a multicast address.
-    struct UdpardUDPIPEndpoint destination;
-
-    /// The completed UDP/IP datagram payload. This includes the Cyphal header as well as all required CRCs.
-    /// It should be sent through the socket (or equivalent abstraction) verbatim.
-    struct UdpardMutablePayload datagram_payload;
-
-    /// This opaque pointer is assigned the value that is passed to udpardTxPublish/Request/Respond.
-    /// The library itself does not make use of it but the application can use it to provide continuity between
-    /// its high-level transfer objects and datagrams that originate from it.
-    /// If not needed, the application can set it to NULL.
-    void* user_transfer_reference;
+    const udpard_tx_vtable_t* vtable;
+
+    /// The globally unique identifier of the local node. Must not change after initialization.
+    uint64_t local_uid;
+
+    /// A randomly initialized transfer-ID counter for all outgoing P2P transfers. Must not be changed by the application.
+    uint64_t p2p_transfer_id;
+
+    /// The maximum number of Cyphal transfer payload bytes per UDP datagram. See UDPARD_MTU_*.
+    /// The Cyphal/UDP header is added to this value to obtain the total UDP datagram payload size.
+    /// The value can be changed arbitrarily between enqueue operations as long as it is at least UDPARD_MTU_MIN.
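+    ///
+    /// For example (a sketch; assuming two redundant interfaces -- the assignments are only illustrative):
+    ///
+    ///     tx.mtu[0] = UDPARD_MTU_DEFAULT;
+    ///     tx.mtu[1] = UDPARD_MTU_DEFAULT;  // Identical MTUs enable frame reuse across ifaces; see below.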
+    ///
+    /// IMPORTANT: distinct MTU values increase TX memory usage and data copying.
+    /// If possible, it is recommended to use the same MTU for all interfaces, because the library will be
+    /// able to avoid frame duplication and instead reuse each frame across all interfaces.
+    size_t mtu[UDPARD_IFACE_COUNT_MAX];
+
+    /// This duration is used to derive the acknowledgment timeout for reliable transfers in tx_ack_timeout().
+    /// It must be a positive number of microseconds.
+    ///
+    /// The baseline timeout should be greater than the expected round-trip time (RTT) between the most distant
+    /// nodes in the network for a message at the highest priority level.
+    ///
+    /// A sensible default is provided at initialization, which can be overridden by the application.
+    udpard_us_t ack_baseline_timeout;
+
+    /// Optional user-managed mapping from the Cyphal priority level in [0,7] (highest priority at index 0)
+    /// to the IP DSCP field value for use by the application when transmitting. By default, all entries are zero.
+    uint_least8_t dscp_value_per_priority[UDPARD_PRIORITY_COUNT];
+
+    /// The maximum number of enqueued UDP datagrams, irrespective of the transfer count, pooled across all ifaces.
+    /// The purpose of this limitation is to ensure that a blocked interface queue does not exhaust the memory.
+    /// When the limit is reached, the library will apply simple heuristics to choose which transfers to sacrifice.
+    size_t enqueued_frames_limit;
+
+    /// The number of frames that are currently registered in the queue, initially zero.
+    /// This includes frames that have been handed over to the NIC driver for transmission but are not yet released
+    /// via udpard_tx_refcount_dec().
+    /// READ-ONLY!
+    size_t enqueued_frames_count;
+
+    udpard_tx_mem_resources_t memory;
+
+    /// Error counters incremented automatically when the corresponding error condition occurs.
+    /// These counters are never decremented by the library but they can be reset by the application if needed.
+    uint64_t errors_oom;         ///< A transfer could not be enqueued due to OOM, while there was queue space available.
+    uint64_t errors_capacity;    ///< A transfer could not be enqueued due to queue capacity limit.
+    uint64_t errors_sacrifice;   ///< A transfer had to be sacrificed to make room for a new transfer.
+    uint64_t errors_expiration;  ///< A transfer had to be dequeued due to deadline expiration.
+
+    /// Internal use only, do not modify! See tx_transfer_t for details.
+    udpard_list_t queue[UDPARD_IFACE_COUNT_MAX][UDPARD_PRIORITY_COUNT];  ///< Next to transmit at the tail.
+    udpard_list_t agewise;  ///< Oldest at the tail.
+    udpard_tree_t* index_staged;
+    udpard_tree_t* index_deadline;
+    udpard_tree_t* index_transfer;
+    udpard_tree_t* index_transfer_ack;
+
+    /// Opaque pointer for the application use only. Not accessed by the library.
+    void* user;
 };

-/// Construct a new transmission pipeline with the specified queue capacity and memory resource.
-/// Refer to the documentation for UdpardTx for more information.
-/// The other parameters will be initialized to the recommended defaults automatically,
-/// which can be changed later by modifying the struct fields directly.
-/// No memory allocation is going to take place until the pipeline is actually written to.
-///
-/// The instance does not hold any resources itself except for the allocated memory.
-/// To safely discard it, simply pop all enqueued frames from it.
-///
-/// The return value is zero on success, otherwise it is a negative error code.
-/// The time complexity is constant. This function does not invoke the dynamic memory manager.
-int_fast8_t udpardTxInit(struct UdpardTx* const self,
-                         const UdpardNodeID* const local_node_id,
-                         const size_t queue_capacity,
-                         const struct UdpardTxMemoryResources memory);
-
-/// This function serializes a message transfer into a sequence of UDP datagrams and inserts them into the prioritized
-/// transmission queue at the appropriate position. Afterwards, the application is supposed to take the enqueued frames
-/// from the transmission queue using the function udpardTxPeek and transmit them one by one. Each transmitted
-/// (or discarded, e.g., due to timeout) frame should be removed from the queue using udpardTxPop. The enqueued items
-/// are prioritized according to their Cyphal transfer priority to avoid the inner priority inversion. The transfer
-/// payload will be copied into the transmission queue so that the lifetime of the datagrams is not related to the
-/// lifetime of the input payload buffer.
-///
-/// The MTU of the generated datagrams is dependent on the value of the MTU setting at the time when this function
-/// is invoked. The MTU setting can be changed arbitrarily between invocations.
-///
-/// The transfer_id parameter will be used to populate the transfer_id field of the generated datagrams.
-/// The caller shall increment the transfer-ID counter after each successful invocation of this function
-/// per redundant interface; the same transfer published over redundant interfaces shall have the same transfer-ID.
-/// There shall be a separate transfer-ID counter per subject (topic).
-/// The lifetime of the transfer-ID counter must exceed the lifetime of the intent to publish on this subject (topic);
-/// one common approach is to use a static variable or a field in a type that contains the state of the publisher.
+/// The parameters are default-initialized (MTU defaults to UDPARD_MTU_DEFAULT and counters are reset)
+/// and can be changed later by modifying the struct fields directly. No memory allocation is going to take place
+/// until the first transfer is successfully pushed via udpard_tx_push().
+///
+/// The local UID should be a globally unique EUI-64 identifier assigned to the local node. It may be a random
+/// EUI-64, which is especially useful for short-lived software nodes.
+///
+/// The p2p_transfer_id_initial value must be chosen randomly such that it is likely to be distinct per application
+/// startup. See the transfer-ID counter requirements in udpard_tx_push() for details.
+///
+/// The enqueued_frames_limit should be large enough to accommodate the expected burstiness of the application traffic.
+/// If the limit is reached, the library will apply heuristics to sacrifice some older transfers to make room
+/// for the new one. This behavior allows the library to make progress even when some interfaces are stalled.
+///
+/// Returns true on success, false if any of the arguments are invalid.
+bool udpard_tx_new(udpard_tx_t* const self,
+                   const uint64_t local_uid,
+                   const uint64_t p2p_transfer_id_initial,
+                   const size_t enqueued_frames_limit,
+                   const udpard_tx_mem_resources_t memory,
+                   const udpard_tx_vtable_t* const vtable);
+
+/// Submit a transfer for transmission. The payload data will be copied into the transmission queue, so it can be
+/// invalidated immediately after this function returns. When redundant interfaces are used, the library will attempt to
+/// minimize the number of copies by reusing frames across interfaces with identical MTU values and memory resources.
+///
+/// The caller shall increment the transfer-ID counter after each successful invocation of this function per topic.
+/// There shall be a separate transfer-ID counter per topic. The initial value shall be chosen randomly
+/// such that it is likely to be distinct per application startup (embedded systems can use noinit memory sections,
+/// hash uninitialized SRAM, use timers or ADC noise, etc.).
+/// Related thread on random transfer-ID init: https://forum.opencyphal.org/t/improve-the-transfer-id-timeout/2375
+///
+/// The user context value is carried through to the callbacks. It must contain enough context to allow subject-ID
+/// derivation inside udpard_tx_vtable_t::eject_subject(). For example, it may contain a pointer to the topic struct.
+///
+/// Returns true on success. Runtime failures increment the corresponding error counters,
+/// while invocations with invalid arguments just return false without modifying the queue state.
+///
+/// The enqueued transfer will be emitted over all interfaces specified in the iface_bitmap.
+/// The subject-ID is computed inside the udpard_tx_vtable_t::eject_subject() callback at the time of transmission.
+/// The subject-ID cannot be computed beforehand at the time of enqueuing because the topic->subject consensus protocol
+/// may find a different subject-ID allocation between the time of enqueuing and the time of (re)transmission.
+///
+/// An attempt to push a transfer with a (topic hash, transfer-ID) pair that is already enqueued will fail,
+/// as that violates the transfer-ID uniqueness requirement stated above.
+///
+/// The feedback callback is set to NULL for best-effort (non-acknowledged) transfers. Otherwise, the transfer is
+/// treated as reliable, requesting a delivery acknowledgement from remote subscribers with repeated retransmissions if
+/// necessary; it is guaranteed that delivery attempts will cease no later than by the specified deadline.
+/// The feedback callback is ALWAYS invoked EXACTLY ONCE per reliable transfer successfully pushed via udpard_tx_push(),
+/// indicating the number of remote nodes that acknowledged the reception of the transfer.
+/// The retransmission delay is increased exponentially with each retransmission attempt as a means of congestion
+/// control and latency adaptation; please refer to udpard_tx_t::ack_baseline_timeout for details.
+///
+/// Beware that reliable delivery may cause message reordering. For example, when sending messages A and B,
+/// and A is lost on the first attempt, the next attempt may be scheduled after B is published,
+/// so that the remote sees B followed by A. Most applications tolerate it without issues; if this is not the case,
+/// the subscriber should use the ORDERED subscription mode (refer to the RX pipeline for details),
+/// which will reconstruct the original message ordering.
+///
+/// On success, the function allocates a single transfer state instance and a number of payload fragments.
+/// The time complexity is O(p + log e), where p is the transfer payload size, and e is the number of
+/// transfers already enqueued in the transmission queue.
+bool udpard_tx_push(udpard_tx_t* const self,
+                    const udpard_us_t now,
+                    const udpard_us_t deadline,
+                    const uint16_t iface_bitmap,
+                    const udpard_prio_t priority,
+                    const uint64_t topic_hash,
+                    const uint64_t transfer_id,
+                    const udpard_bytes_scattered_t payload,
+                    void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t),  // NULL if best-effort.
+                    const udpard_user_context_t user);
+
+/// This is a specialization of the general push function for P2P transfers.
+/// P2P transfers treat the topic hash as the destination node's UID.
+/// The transfer-ID counter is shared across all outgoing P2P transfers and is managed automatically.
+/// If out_transfer_id is not NULL, the assigned internal transfer-ID is stored there for use with udpard_tx_cancel().
+bool udpard_tx_push_p2p(udpard_tx_t* const self,
+                        const udpard_us_t now,
+                        const udpard_us_t deadline,
+                        const udpard_prio_t priority,
+                        const udpard_remote_t remote,  // Endpoints may be invalid for some ifaces.
+                        const udpard_bytes_scattered_t payload,
+                        void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t),  // NULL if best-effort.
+                        const udpard_user_context_t user,
+                        uint64_t* const out_transfer_id);
+
+/// This should be invoked whenever the socket/NIC of this queue becomes ready to accept new datagrams for transmission.
+/// It is also fine to invoke it periodically and unconditionally to drive the transmission process.
+/// Internally, the function will query the scheduler for the next frame to be transmitted and will attempt
+/// to submit it via the eject_subject()/eject_p2p() callbacks provided in the vtable.
+/// The iface bitmap indicates which interfaces are currently ready to accept new datagrams.
+/// The function may deallocate memory. The time complexity is logarithmic in the number of enqueued transfers.
+void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now, const uint16_t iface_bitmap);
+
+/// Cancel a previously enqueued transfer.
+/// To cancel a P2P transfer, pass the destination node's UID as the topic_hash.
+/// If provided, the feedback callback will be invoked with success==false.
+/// Not safe to call from the ejection callbacks.
+/// Returns true if a transfer was found and cancelled, false if no such transfer was found.
+/// The complexity is O(log t + f), where t is the number of enqueued transfers,
+/// and f is the number of frames in the transfer.
+/// The function will free the memory associated with the transfer.
+bool udpard_tx_cancel(udpard_tx_t* const self, const uint64_t topic_hash, const uint64_t transfer_id);
+
+/// Like udpard_tx_cancel(), but cancels all transfers matching the given topic hash.
+/// Returns the number of matched transfers.
+/// This is important to invoke when destroying a topic to ensure no dangling callbacks remain.
+size_t udpard_tx_cancel_all(udpard_tx_t* const self, const uint64_t topic_hash);
+
+/// Returns a bitmap of interfaces that have pending transmissions. This is useful for IO multiplexing loops.
+/// Zero indicates that there are no pending transmissions.
+/// Which interfaces are usable is defined by the remote endpoints provided when pushing transfers.
+uint16_t udpard_tx_pending_ifaces(const udpard_tx_t* const self);
+
+/// When a datagram is ejected and the application opts to keep it, these functions must be used to manage the
+/// datagram buffer lifetime. The datagram will be freed once the reference count reaches zero.
+void udpard_tx_refcount_inc(const udpard_bytes_t tx_payload_view);
+void udpard_tx_refcount_dec(const udpard_bytes_t tx_payload_view);
+
+/// Drops all enqueued items; afterward, the instance is safe to discard. Reliable transfer callbacks are still invoked.
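+///
+/// Putting the TX API together, a rough lifecycle sketch; the my_*/now_usec()/rand64() names are hypothetical
+/// application-side helpers, and error handling is elided:
+///
+///     udpard_tx_t tx;
+///     (void) udpard_tx_new(&tx, my_local_uid, rand64(), 1000U, my_tx_memory, &my_tx_vtable);
+///     (void) udpard_tx_push(&tx, now_usec(), now_usec() + 100000U, 0x1U, my_priority,
+///                           my_topic_hash, my_transfer_id++, my_payload, NULL, my_user);  // Best-effort.
+///     udpard_tx_poll(&tx, now_usec(), udpard_tx_pending_ifaces(&tx));  // Drives ejection via the vtable.
+///     udpard_tx_free(&tx);  // Drops whatever is still enqueued; safe to discard afterward.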
+void udpard_tx_free(udpard_tx_t* const self); + +// ===================================================================================================================== +// ================================================= RX PIPELINE ================================================= +// ===================================================================================================================== + +/// The reception (RX) pipeline is used to subscribe to subjects and to receive P2P transfers. +/// The reception pipeline is highly robust and is able to accept datagrams with arbitrary MTU distinct per interface, +/// delivered out-of-order (OOO) with duplication and arbitrary interleaving between transfers. +/// All redundant interfaces are pooled together into a single fragment stream per RX port, +/// thus providing seamless failover and great resilience against packet loss on any of the interfaces. +/// The RX pipeline operates at the speed/latency of the best-performing interface at any given time. /// -/// The user_transfer_reference is an opaque pointer that will be assigned to the user_transfer_reference field of -/// each enqueued item. The library itself does not use or check this value in any way, so it can be NULL if not needed. +/// The application should instantiate one RX port instance per subject it needs to receive messages from, +/// irrespective of the number of redundant interfaces. There needs to be one socket (or a similar abstraction +/// provided by the underlying UDP/IP stack) per RX port instance per redundant interface, +/// each socket bound to the same UDP/IP endpoint (IP address and UDP port) obtained using udpard_make_subject_endpoint. +/// The application needs to listen to all these sockets simultaneously and pass the received UDP datagrams to the +/// corresponding RX port instance as they arrive. /// -/// The deadline_usec value will be used to populate the eponymous field of the generated datagrams -/// (all will share the same deadline value). -/// This feature is intended to allow aborting frames that could not be transmitted before the specified deadline; -/// therefore, normally, the timestamp value should be in the future. -/// The library itself, however, does not use or check this value in any way, so it can be zero if not needed -/// (this is not recommended for real-time systems). +/// P2P transfers are handled in a similar way, except that the topic hash is replaced with the destination node's UID, +/// and the UDP/IP endpoints are unicast addresses instead of multicast addresses. /// -/// Note that due to the priority ordering, transient transfer loss may occur if the user increases the priority -/// level on a given port. This is because the frames of the new transfer will be enqueued before the frames of -/// the previous transfer, so the frames of the previous transfer will be transmitted only after the frames of -/// the new transfer are transmitted, causing the receiver to discard them as duplicates due to their lower transfer-ID. -/// To avoid this, it is necessary to wait for all frames originating from the port to be delivered before increasing -/// the priority level on the port. The "user_transfer_reference" may help here as it allows the user to establish -/// traceability from enqueued transfer frames (datagrams) back to the port they originate from. +/// Graphically, the subscription pipeline is arranged per port as shown below. +/// Remember that the application with N RX ports would have N such pipelines, one per port. 
 ///
-/// The function returns the number of UDP datagrams enqueued, which is always a positive number, in case of success.
-/// In case of failure, the function returns a negated error code.
+/// REDUNDANT INTERFACE A ---> UDP SOCKET ---+
+///                                          |
+/// REDUNDANT INTERFACE B ---> UDP SOCKET ---+---> udpard_rx_port_t ---> TRANSFERS
+///                                          |
+///                       ...             ---+
 ///
-/// UDPARD_ERROR_ARGUMENT may be returned in the following cases:
-///     - Any of the input arguments except user_transfer_reference are NULL.
-///     - The priority or the port-ID exceed their respective maximums.
-///     - The payload pointer is NULL while the payload size is nonzero.
+/// The transfer reassembly state machine can operate in several modes described below. First, a brief summary:
 ///
-/// UDPARD_ERROR_ANONYMOUS is returned if local node is anonymous (the local node-ID is unset) and
-/// the transfer payload cannot fit into a single datagram (a multi-frame transfer is required).
+/// Mode       Guarantees                       Limitations                       Reordering window
+/// ------------------------------------------------------------------------------------------------------
+/// ORDERED    Strictly increasing transfer-ID  May delay transfers, CPU heavier  Non-negative microseconds
+/// UNORDERED  Unique transfer-ID               Ordering not guaranteed           Ignored
+/// STATELESS  Constant time, constant memory   1-frame only, dups, no responses  Ignored
 ///
-/// UDPARD_ERROR_MEMORY is returned if a TX frame could not be allocated due to the memory being exhausted.
-/// UDPARD_ERROR_CAPACITY is returned if the capacity of the queue would be exceeded by this operation.
-/// In such cases, all frames allocated for this transfer (if any) will be deallocated automatically.
-/// In other words, either all frames of the transfer are enqueued successfully, or none are.
+/// If not sure, choose unordered. The ordered mode is a good fit for ordering-sensitive use cases like state
+/// estimators and control loops, but it is not suitable for P2P.
+/// The stateless mode is chiefly intended for the heartbeat topic.
 ///
-/// The memory allocation requirement is two allocations per datagram:
-/// a single-frame transfer takes two allocations; a multi-frame transfer of N frames takes N*2 allocations.
-/// In each pair of allocations:
-///     - the first allocation is for `UdpardTxItem`; the size is `sizeof(UdpardTxItem)`;
-///       the TX queue `memory.fragment` memory resource is used for this allocation (and later for deallocation);
-///     - the second allocation is for payload storage (the frame data) - size is normally MTU but could be less for
-///       the last frame of the transfer; the TX queue `memory.payload` memory resource is used for this allocation.
+/// ORDERED
 ///
-/// The time complexity is O(p + log e), where p is the amount of payload in the transfer, and e is the number of
-/// frames already enqueued in the transmission queue.
-int32_t udpardTxPublish(struct UdpardTx* const self,
-                        const UdpardMicrosecond deadline_usec,
-                        const enum UdpardPriority priority,
-                        const UdpardPortID subject_id,
-                        const UdpardTransferID transfer_id,
-                        const struct UdpardPayload payload,
-                        void* const user_transfer_reference);
-
-/// This is similar to udpardTxPublish except that it is intended for service request transfers.
-/// It takes the node-ID of the server that is intended to receive the request.
+/// Each transfer is received at most once. The transfer-ID sequence delivered (ejected)
+/// to the application is STRICTLY INCREASING (with possible gaps in case of loss).
 ///
-/// The transfer_id parameter will be used to populate the transfer_id field of the generated datagrams.
-/// The caller shall increment the transfer-ID counter after each successful invocation of this function
-/// per redundant interface; the same transfer published over redundant interfaces shall have the same transfer-ID.
-/// There shall be a separate transfer-ID counter per pair of (service-ID, server node-ID).
-/// The lifetime of the transfer-ID counter must exceed the lifetime of the intent to invoke this service
-/// on this server node; one common approach is to use a static array or a struct field indexed by
-/// the server node-ID per service-ID (memory-constrained applications may choose a more compact container;
-/// e.g., a list or an AVL tree).
+/// The reassembler may hold completed transfers for a brief time if they arrive out-of-order,
+/// hoping for the earlier missing transfers to show up, such that they are not permanently lost.
+/// For example, a sequence 1 2 4 3 5 will be delivered as 1 2 3 4 5 if 3 arrives shortly after 4;
+/// however, if 3 does not arrive within the configured reordering window,
+/// the application will receive 1 2 4 5, and transfer 3 will be permanently lost even if it arrives later
+/// because accepting it without violating the strictly increasing transfer-ID constraint is not possible.
 ///
-/// Additional error conditions:
-///     - UDPARD_ERROR_ARGUMENT if the server node-ID value is invalid.
-///     - UDPARD_ERROR_ANONYMOUS if the local node is anonymous (the local node-ID is unset).
+/// This mode requires much more bookkeeping, which results in a greater processing load per received fragment/transfer.
 ///
-/// Other considerations are the same as for udpardTxPublish.
-int32_t udpardTxRequest(struct UdpardTx* const self,
-                        const UdpardMicrosecond deadline_usec,
-                        const enum UdpardPriority priority,
-                        const UdpardPortID service_id,
-                        const UdpardNodeID server_node_id,
-                        const UdpardTransferID transfer_id,
-                        const struct UdpardPayload payload,
-                        void* const user_transfer_reference);
-
-/// This is similar to udpardTxRequest except that it takes the node-ID of the client instead of server.
-/// The transfer-ID must be the same as that of the corresponding RPC-request transfer;
-/// this is to allow the client to match responses with their requests.
-int32_t udpardTxRespond(struct UdpardTx* const self,
-                        const UdpardMicrosecond deadline_usec,
-                        const enum UdpardPriority priority,
-                        const UdpardPortID service_id,
-                        const UdpardNodeID client_node_id,
-                        const UdpardTransferID transfer_id,
-                        const struct UdpardPayload payload,
-                        void* const user_transfer_reference);
-
-/// This function accesses the enqueued UDP datagram scheduled for transmission next. The queue itself is not modified
-/// (i.e., the accessed element is not removed). The application should invoke this function to collect the datagrams
-/// enqueued by udpardTxPublish/Request/Respond whenever the socket (or equivalent abstraction) becomes writable.
+/// Zero is not really a special case for the reordering window; it simply means that out-of-order transfers
+/// are not waited for at all (declared permanently lost immediately), and no received transfer is delayed
+/// before ejection to the application.
 ///
-/// The timestamp values of the enqueued items are initialized with deadline_usec from udpardTxPublish/Request/Respond.
-/// The timestamps are used to specify the transmission deadline. It is up to the application and/or the socket layer
-/// to implement the discardment of timed-out datagrams. The library does not check it, so a frame that is
-/// already timed out may be returned here.
+/// The ORDERED mode is mostly intended for applications like state estimators, control systems, and data streaming
+/// where ordering is critical.
 ///
-/// If the queue is empty or if the argument is NULL, the returned value is NULL.
+/// UNORDERED
 ///
-/// If the queue is non-empty, the returned value is a pointer to its top element (i.e., the next item to transmit).
-/// The returned pointer points to an object allocated in the dynamic storage; it should be eventually freed by the
-/// application by calling `udpardTxFree`. The memory shall not be freed before the item is removed
-/// from the queue by calling udpardTxPop; this is because until udpardTxPop is executed, the library retains
-/// ownership of the item. The pointer retains validity until explicitly freed by the application; in other words,
-/// calling udpardTxPop does not invalidate the object.
+/// Each transfer is ejected immediately upon successful reassembly. Ordering is not enforced,
+/// but duplicates are still removed. For example, a sequence 1 2 4 3 5 will be delivered as-is without delay.
 ///
-/// Calling functions that modify the queue may cause the next invocation to return a different pointer.
+/// This mode neither rejects nor delays transfers arriving late, making it the desired choice for applications
+/// where all transfers need to be received no matter the order. This is particularly useful for request-response
+/// topics, where late arrivals occur not only due to network conditions but also due to the inherent
+/// asynchrony between requests and responses. For example, node A could publish messages X and Y on subject S,
+/// while node B could respond to X only after receiving Y, thus causing the response to X to arrive late with
+/// respect to Y. This would cause the ORDERED mode to delay or drop the response to X, which is undesirable;
+/// therefore, the UNORDERED mode is preferred for request-response topics.
 ///
-/// The payload buffer is allocated in the dynamic storage of the queue. The application may transfer ownership of
-/// the payload to a different application component (f.e. to transmission media) by copying the pointer and then
-/// (if the ownership transfer was accepted) by nullifying `datagram_payload` fields of the frame (`size` & `data`).
-/// If these fields stay with their original values, the `udpardTxFree` (after proper `udpardTxPop` of course) will
-/// deallocate the payload buffer. In any case, the payload has to be eventually deallocated by using the TX queue
-/// `memory.payload` memory resource. It will be automatically done by the `udpardTxFree` (if the payload still
-/// stays in the item), OR if moved, it is the responsibility of the application to eventually (f.e. at the end of
-/// transmission) deallocate the memory with the TX queue `memory.payload` memory resource.
-/// Note that the mentioned above nullification of the `datagram_payload` fields is the
-/// only reason why a returned TX item pointer is mutable. It was constant in the past (before v2),
-/// but it was changed to be mutable to allow the payload ownership transfer.
+/// The unordered mode should be the default mode for most use cases.
 ///
-/// The time complexity is logarithmic of the queue size. This function does not invoke the dynamic memory manager.
-struct UdpardTxItem* udpardTxPeek(const struct UdpardTx* const self); - -/// This function transfers the ownership of the specified item of the prioritized transmission queue from the queue -/// to the application. The item does not necessarily need to be the top one -- it is safe to dequeue any item. -/// The item is dequeued but not invalidated; it is the responsibility of the application to deallocate its memory -/// later. The memory SHALL NOT be deallocated UNTIL this function is invoked (use `udpardTxFree` helper). -/// The function returns the same pointer that it is given except that it becomes mutable. +/// STATELESS /// -/// If any of the arguments are NULL, the function has no effect and returns NULL. +/// Only single-frame transfers are accepted (where the entire payload fits into a single datagram, +/// or the extent does not exceed the MTU). No attempt to enforce ordering or remove duplicates is made. +/// The return path is only discovered for the one interface that delivered the transfer. +/// Transfers arriving from N interfaces are duplicated N times. /// -/// The time complexity is logarithmic of the queue size. This function does not invoke the dynamic memory manager. -struct UdpardTxItem* udpardTxPop(struct UdpardTx* const self, struct UdpardTxItem* const item); +/// The stateless mode allocates only a fragment header per accepted frame and does not contain any +/// variable-complexity processing logic, enabling great scalability for topics with a very large number of +/// publishers where unordered and duplicated messages are acceptable, such as the heartbeat topic. -/// This is a simple helper that frees the memory allocated for the item and its payload, -/// using the correct sizes and memory resources. -/// If the item argument is NULL, the function has no effect. The time complexity is constant. -/// If the item frame payload is NULL then it is assumed that the payload buffer was already freed, -/// or moved to a different owner (f.e. to media layer). -void udpardTxFree(const struct UdpardTxMemoryResources memory, struct UdpardTxItem* const item); +/// The application will have a single RX instance to manage all subscriptions and P2P ports. +typedef struct udpard_rx_t +{ + udpard_list_t list_session_by_animation; ///< Oldest at the tail. + udpard_tree_t* index_session_by_reordering; ///< Earliest reordering window closure on the left. -// ===================================================================================================================== -// ================================================= RX PIPELINE ================================================= -// ===================================================================================================================== + uint64_t errors_oom; ///< A frame could not be processed (transfer possibly dropped) due to OOM. + uint64_t errors_frame_malformed; ///< A received frame was malformed and thus dropped. + uint64_t errors_transfer_malformed; ///< A transfer could not be reassembled correctly. -/// This type represents an open input port, such as a subscription to a subject (topic), a service server port -/// that accepts RPC-service requests, or a service client port that accepts RPC-service responses. -/// -/// The library performs transfer reassembly, deduplication, and integrity checks, along with the management of -/// redundant network interfaces. -struct UdpardRxPort + /// Incremented when an ack cannot be enqueued (including when tx is NULL). 
+    /// If tx is available, inspect its error counters for details.
+    uint64_t errors_ack_tx;
+
+    /// The transmission pipeline is needed to manage ack transmission and removal of acknowledged transfers.
+    /// If the application wants to only listen, the pointer may be NULL (no acks will be sent).
+    /// When initializing the library, the TX instance needs to be created first.
+    udpard_tx_t* tx;
+
+    void* user;  ///< Opaque pointer for the application use only. Not accessed by the library.
+} udpard_rx_t;
+
+/// These are used to serve the memory needs of the library to keep state while reassembling incoming transfers.
+/// Several memory resources are provided to enable fine control over the allocated memory if necessary; however,
+/// simple applications may choose to use the same memory resource implemented via malloc()/free() for all of them.
+typedef struct udpard_rx_mem_resources_t
+{
+    /// Provides memory for rx_session_t described below.
+    /// Each instance is fixed-size, so a trivial zero-fragmentation block allocator is sufficient.
+    udpard_mem_t session;
+
+    /// The udpard_fragment_t handles are allocated per payload fragment; each contains a pointer to its fragment.
+    /// Each instance is of a very small fixed size, so a trivial zero-fragmentation block allocator is sufficient.
+    udpard_mem_t fragment;
+} udpard_rx_mem_resources_t;
+
+typedef struct udpard_rx_port_t udpard_rx_port_t;
+typedef struct udpard_rx_transfer_t udpard_rx_transfer_t;
+
+/// RX port mode for transfer reassembly behavior.
+typedef enum udpard_rx_mode_t
+{
+    udpard_rx_unordered = 0,
+    udpard_rx_ordered   = 1,
+    udpard_rx_stateless = 2,
+} udpard_rx_mode_t;
+
+/// Provided by the application per port instance to specify the callbacks to be invoked on certain events.
+/// This design allows distinct callbacks per port, which is especially useful for the P2P port.
+typedef struct udpard_rx_port_vtable_t
 {
-    /// The maximum payload size that can be accepted at this port.
-    /// The rest will be truncated away following the implicit truncation rule defined in the Cyphal specification.
-    /// READ-ONLY
+    /// A new message is received on a port. The handler takes ownership of the payload; it must free it after use.
+    void (*on_message)(udpard_rx_t*, udpard_rx_port_t*, udpard_rx_transfer_t);
+
+    /// A topic hash collision is detected on a port.
+    /// On P2P ports, this indicates that the destination UID does not match the local UID (misaddressed message);
+    /// safe to ignore.
+    /// May be NULL if the application is not interested.
+    void (*on_collision)(udpard_rx_t*, udpard_rx_port_t*, udpard_remote_t);
+} udpard_rx_port_vtable_t;
+
+/// This type represents an open input port, such as a subscription to a topic.
+struct udpard_rx_port_t
+{
+    /// Transfers whose topic hash does not match this value are filtered out,
+    /// and the collision notification callback is invoked.
+    /// For P2P ports, this is the destination node's UID (i.e., the local node's UID).
+    uint64_t topic_hash;
+
+    /// Transfer payloads exceeding this extent may be truncated.
+    /// The total size of the received payload may still exceed this extent setting by some small margin.
     size_t extent;

-    /// Refer to the Cyphal specification for the description of the transfer-ID timeout.
-    /// By default, this is set to UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC and it can be changed by the user.
-    /// This field can be adjusted at runtime arbitrarily; e.g., this is useful to implement adaptive timeouts.
-    UdpardMicrosecond transfer_id_timeout_usec;
+    /// Behavior undefined if the reassembly mode or the reordering window is changed on a live port.
+    udpard_rx_mode_t mode;
+    udpard_us_t reordering_window;

-    /// Libudpard creates a new session instance per remote node-ID that emits transfers matching this port.
+    udpard_rx_mem_resources_t memory;
+
+    /// Libudpard creates a new session instance per remote UID that emits transfers matching this port.
     /// For example, if the local node is subscribed to a certain subject and there are X nodes publishing
     /// transfers on that subject, then there will be X sessions created for that subject.
-    /// Same applies to RPC-services as well.
-    ///
-    /// Once a session is created, it is never freed again until the port that owns it (this structure) is destroyed.
-    /// This is in line with the assumption that the network configuration is usually mostly static, and that
-    /// once a node has started emitting data on a certain port, it is likely to continue doing so.
-    /// Applications where this is not the case may consider cycling their ports periodically
-    /// by destroying and re-creating them immediately.
     ///
-    /// Each session instance takes sizeof(UdpardInternalRxSession) bytes of dynamic memory for itself,
-    /// which is at most 512 bytes on wide-word platforms (on small word size platforms it is usually much smaller).
+    /// Each session instance takes sizeof(rx_session_t) bytes of dynamic memory for itself.
     /// On top of that, each session instance holds memory for the transfer payload fragments and small fixed-size
-    /// metadata objects called "fragment handles" (at most 128 bytes large, usually much smaller,
-    /// depending on the pointer width and the word size), one handle per fragment.
+    /// metadata objects of type udpard_fragment_t, one handle per fragment.
     ///
     /// The transfer payload memory is not allocated by the library but rather moved from the application
-    /// when the corresponding UDP datagram is received. If the library chooses to keep the frame payload
-    /// (which is the case if the frame is not a duplicate, the frame sequence is valid, and the received payload
-    /// does not exceed the extent configured for the port), a new fragment handle is allocated and it takes ownership
-    /// of the entire datagram payload (including all overheads such as the Cyphal/UDP frame header and possible
-    /// data that spills over the configured extent value for this port).
+    /// when the corresponding UDP datagram is received. If the library chooses to keep the frame payload,
+    /// a new fragment handle is allocated and it takes ownership of the entire datagram payload.
     /// If the library does not need the datagram to reassemble the transfer, its payload buffer is freed immediately.
     /// There is a 1-to-1 correspondence between the fragment handles and the payload fragments.
     /// Remote nodes that emit highly fragmented transfers cause a higher memory utilization in the local node
     /// because of the increased number of fragment handles and per-datagram overheads.
     ///
-    /// In the worst case, the library may keep up to two full transfer payloads in memory at the same time
-    /// (two transfer states are kept to allow acceptance of interleaved frames).
-    ///
     /// Ultimately, the worst-case memory consumption is dependent on the configured extent and the transmitting
     /// side's MTU, as these parameters affect the number of payload buffers retained in memory.
     ///
     /// The maximum memory consumption is when there is a large number of nodes emitting data such that each node
-    /// begins a multi-frame transfer while never completing it.
-    ///
-    /// Everything stated above holds for service transfers as well.
+    /// begins a multi-frame transfer while never completing it. The library mitigates this by pruning stale
+    /// transfers and removing sessions that have been inactive for a long time.
     ///
     /// If the dynamic memory pool(s) is(are) sized correctly, and all transmitting nodes are known to avoid excessive
-    /// fragmentation of egress transfers (which can be ensured by not using MTU values smaller than the default),
+    /// fragmentation of egress transfers (which can be ensured by avoiding small MTU values),
     /// the application is guaranteed to never encounter an out-of-memory (OOM) error at runtime.
     /// High-integrity applications can optionally police ingress traffic for MTU violations and filter it before
     /// passing it to the library; alternatively, applications could limit memory consumption per port,
     /// which is easy to implement since each port gets a dedicated set of memory resources.
-    ///
-    /// READ-ONLY
-    struct UdpardInternalRxSession* sessions;
-};
-
-/// The set of memory resources is used per an RX pipeline instance such as subscription or a service dispatcher.
-/// These are used to serve the memory needs of the library to keep state while reassembling incoming transfers.
-/// Several memory resources are provided to enable fine control over the allocated memory;
-/// simple applications may choose to use the same memory resource implemented via malloc()/free() for all of them.
-struct UdpardRxMemoryResources
-{
-    /// The session memory resource is used to provide memory for the session instances described above.
-    /// Each instance is fixed-size, so a trivial zero-fragmentation block allocator is sufficient.
-    struct UdpardMemoryResource session;
+    udpard_tree_t* index_session_by_remote_uid;

-    /// The fragment handles are allocated per payload fragment; each handle contains a pointer to its fragment.
-    /// Each instance is of a very small fixed size, so a trivial zero-fragmentation block allocator is sufficient.
-    struct UdpardMemoryResource fragment;
+    const udpard_rx_port_vtable_t* vtable;
+    const struct udpard_rx_port_vtable_private_t* vtable_private;

-    /// The library never allocates payload buffers itself, as they are handed over by the application via
-    /// udpardRx*Receive. Once a buffer is handed over, the library may choose to keep it if it is deemed to be
-    /// necessary to complete a transfer reassembly, or to discard it if it is deemed to be unnecessary.
-    /// Discarded payload buffers are freed using this object.
-    struct UdpardMemoryDeleter payload;
+    /// Opaque pointer for the application use only. Not accessed by the library.
+    void* user;
 };

 /// Represents a received Cyphal transfer.
-/// The payload is owned by this instance, so the application must free it after use; see udpardRxTransferFree.
-struct UdpardRxTransfer
+/// The payload is owned by this instance, so the application must free it after use via udpard_fragment_free_all()
+/// together with the port's fragment memory resource.
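+/// For example (a sketch; the exact signature of udpard_fragment_free_all() is assumed here):
+///
+///     udpard_fragment_free_all(transfer.payload, port->memory.fragment);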
+struct udpard_rx_transfer_t { - UdpardMicrosecond timestamp_usec; - enum UdpardPriority priority; - UdpardNodeID source_node_id; - UdpardTransferID transfer_id; + udpard_us_t timestamp; + udpard_prio_t priority; + uint64_t transfer_id; + udpard_remote_t remote; /// The total size of the payload available to the application, in bytes, is provided for convenience; /// it is the sum of the sizes of all its fragments. For example, if the sender emitted a transfer of 2000 /// bytes split into two frames, 1408 bytes in the first frame and 592 bytes in the second frame, - /// then the payload_size will be 2000 and the payload buffer will contain two fragments of 1408 and 592 bytes. - /// The transfer CRC is not included here. If the received payload exceeds the configured extent, - /// the excess payload will be discarded and the payload_size will be set to the extent. + /// then the payload_size_stored will be 2000 and the payload buffer will contain two fragments of 1408 and + /// 592 bytes. If the received payload exceeds the configured extent, fragments starting past the extent are + /// dropped but fragments crossing it are kept, so payload_size_stored may exceed the extent. /// /// The application is given ownership of the payload buffer, so it is required to free it after use; /// this requires freeing both the handles and the payload buffers they point to. @@ -791,324 +806,101 @@ struct UdpardRxTransfer /// the application is responsible for freeing them using the correct memory resource. /// /// If the payload is empty, the corresponding buffer pointers may be NULL. - size_t payload_size; - struct UdpardFragment payload; -}; - -/// This is, essentially, a helper that frees the memory allocated for the payload and its fragment headers -/// using the correct memory resources. The application can do the same thing manually if it has access to the -/// required context to compute the size, or if the memory resource implementation does not require deallocation size. -/// -/// The head of the fragment list is passed by value so it is not freed. This is in line with the UdpardRxTransfer -/// design, where the head is stored by value to reduce indirection in small transfers. We call it Scott's Head. -/// -/// If any of the arguments are NULL, the function has no effect. -void udpardRxFragmentFree(const struct UdpardFragment head, - const struct UdpardMemoryResource memory_fragment, - const struct UdpardMemoryDeleter memory_payload); + size_t payload_size_stored; -// --------------------------------------------- SUBJECTS --------------------------------------------- + /// The original size of the transfer payload before extent-based dropping, in bytes. + /// This may exceed the stored payload if fragments beyond the extent were skipped. Cannot be less than + /// payload_size_stored. + size_t payload_size_wire; -/// This is a specialization of a port for subject (topic) subscriptions. -/// -/// In Cyphal/UDP, each subject (topic) has a specific IP multicast group address associated with it. -/// This address is contained in the field named "udp_ip_endpoint". -/// The application is expected to open a separate socket bound to that endpoint per redundant interface, -/// and then feed the UDP datagrams received from these sockets into udpardRxSubscriptionReceive, -/// collecting UdpardRxTransfer instances at the output. -/// -/// Observe that the subscription pipeline is entirely independent of the node-ID of the local node. 
-/// This is by design, allowing nodes to listen to subjects without having to be present online.
-struct UdpardRxSubscription
-{
-    /// See UdpardRxPort.
-    /// Use this to change the transfer-ID timeout value for this subscription.
-    struct UdpardRxPort port;
+    /// The payload is stored in a tree of fragments ordered by their offset within the payload.
+    /// See udpard_fragment_t and its helper functions for managing the fragment tree.
+    udpard_fragment_t* payload;
+};

-    /// The IP multicast group address and the UDP port number where UDP/IP datagrams matching this Cyphal
-    /// subject will be sent by the publishers (remote nodes).
-    /// READ-ONLY
-    struct UdpardUDPIPEndpoint udp_ip_endpoint;
+/// The RX instance holds no resources and can be destroyed at any time by simply freeing all its ports first
+/// using udpard_rx_port_free(), then discarding the instance itself. The self pointer must not be NULL.
+/// The TX instance must be initialized beforehand, unless the application wants to only listen,
+/// in which case it may be NULL.
+void udpard_rx_new(udpard_rx_t* const self, udpard_tx_t* const tx);

-    /// Refer to UdpardRxMemoryResources.
-    struct UdpardRxMemoryResources memory;
-};
+/// Must be invoked at least every few milliseconds (more often is fine) to purge timed-out sessions and eject
+/// received transfers when the reordering window expires. If this is invoked in the same cycle as RX datagram
+/// processing, it should ideally run after the reception handling.
+/// The time complexity is logarithmic in the number of living sessions.
+void udpard_rx_poll(udpard_rx_t* const self, const udpard_us_t now);

 /// To subscribe to a subject, the application should do this:
+///     1. Create a new udpard_rx_port_t instance using udpard_rx_port_new().
+///     2. Per redundant network interface:
+///         - Create a new RX socket bound to the IP multicast group address and UDP port number returned by
+///           udpard_make_subject_endpoint() for the desired subject-ID.
+///           For P2P transfer ports use ordinary unicast sockets.
+///     3. Read data from the sockets continuously and forward each datagram to udpard_rx_port_push(),
+///        along with the index of the redundant interface the datagram was received on.
 ///
-/// 1. Create a new UdpardRxSubscription instance.
-///
-/// 2. Initialize it by calling udpardRxSubscriptionInit. The subject-ID and port-ID are synonymous here.
-///
-/// 3. Per redundant network interface:
-///    - Create a new socket bound to the IP multicast group address and UDP port number specified in the
-///      udp_ip_endpoint field of the initialized subscription instance. The library will determine the
-///      endpoint to use based on the subject-ID.
-///
-/// 4. Read data from the sockets continuously and forward each received UDP datagram to
-///    udpardRxSubscriptionReceive, along with the index of the redundant interface the datagram was received on.
+/// For P2P ports, the procedure is identical, except that the topic hash is set to the local node's UID.
+/// There must be exactly one P2P port per node. The P2P port is also used for acks.
 ///
 /// The extent defines the maximum possible size of received objects, considering also possible future data type
-/// versions with new fields. It is safe to pick larger values.
-/// Note well that the extent is not the same thing as the maximum size of the object, it is usually larger!
-/// Transfers that carry payloads that exceed the specified extent will be accepted anyway but the excess payload
-/// will be truncated away, as mandated by the Specification. The transfer CRC is always validated regardless of
-/// whether its payload is truncated.
+/// versions with new fields. It is safe to pick larger values. Note well that the extent is not the same thing as
+/// the maximum size of the object, it is usually larger! Transfers that carry payloads beyond the specified extent
+/// still keep fragments that start before the extent, so the delivered payload may exceed it; fragments starting past
+/// the limit are dropped.
 ///
-/// By default, the transfer-ID timeout value is set to UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC.
-/// It can be changed by the user at any time by modifying the corresponding field in the subscription instance.
+/// The topic hash is needed to detect and ignore transfers that use different topics on the same subject-ID.
+/// The collision callback is invoked if a topic hash collision is detected.
 ///
-/// The return value is 0 on success.
-/// The return value is a negated UDPARD_ERROR_ARGUMENT if any of the input arguments are invalid.
+/// If not sure which reassembly mode to choose, consider `udpard_rx_unordered` as the default choice.
+/// For ordering-sensitive use cases, such as state estimators and control loops, use `udpard_rx_ordered` with a short
+/// window.
 ///
-/// The time complexity is constant. This function does not invoke the dynamic memory manager.
-int_fast8_t udpardRxSubscriptionInit(struct UdpardRxSubscription* const self,
-                                     const UdpardPortID subject_id,
-                                     const size_t extent,
-                                     const struct UdpardRxMemoryResources memory);
-
-/// Frees all memory held by the subscription instance.
-/// After invoking this function, the instance is no longer usable.
-/// The function has no effect if the instance is NULL.
-/// Do not forget to close the sockets that were opened for this subscription.
-void udpardRxSubscriptionFree(struct UdpardRxSubscription* const self);
-
-/// Datagrams received from the sockets of this subscription are fed into this function.
+/// The pointed-to vtable instance must outlive the port instance.
 ///
-/// The timestamp value indicates the arrival time of the datagram; the arrival time of the earliest datagram of
-/// a transfer becomes the transfer timestamp upon successful reassembly.
-/// This value is also used for the transfer-ID timeout management.
-/// Usually, naive software timestamping is adequate for these purposes, but some applications may require
+/// The return value is true on success, false if any of the arguments are invalid.
+/// The time complexity is constant. This function does not invoke the dynamic memory manager.
+bool udpard_rx_port_new(udpard_rx_port_t* const self,
+                        const uint64_t topic_hash,  // For P2P ports, this is the local node's UID.
+                        const size_t extent,
+                        const udpard_rx_mode_t mode,
+                        const udpard_us_t reordering_window,
+                        const udpard_rx_mem_resources_t memory,
+                        const udpard_rx_port_vtable_t* const vtable);
+
+/// Frees all memory allocated for the sessions, slots, fragments, etc. of the given port.
+/// Does not free the port itself since it is allocated by the application rather than the library,
+/// and does not alter the RX instance aside from unlinking the port from it.
+/// It is safe to invoke this at any time, but the port instance shall not be used again unless re-initialized.
+/// The function has no effect if any of the arguments are NULL.
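+///
+/// Putting the RX API together, a rough per-port lifecycle sketch; socket plumbing is omitted and the my_* names
+/// are hypothetical application-side objects:
+///
+///     udpard_rx_t rx;
+///     udpard_rx_new(&rx, &my_tx);  // The TX instance may be NULL for listen-only applications.
+///     udpard_rx_port_t port;
+///     (void) udpard_rx_port_new(&port, my_topic_hash, 1024U, udpard_rx_unordered, 0U, my_rx_memory, &my_rx_vtable);
+///     // ...feed datagrams from the sockets via udpard_rx_port_push() and invoke udpard_rx_poll() regularly...
+///     udpard_rx_port_free(&rx, &port);  // Releases sessions and fragments; rx itself holds nothing further.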
+void udpard_rx_port_free(udpard_rx_t* const rx, udpard_rx_port_t* const port); + +/// The timestamp value indicates the arrival time of the datagram and shall be non-negative. +/// Often, naive software timestamping is adequate for these purposes, but some applications may require /// a greater accuracy (e.g., for time synchronization). /// -/// The redundant interface index shall not exceed UDPARD_NETWORK_INTERFACE_COUNT_MAX. -/// /// The function takes ownership of the passed datagram payload buffer. The library will either store it as a /// fragment of the reassembled transfer payload or free it using the corresponding memory resource -/// (see UdpardRxMemoryResources) if the datagram is not needed for reassembly. Because of the ownership transfer, -/// the datagram payload buffer has to be mutable (non-const). -/// One exception is that if the "self" pointer is invalid, the library will be unable to process or free the datagram, -/// which may lead to a memory leak in the application; hence, the caller should always check that the "self" pointer -/// is always valid. -/// -/// The accepted datagram may either be invalid, carry a non-final part of a multi-frame transfer, -/// carry a final part of a valid multi-frame transfer, or carry a valid single-frame transfer. -/// The last two cases are said to complete a transfer. -/// -/// If the datagram completes a transfer, the out_transfer argument is filled with the transfer details -/// and the return value is one. -/// The caller is assigned ownership of the transfer payload buffer memory; it has to be freed after use as described -/// in the documentation for UdpardRxTransfer. -/// The memory pointed to by out_transfer may be mutated arbitrarily if no transfer is completed. -/// -/// If the datagram does not complete a transfer or is malformed, the function returns zero and the out_transfer -/// is not modified. Observe that malformed frames are not treated as errors, as the local application is not -/// responsible for the behavior of external agents producing the datagrams. -/// -/// The function invokes the dynamic memory manager in the following cases only (refer to UdpardRxPort for details): -/// -/// 1. A new session state instance is allocated when a new session is initiated. -/// -/// 2. A new transfer fragment handle is allocated when a new transfer fragment is accepted. -/// -/// 3. Allocated objects may occasionally be deallocated at the discretion of the library. -/// This behavior does not increase the worst case execution time and does not improve the worst case memory -/// consumption, so a deterministic application need not consider this behavior in its resource analysis. -/// This behavior is implemented for the benefit of applications where rigorous characterization is unnecessary. -/// -/// The time complexity is O(log n) where n is the number of remote notes publishing on this subject (topic). -/// No data copy takes place. Malformed frames are discarded in constant time. -/// Linear time is spent on the CRC verification of the transfer payload when the transfer is complete. -/// -/// UDPARD_ERROR_MEMORY is returned if the function fails to allocate memory. -/// UDPARD_ERROR_ARGUMENT is returned if any of the input arguments are invalid. 
-int_fast8_t udpardRxSubscriptionReceive(struct UdpardRxSubscription* const self, - const UdpardMicrosecond timestamp_usec, - const struct UdpardMutablePayload datagram_payload, - const uint_fast8_t redundant_iface_index, - struct UdpardRxTransfer* const out_transfer); - -// --------------------------------------------- RPC-SERVICES --------------------------------------------- - -/// An RPC-service RX port models the interest of the application in receiving RPC-service transfers of -/// a particular kind (request or response) and a particular service-ID. -struct UdpardRxRPCPort -{ - /// READ-ONLY - struct UdpardTreeNode base; - - /// READ-ONLY - UdpardPortID service_id; - - /// See UdpardRxPort. - /// Use this to change the transfer-ID timeout value for this RPC-service port. - struct UdpardRxPort port; - - /// This field can be arbitrarily mutated by the user. It is never accessed by the library. - /// Its purpose is to simplify integration with OOP interfaces. - void* user_reference; -}; - -/// A service dispatcher is a collection of RPC-service RX ports. -/// Anonymous nodes (nodes without a node-ID of their own) cannot use RPC-services. -struct UdpardRxRPCDispatcher -{ - /// The local node-ID has to be stored to facilitate correctness checking of incoming transfers. - /// This value shall not be modified. - /// READ-ONLY - UdpardNodeID local_node_id; - - /// Refer to UdpardRxMemoryResources. - struct UdpardRxMemoryResources memory; - - /// READ-ONLY - struct UdpardTreeNode* request_ports; - struct UdpardTreeNode* response_ports; -}; - -/// Represents a received Cyphal RPC-service transfer -- either request or response. -struct UdpardRxRPCTransfer -{ - struct UdpardRxTransfer base; - UdpardPortID service_id; - bool is_request; -}; - -/// To begin receiving RPC-service requests and/or responses, the application should do this: -/// -/// 1. Create a new UdpardRxRPCDispatcher instance and initialize it by calling udpardRxRPCDispatcherInit. -/// -/// 2. Announce its interest in specific RPC-services (requests and/or responses) by calling -/// udpardRxRPCDispatcherListen per each. This can be done at any later point as well. -/// -/// 3. When the local node-ID is known, invoke udpardRxRPCDispatcherStart to inform the library of the -/// node-ID value of the local node, and at the same time obtain the address of the UDP/IP multicast group -/// to bind the socket(s) to. This step can be taken before or after the RPC-service port registration. -/// If the application has to perform a plug-and-play node-ID allocation, it has to complete that beforehand -/// (the dispatcher is not needed for PnP node-ID allocation). -/// -/// 4. Having obtained the UDP/IP endpoint in the previous step, do per redundant network interface: -/// - Create a new socket bound to the IP multicast group address and UDP port number obtained earlier. -/// The multicast group address depends on the local node-ID. -/// -/// 5. Read data from the sockets continuously and forward each received UDP datagram to -/// udpardRxRPCDispatcherReceive, along with the index of the redundant interface -/// the datagram was received on. Only those services that were announced in step 3 will be processed. -/// -/// The reason the local node-ID has to be specified via a separate call is to allow the application to set up the -/// RPC ports early, without having to be aware of its own node-ID. This is useful for applications that perform -/// plug-and-play node-ID allocation. 
Applications where PnP is not needed will simply call both functions -/// at the same time during early initialization. -/// -/// There is no resource deallocation function ("free") for the RPC dispatcher. This is because the dispatcher -/// does not own any resources. To dispose of a dispatcher safely, the application shall invoke -/// udpardRxRPCDispatcherCancel for each RPC-service port on that dispatcher. -/// -/// The return value is 0 on success. -/// The return value is a negated UDPARD_ERROR_ARGUMENT if any of the input arguments are invalid. -/// -/// The time complexity is constant. This function does not invoke the dynamic memory manager. -int_fast8_t udpardRxRPCDispatcherInit(struct UdpardRxRPCDispatcher* const self, - const struct UdpardRxMemoryResources memory); - -/// This function must be called exactly once to complete the initialization of the RPC dispatcher. -/// It takes the node-ID of the local node, which is used to derive the UDP/IP multicast group address -/// to bind the sockets to, which is returned via the out parameter. -/// -/// In Cyphal/UDP, each node has a specific IP multicast group address where RPC-service transfers destined to that -/// node are sent to. This is similar to subject (topic) multicast group addressed except that the node-ID takes -/// the place of the subject-ID. The IP multicast group address is derived from the local node-ID. -/// -/// The application is expected to open a separate socket bound to that endpoint per redundant interface, -/// and then feed the UDP datagrams received from these sockets into udpardRxRPCDispatcherReceive, -/// collecting UdpardRxRPCTransfer instances at the output. -/// -/// This function shall not be called more than once per dispatcher. If the local node needs to change its node-ID, -/// this dispatcher instance must be destroyed and a new one created instead. -/// -/// The return value is 0 on success. -/// The return value is a negated UDPARD_ERROR_ARGUMENT if any of the input arguments are invalid. -/// -/// The time complexity is constant. This function does not invoke the dynamic memory manager. -int_fast8_t udpardRxRPCDispatcherStart(struct UdpardRxRPCDispatcher* const self, - const UdpardNodeID local_node_id, - struct UdpardUDPIPEndpoint* const out_udp_ip_endpoint); - -/// This function lets the application register its interest in a particular service-ID and kind (request/response) -/// by creating an RPC-service RX port. The port pointer shall retain validity until its unregistration or until -/// the dispatcher is destroyed. The service instance shall not be moved or destroyed. -/// -/// If such registration already exists, it will be unregistered first as if udpardRxRPCDispatcherCancel was -/// invoked by the application, and then re-created anew with the new parameters. -/// -/// For the meaning of extent, please refer to the documentation of the subscription pipeline. -/// -/// By default, the transfer-ID timeout value is set to UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC. -/// It can be changed by the user at any time by modifying the corresponding field in the registration instance. -/// -/// The return value is 1 if a new registration has been created as requested. -/// The return value is 0 if such registration existed at the time the function was invoked. In this case, -/// the existing registration is terminated and then a new one is created in its place. Pending transfers may be lost. -/// The return value is a negated UDPARD_ERROR_ARGUMENT if any of the input arguments are invalid. 
-/// -/// The time complexity is logarithmic from the number of current registrations under the specified transfer kind -/// (request or response). -/// This function does not allocate new memory. The function may deallocate memory if such registration already -/// existed; the deallocation behavior is specified in the documentation for udpardRxRPCDispatcherCancel. -int_fast8_t udpardRxRPCDispatcherListen(struct UdpardRxRPCDispatcher* const self, - struct UdpardRxRPCPort* const port, - const UdpardPortID service_id, - const bool is_request, - const size_t extent); - -/// This function reverses the effect of udpardRxRPCDispatcherListen. -/// If the registration is found, all its memory is de-allocated (session states and payload buffers). -/// Please refer to the UdpardRxPort session description for detailed information on the amount of memory freed. -/// -/// The return value is 1 if such registration existed (and, therefore, it was removed). -/// The return value is 0 if such registration does not exist. In this case, the function has no effect. -/// The return value is a negated UDPARD_ERROR_ARGUMENT if any of the input arguments are invalid. -/// -/// The time complexity is logarithmic from the number of current registration under the specified transfer kind. -/// This function does not allocate new memory. -int_fast8_t udpardRxRPCDispatcherCancel(struct UdpardRxRPCDispatcher* const self, - const UdpardPortID service_id, - const bool is_request); - -/// Datagrams received from the sockets of this RPC service dispatcher are fed into this function. -/// It is the analog of udpardRxSubscriptionReceive for RPC-service transfers. -/// Please refer to the documentation of udpardRxSubscriptionReceive for the usage information. -/// -/// Frames (datagrams) that belong to transfers for which there is no active RX RPC port are ignored. -/// -/// The "out_port" pointer-to-pointer can be used to retrieve the specific UdpardRxRPCPort instance that was used to -/// process the received transfer. Remember that each UdpardRxRPCPort instance has a user reference field, -/// which in combination with this feature can be used to construct OOP interfaces on top of the library. -/// If this is not needed, the pointer-to-pointer can be NULL. -/// -/// The memory pointed to by out_transfer may be mutated arbitrarily if no transfer is completed. -int_fast8_t udpardRxRPCDispatcherReceive(struct UdpardRxRPCDispatcher* const self, - const UdpardMicrosecond timestamp_usec, - const struct UdpardMutablePayload datagram_payload, - const uint_fast8_t redundant_iface_index, - struct UdpardRxRPCPort** const out_port, - struct UdpardRxRPCTransfer* const out_transfer); - -// ===================================================================================================================== -// ==================================================== MISC ===================================================== -// ===================================================================================================================== - -/// This helper function takes the head of a fragmented buffer list and copies the data into the contiguous buffer -/// provided by the user. If the total size of all fragments combined exceeds the size of the user-provided buffer, -/// copying will stop early after the buffer is filled, thus truncating the fragmented data short. -/// -/// The source list is not modified. Do not forget to free its memory afterward if it was dynamically allocated. 
-/// -/// The function has no effect and returns zero if the destination buffer is NULL. -/// The data pointers in the fragment list shall be valid, otherwise the behavior is undefined. -/// -/// Returns the number of bytes copied into the contiguous destination buffer. -size_t udpardGather(const struct UdpardFragment head, const size_t destination_size_bytes, void* const destination); +/// (see udpard_rx_mem_resources_t) if the datagram is not needed for reassembly. Because of the ownership transfer, +/// the datagram payload buffer has to be mutable (non-const). The ownership transfer does not take place if +/// any of the arguments are invalid; the function returns false in that case and the caller must clean up. +/// +/// The function invokes the dynamic memory manager in the following cases only (refer to udpard_rx_port_t): +/// 1. A new session state instance is allocated when a new session is initiated. +/// 2. A new transfer fragment handle is allocated when a new transfer fragment is accepted. +/// 3. Allocated objects may occasionally be deallocated to clean up stale transfers and sessions. +/// +/// The time complexity is O(log n + log k) where n is the number of remote nodes publishing on this subject, +/// and k is the number of fragments retained in memory for the corresponding in-progress transfer. +/// No data copying takes place. +/// +/// Returns false if any of the arguments are invalid. +bool udpard_rx_port_push(udpard_rx_t* const rx, + udpard_rx_port_t* const port, + const udpard_us_t timestamp, + const udpard_udpip_ep_t source_ep, + const udpard_bytes_mut_t datagram_payload, + const udpard_deleter_t payload_deleter, + const uint_fast8_t iface_index); #ifdef __cplusplus } diff --git a/tests/.clang-tidy b/tests/.clang-tidy index 657d99d..942b2b5 100644 --- a/tests/.clang-tidy +++ b/tests/.clang-tidy @@ -40,12 +40,22 @@ Checks: >- -*-no-malloc, -cert-msc30-c, -cert-msc50-cpp, - -modernize-macro-to-enum, + -*-macro-to-enum, -modernize-use-trailing-return-type, + -*-macro-usage, + -*-enum-size, + -*-use-using, -cppcoreguidelines-owning-memory, -misc-include-cleaner, -performance-avoid-endl, -cppcoreguidelines-avoid-do-while, + -*DeprecatedOrUnsafeBufferHandling, + -*-prefer-static-over-anonymous-namespace, + -*-pro-bounds-avoid-unchecked-container-access, + -*-array*decay, + -*-avoid-c-arrays, + -*-casting-through-void, + -*-named-parameter, WarningsAsErrors: '*' HeaderFilterRegex: '.*\.hpp' FormatStyle: file diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index bc0f063..0e8f0b4 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -15,10 +15,13 @@ enable_testing() set(CTEST_OUTPUT_ON_FAILURE ON) set(NO_STATIC_ANALYSIS OFF CACHE BOOL "disable udpard static analysis") +set(ENABLE_COVERAGE OFF CACHE BOOL "enable code coverage measurement") set(library_dir "${CMAKE_SOURCE_DIR}/libudpard") set(unity_root "${CMAKE_SOURCE_DIR}/submodules/unity") +include_directories(SYSTEM ${CMAKE_SOURCE_DIR}/lib/cavl) + # Use -DNO_STATIC_ANALYSIS=1 to suppress static analysis. # If not suppressed, the tools used here shall be available, otherwise the build will fail. 
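+# For example, a hypothetical invocation (adjust the source and build directories to your layout):
+#   cmake -DNO_STATIC_ANALYSIS=1 -B build . && cmake --build build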
if (NOT NO_STATIC_ANALYSIS) @@ -48,6 +51,14 @@ function(gen_test name files compile_definitions compile_flags link_flags c_stan target_include_directories(${name} PUBLIC ${library_dir}) target_compile_definitions(${name} PUBLIC ${compile_definitions}) target_link_libraries(${name} "${name}_unity") + + # Apply coverage flags if coverage is enabled + if (ENABLE_COVERAGE) + target_compile_options(${name} PRIVATE --coverage -fprofile-arcs -ftest-coverage) + target_link_options(${name} PRIVATE --coverage -fprofile-arcs) + target_compile_definitions(${name} PRIVATE NDEBUG=1) # Remove assertion checks as they interfere with coverage + endif() + set_target_properties( ${name} PROPERTIES @@ -69,15 +80,61 @@ function(gen_test_matrix name files) gen_test("${name}_x32_c11" "${files}" "" "-m32" "-m32" "11") endfunction() +function(gen_test_single name files) # When the full matrix is not needed, to keep pipelines fast. + gen_test("${name}" "${files}" "" "-m32" "-m32" "11") +endfunction() + # Add the test targets. # Those that are written in C may #include to reach its internals; they are called "intrusive". # The public interface tests may be written in C++ for convenience. gen_test_matrix(test_helpers "src/test_helpers.c") -gen_test_matrix(test_cavl "src/test_cavl.cpp") -gen_test_matrix(test_tx "${library_dir}/udpard.c;src/test_tx.cpp") -gen_test_matrix(test_rx "${library_dir}/udpard.c;src/test_rx.cpp") -gen_test_matrix(test_e2e "${library_dir}/udpard.c;src/test_e2e.cpp") -gen_test_matrix(test_misc "${library_dir}/udpard.c;src/test_misc.cpp") -gen_test_matrix(test_intrusive_crc "src/test_intrusive_crc.c") +gen_test_matrix(test_intrusive_header "src/test_intrusive_header.c") +gen_test_matrix(test_intrusive_misc "src/test_intrusive_misc.c") gen_test_matrix(test_intrusive_tx "src/test_intrusive_tx.c") gen_test_matrix(test_intrusive_rx "src/test_intrusive_rx.c") +gen_test_matrix(test_intrusive_guards "src/test_intrusive_guards.c") +gen_test_matrix(test_fragment "src/test_fragment.cpp;${library_dir}/udpard.c") +gen_test_single(test_e2e_random "src/test_e2e_random.cpp;${library_dir}/udpard.c") +gen_test_single(test_e2e_edge "src/test_e2e_edge.cpp;${library_dir}/udpard.c") +gen_test_single(test_e2e_api "src/test_e2e_api.cpp;${library_dir}/udpard.c") +gen_test_single(test_e2e_responses "src/test_e2e_responses.cpp;${library_dir}/udpard.c") +gen_test_single(test_e2e_reliable_ordered "src/test_e2e_reliable_ordered.cpp;${library_dir}/udpard.c") +gen_test_single(test_integration_sockets "src/test_integration_sockets.cpp;${library_dir}/udpard.c") + +# Coverage targets. Usage: +# cmake -DENABLE_COVERAGE=ON .. +# make -j16 && make test && make coverage +# xdg-open coverage-html/index.html +if (ENABLE_COVERAGE) + find_program(LCOV_PATH lcov REQUIRED) + find_program(GENHTML_PATH genhtml REQUIRED) + + # Target to reset coverage counters + add_custom_target(coverage-reset + COMMAND ${LCOV_PATH} --zerocounters --directory . + COMMAND ${LCOV_PATH} --capture --initial --directory . --output-file coverage-base.info + --rc lcov_branch_coverage=1 + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + COMMENT "Resetting coverage counters" + ) + + # Target to generate coverage report + add_custom_target(coverage + COMMAND ${LCOV_PATH} --capture --directory . 
--output-file coverage-total.info --rc lcov_branch_coverage=1 + COMMAND ${LCOV_PATH} --extract coverage-total.info '*/libudpard/udpard.c' --output-file coverage-udpard.info + --rc lcov_branch_coverage=1 + + COMMAND ${CMAKE_COMMAND} -E echo "" + COMMAND ${CMAKE_COMMAND} -E echo "=== 🔬 COVERAGE SUMMARY BEGIN 📐 ===" + COMMAND ${LCOV_PATH} --list coverage-udpard.info --rc lcov_branch_coverage=1 + COMMAND ${CMAKE_COMMAND} -E echo "==== ⬆️ COVERAGE SUMMARY END ⬆️ ====" + COMMAND ${CMAKE_COMMAND} -E echo "" + + COMMAND ${GENHTML_PATH} coverage-udpard.info --output-directory coverage-html --title "libudpard coverage" + --legend --demangle-cpp --branch-coverage + COMMAND ${CMAKE_COMMAND} -E echo "Coverage report: file://${CMAKE_BINARY_DIR}/coverage-html/index.html" + + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + COMMENT "Generating coverage HTML report" + ) +endif() diff --git a/tests/src/helpers.h b/tests/src/helpers.h index f4e035f..60ee34a 100644 --- a/tests/src/helpers.h +++ b/tests/src/helpers.h @@ -2,62 +2,82 @@ // Copyright (c) 2016 Cyphal Development Team. /// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// ReSharper disable CppRedundantInlineSpecifier +// NOLINTBEGIN(*-unchecked-string-to-number-conversion,*-deprecated-headers,*-designated-initializers,*-loop-convert) +// NOLINTBEGIN(*DeprecatedOrUnsafeBufferHandling,*err34-c,*-vararg,*-use-auto,*-use-nullptr,*-redundant-void-arg) +// NOLINTBEGIN(*-cstyle-cast) #pragma once -#include // Shall always be included first. +#include // Shall always be included first. #include #include -#include #include #if !(defined(UDPARD_VERSION_MAJOR) && defined(UDPARD_VERSION_MINOR)) -# error "Library version not defined" +#error "Library version not defined" #endif -#if !(defined(UDPARD_CYPHAL_SPECIFICATION_VERSION_MAJOR) && defined(UDPARD_CYPHAL_SPECIFICATION_VERSION_MINOR)) -# error "Cyphal specification version not defined" +#if !(defined(UDPARD_CYPHAL_VERSION_MAJOR) && defined(UDPARD_CYPHAL_VERSION_MINOR)) +#error "Cyphal specification version not defined" #endif // This is only needed to tell static analyzers that the code that follows is not C++. #ifdef __cplusplus -extern "C" { +extern "C" +{ #endif -#define TEST_PANIC(message) \ - do \ - { \ - (void) fprintf(stderr, "%s:%u: PANIC: %s\n", __FILE__, (unsigned) __LINE__, message); \ - (void) fflush(stderr); \ - abort(); \ +#define TEST_PANIC(message) \ + do { \ + (void)fprintf(stderr, "%s:%u: PANIC: %s\n", __FILE__, (unsigned)__LINE__, message); \ + (void)fflush(stderr); \ + abort(); \ } while (0) #define TEST_PANIC_UNLESS(condition) \ - do \ - { \ - if (!(condition)) \ - { \ + do { \ + if (!(condition)) { \ TEST_PANIC(#condition); \ } \ } while (0) -static inline void* dummyAllocatorAllocate(void* const user_reference, const size_t size) +static inline void* dummy_alloc(void* const user, const size_t size) { - (void) user_reference; - (void) size; + (void)user; + (void)size; return NULL; } -static inline void dummyAllocatorDeallocate(void* const user_reference, const size_t size, void* const pointer) +static inline void dummy_free(void* const user, const size_t size, void* const pointer) { - (void) user_reference; - (void) size; + (void)user; + (void)size; TEST_PANIC_UNLESS(pointer == NULL); } +// Single-fragment scatter helper. 
+static inline udpard_bytes_scattered_t make_scattered(const void* const data, const size_t size) +{ + udpard_bytes_scattered_t out; + out.bytes.size = size; + out.bytes.data = data; + out.next = NULL; + return out; +} + +// Wraps an application pointer for user context plumbing. +static inline udpard_user_context_t make_user_context(void* const obj) +{ + udpard_user_context_t out = UDPARD_USER_CONTEXT_NULL; + out.ptr[0] = obj; + return out; +} + /// The instrumented allocator tracks memory consumption, checks for heap corruption, and can be configured to fail /// allocations above a certain threshold. #define INSTRUMENTED_ALLOCATOR_CANARY_SIZE 1024U typedef struct { + /// Each allocator has its own canary, to catch an attempt to free memory allocated by a different allocator. uint_least8_t canary[INSTRUMENTED_ALLOCATOR_CANARY_SIZE]; /// The limit can be changed at any moment to control the maximum amount of memory that can be allocated. /// It may be set to a value less than the currently allocated amount. @@ -66,31 +86,34 @@ typedef struct /// The current state of the allocator. size_t allocated_fragments; size_t allocated_bytes; -} InstrumentedAllocator; + /// Event counters. + uint64_t count_alloc; + uint64_t count_free; +} instrumented_allocator_t; -static inline void* instrumentedAllocatorAllocate(void* const user_reference, const size_t size) +static inline void* instrumented_allocator_alloc(void* const user_reference, const size_t size) { - InstrumentedAllocator* const self = (InstrumentedAllocator*) user_reference; - void* result = NULL; - if ((size > 0U) && // - ((self->allocated_bytes + size) <= self->limit_bytes) && // - ((self->allocated_fragments + 1U) <= self->limit_fragments)) - { - const size_t size_with_canaries = size + ((size_t) INSTRUMENTED_ALLOCATOR_CANARY_SIZE * 2U); + instrumented_allocator_t* const self = (instrumented_allocator_t*)user_reference; + void* result = NULL; // NOLINT(*-const-correctness) + self->count_alloc++; + if ((size > 0U) && // + ((self->allocated_bytes + size) <= self->limit_bytes) && // + ((self->allocated_fragments + 1U) <= self->limit_fragments)) { + const size_t size_with_canaries = size + ((size_t)INSTRUMENTED_ALLOCATOR_CANARY_SIZE * 2U); void* origin = malloc(size_with_canaries); TEST_PANIC_UNLESS(origin != NULL); - *((size_t*) origin) = size; - uint_least8_t* p = ((uint_least8_t*) origin) + sizeof(size_t); - result = ((uint_least8_t*) origin) + INSTRUMENTED_ALLOCATOR_CANARY_SIZE; - for (size_t i = sizeof(size_t); i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) // Fill the front canary. + *((size_t*)origin) = size; + uint_least8_t* p = ((uint_least8_t*)origin) + sizeof(size_t); // NOLINT(*-const-correctness) + result = ((uint_least8_t*)origin) + INSTRUMENTED_ALLOCATOR_CANARY_SIZE; + for (size_t i = sizeof(size_t); i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) // Fill the front canary. { *p++ = self->canary[i]; } - for (size_t i = 0; i < size; i++) // Randomize the allocated fragment. + for (size_t i = 0; i < size; i++) // Randomize the allocated fragment. { - *p++ = (uint_least8_t) (rand() % (UINT_LEAST8_MAX + 1)); + *p++ = (uint_least8_t)(rand() % (UINT_LEAST8_MAX + 1)); } - for (size_t i = 0; i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) // Fill the back canary. + for (size_t i = 0; i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) // Fill the back canary. 
{ *p++ = self->canary[i]; } @@ -100,25 +123,25 @@ static inline void* instrumentedAllocatorAllocate(void* const user_reference, co return result; } -static inline void instrumentedAllocatorDeallocate(void* const user_reference, const size_t size, void* const pointer) +static inline void instrumented_allocator_free(void* const user_reference, const size_t size, void* const pointer) { - InstrumentedAllocator* const self = (InstrumentedAllocator*) user_reference; - if (pointer != NULL) - { - uint_least8_t* p = ((uint_least8_t*) pointer) - INSTRUMENTED_ALLOCATOR_CANARY_SIZE; + instrumented_allocator_t* const self = (instrumented_allocator_t*)user_reference; + self->count_free++; + if (pointer != NULL) { // NOLINTNEXTLINE(*-const-correctness) + uint_least8_t* p = ((uint_least8_t*)pointer) - INSTRUMENTED_ALLOCATOR_CANARY_SIZE; void* const origin = p; - const size_t true_size = *((const size_t*) origin); + const size_t true_size = *((const size_t*)origin); TEST_PANIC_UNLESS(size == true_size); p += sizeof(size_t); - for (size_t i = sizeof(size_t); i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) // Check the front canary. + for (size_t i = sizeof(size_t); i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) // Check the front canary. { TEST_PANIC_UNLESS(*p++ == self->canary[i]); } - for (size_t i = 0; i < size; i++) // Destroy the returned memory to prevent use-after-free. + for (size_t i = 0; i < size; i++) // Destroy the returned memory to prevent use-after-free. { - *p++ = (uint_least8_t) (rand() % (UINT_LEAST8_MAX + 1)); + *p++ = (uint_least8_t)(rand() % (UINT_LEAST8_MAX + 1)); } - for (size_t i = 0; i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) // Check the back canary. + for (size_t i = 0; i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) // Check the back canary. { TEST_PANIC_UNLESS(*p++ == self->canary[i]); } @@ -131,46 +154,77 @@ static inline void instrumentedAllocatorDeallocate(void* const user_reference, c } /// By default, the limit is unrestricted (set to the maximum possible value). -static inline void instrumentedAllocatorNew(InstrumentedAllocator* const self) +static inline void instrumented_allocator_new(instrumented_allocator_t* const self) { - for (size_t i = 0; i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) - { - self->canary[i] = (uint_least8_t) (rand() % (UINT_LEAST8_MAX + 1)); + for (size_t i = 0; i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) { + self->canary[i] = (uint_least8_t)(rand() % (UINT_LEAST8_MAX + 1)); } self->limit_fragments = SIZE_MAX; self->limit_bytes = SIZE_MAX; self->allocated_fragments = 0U; self->allocated_bytes = 0U; + self->count_alloc = 0U; + self->count_free = 0U; } -static inline struct UdpardMemoryResource instrumentedAllocatorMakeMemoryResource( - const InstrumentedAllocator* const self) +/// Resets the counters and generates a new canary. +/// Will crash if there are outstanding allocations. +static inline void instrumented_allocator_reset(instrumented_allocator_t* const self) { - const struct UdpardMemoryResource out = {.user_reference = (void*) self, - .deallocate = &instrumentedAllocatorDeallocate, - .allocate = &instrumentedAllocatorAllocate}; - return out; + TEST_PANIC_UNLESS(self->allocated_fragments == 0U); + TEST_PANIC_UNLESS(self->allocated_bytes == 0U); + instrumented_allocator_new(self); } -static inline struct UdpardMemoryDeleter instrumentedAllocatorMakeMemoryDeleter(const InstrumentedAllocator* const self) +// Shared vtable for instrumented allocators. 
+static const udpard_mem_vtable_t instrumented_allocator_vtable = { + .base = { .free = instrumented_allocator_free }, + .alloc = instrumented_allocator_alloc, +}; + +static inline udpard_mem_t instrumented_allocator_make_resource(const instrumented_allocator_t* const self) { - const struct UdpardMemoryDeleter out = {.user_reference = (void*) self, - .deallocate = &instrumentedAllocatorDeallocate}; - return out; + const udpard_mem_t result = { .vtable = &instrumented_allocator_vtable, .context = (void*)self }; + return result; } -static inline void seedRandomNumberGenerator(void) +static inline udpard_deleter_t instrumented_allocator_make_deleter(const instrumented_allocator_t* const self) { - unsigned seed = (unsigned) time(NULL); + const udpard_deleter_t result = { .vtable = &instrumented_allocator_vtable.base, .context = (void*)self }; + return result; +} + +// Shortcuts for vtable-based memory access. +static inline void* mem_res_alloc(const udpard_mem_t mem, const size_t size) +{ + return mem.vtable->alloc(mem.context, size); +} + +static inline void mem_res_free(const udpard_mem_t mem, const size_t size, void* const ptr) +{ + mem.vtable->base.free(mem.context, size, ptr); +} + +static inline void mem_del_free(const udpard_deleter_t del, const size_t size, void* const ptr) +{ + del.vtable->free(del.context, size, ptr); +} + +static inline void seed_prng(void) +{ + unsigned seed = (unsigned)time(NULL); const char* const env_var = getenv("RANDOM_SEED"); - if (env_var != NULL) - { - seed = (unsigned) atoll(env_var); // Conversion errors are possible but ignored. + if (env_var != NULL) { + seed = (unsigned)atoll(env_var); // Conversion errors are possible but ignored. } srand(seed); - (void) fprintf(stderr, "RANDOM_SEED=%u\n", seed); + (void)fprintf(stderr, "export RANDOM_SEED=%u\n", seed); } #ifdef __cplusplus } #endif + +// NOLINTEND(*-cstyle-cast) +// NOLINTEND(*DeprecatedOrUnsafeBufferHandling,*err34-c,*-vararg,*-use-auto,*-use-nullptr,*-redundant-void-arg) +// NOLINTEND(*-unchecked-string-to-number-conversion,*-deprecated-headers,*-designated-initializers,*-loop-convert) diff --git a/tests/src/hexdump.hpp b/tests/src/hexdump.hpp deleted file mode 100644 index d27bc83..0000000 --- a/tests/src/hexdump.hpp +++ /dev/null @@ -1,83 +0,0 @@ -/// This software is distributed under the terms of the MIT License. -/// Copyright (C) OpenCyphal Development Team -/// Copyright Amazon.com Inc. or its affiliates. -/// SPDX-License-Identifier: MIT -/// Author: Pavel Kirienko - -#include -#include -#include -#include - -namespace hexdump -{ -using Byte = std::uint_least8_t; - -template -[[nodiscard]] std::string hexdump(InputIterator begin, const InputIterator end) -{ - static_assert(BytesPerRow > 0); - static constexpr std::pair PrintableASCIIRange{32, 126}; - std::uint32_t offset = 0; - std::ostringstream output; - bool first = true; - output << std::hex << std::setfill('0'); - do - { - if (first) - { - first = false; - } - else - { - output << "\n"; - } - output << std::setw(8) << offset << " "; - offset += BytesPerRow; - auto it = begin; - for (Byte i = 0; i < BytesPerRow; ++i) - { - if (i == 8) - { - output << ' '; - } - if (it != end) - { - output << std::setw(2) << static_cast(*it) << ' '; - ++it; - } - else - { - output << " "; - } - } - output << " "; - for (Byte i = 0; i < BytesPerRow; ++i) - { - if (begin != end) - { - output << (((static_cast(*begin) >= PrintableASCIIRange.first) && - (static_cast(*begin) <= PrintableASCIIRange.second)) - ? 
static_cast(*begin) // NOSONAR intentional conversion to plain char - : '.'); - ++begin; - } - else - { - output << ' '; - } - } - } while (begin != end); - return output.str(); -} - -[[nodiscard]] auto hexdump(const auto& cont) -{ - return hexdump(std::begin(cont), std::end(cont)); -} - -[[nodiscard]] inline auto hexdump(const void* const data, const std::size_t size) -{ - return hexdump(static_cast(data), static_cast(data) + size); -} -} // namespace hexdump diff --git a/tests/src/test_cavl.cpp b/tests/src/test_cavl.cpp deleted file mode 100644 index 8d816b6..0000000 --- a/tests/src/test_cavl.cpp +++ /dev/null @@ -1,1404 +0,0 @@ -// This software is distributed under the terms of the MIT License. -// Copyright (c) 2016-2020 OpenCyphal Development Team. -// These tests have been adapted from the Cavl test suite that you can find at https://github.com/pavel-kirienko/cavl - -#include <_udpard_cavl.h> -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace -{ -/// These aliases are introduced to keep things nicely aligned in test cases. -constexpr auto Zz = nullptr; -constexpr auto Zzzzz = nullptr; -constexpr auto Zzzzzz = nullptr; - -template -struct Node final : Cavl -{ - explicit Node(const T val) : Cavl{Cavl{}}, value(val) {} - - Node(const Cavl& cv, const T val) : Cavl{cv}, value(val) {} - - Node() : Cavl{Cavl{}} {} - - T value{}; - - auto checkLinkageUpLeftRightBF(const Cavl* const check_up, - const Cavl* const check_le, - const Cavl* const check_ri, - const std::int_fast8_t check_bf) const -> bool - { - return (up == check_up) && // - (lr[0] == check_le) && (lr[1] == check_ri) && // - (bf == check_bf) && // - ((check_up == nullptr) || (check_up->lr[0] == this) || (check_up->lr[1] == this)) && // - ((check_le == nullptr) || (check_le->up == this)) && // - ((check_ri == nullptr) || (check_ri->up == this)); - } - - auto min() -> Node* { return static_cast(cavlFindExtremum(this, false)); } - - auto max() -> Node* { return static_cast(cavlFindExtremum(this, true)); } - - auto operator=(const Cavl& cv) -> Node& - { - static_cast(*this) = cv; - return *this; - } -}; - -/// Wrapper over cavlSearch() that supports closures. -template -auto search(Node** const root, const Predicate& predicate, const Factory& factory) -> Node* -{ - struct Refs - { - Predicate predicate; - Factory factory; - - static auto callPredicate(void* const user_reference, const Cavl* const node) -> std::int_fast8_t - { - const auto ret = static_cast(user_reference)->predicate(static_cast&>(*node)); - if (ret > 0) - { - return 1; - } - if (ret < 0) - { - return -1; - } - return 0; - } - - static auto callFactory(void* const user_reference) -> Cavl* - { - return static_cast(user_reference)->factory(); - } - } refs{predicate, factory}; - Cavl* const out = cavlSearch(reinterpret_cast(root), &refs, &Refs::callPredicate, &Refs::callFactory); - return static_cast*>(out); -} - -template -auto search(Node** const root, const Predicate& predicate) -> Node* -{ - return search(root, predicate, []() { return nullptr; }); -} - -/// Wrapper over cavlRemove(). -template -void remove(Node** const root, const Node* const n) -{ - cavlRemove(reinterpret_cast(root), n); -} - -template -auto getHeight(const Node* const n) -> std::uint_fast8_t // NOLINT recursion -{ - return (n != nullptr) ? 
static_cast(1U + std::max(getHeight(static_cast*>(n->lr[0])), - getHeight(static_cast*>(n->lr[1])))) - : 0; -} - -template -void print(const Node* const nd, const std::uint_fast8_t depth = 0, const char marker = 'T') // NOLINT recursion -{ - TEST_ASSERT(10 > getHeight(nd)); // Fail early for malformed cyclic trees, do not overwhelm stdout. - if (nd != nullptr) - { - print(static_cast*>(nd->lr[0]), static_cast(depth + 1U), 'L'); - for (std::uint16_t i = 1U; i < depth; i++) - { - std::cout << " "; - } - if (marker == 'L') - { - std::cout << " ............."; - } - else if (marker == 'R') - { - std::cout << " `````````````"; - } - else - { - (void) 0; - } - std::cout << marker << "=" << static_cast(nd->value) // - << " [" << static_cast(nd->bf) << "]" << std::endl; - print(static_cast*>(nd->lr[1]), static_cast(depth + 1U), 'R'); - } -} - -template -void traverse(Node* const root, const Visitor& visitor) // NOLINT recursion needed for testing -{ - if (root != nullptr) - { - traverse(static_cast(root->lr[!Ascending]), visitor); - visitor(root); - traverse(static_cast(root->lr[Ascending]), visitor); - } -} - -template -auto checkAscension(const Node* const root) -> std::optional -{ - const Node* prev = nullptr; - bool valid = true; - std::size_t size = 0; - traverse>(root, [&](const Node* const nd) { - if (prev != nullptr) - { - valid = valid && (prev->value < nd->value); - } - prev = nd; - size++; - }); - return valid ? std::optional(size) : std::optional{}; -} - -template -auto findBrokenAncestry(const Node* const n, const Cavl* const parent = nullptr) // NOLINT recursion - -> const Node* -{ - if ((n != nullptr) && (n->up == parent)) - { - for (auto* ch : n->lr) // NOLINT array decay due to C API - { - if (const Node* p = findBrokenAncestry(static_cast*>(ch), n)) - { - return p; - } - } - return nullptr; - } - return n; -} - -template -auto findBrokenBalanceFactor(const Node* const n) -> const Cavl* // NOLINT recursion -{ - if (n != nullptr) - { - if (std::abs(n->bf) > 1) - { - return n; - } - const std::int16_t hl = getHeight(static_cast*>(n->lr[0])); - const std::int16_t hr = getHeight(static_cast*>(n->lr[1])); - if (n->bf != (hr - hl)) - { - return n; - } - for (auto* ch : n->lr) // NOLINT array decay due to C API - { - if (const Cavl* p = findBrokenBalanceFactor(static_cast*>(ch))) - { - return p; - } - } - } - return nullptr; -} - -void testCheckAscension() -{ - using N = Node; - N t{2}; - N l{1}; - N r{3}; - N rr{4}; - // Correctly arranged tree -- smaller items on the left. - t.lr[0] = &l; - t.lr[1] = &r; - r.lr[1] = &rr; - TEST_ASSERT(4 == checkAscension(&t)); - TEST_ASSERT(3 == getHeight(&t)); - // Break the arrangement and make sure the breakage is detected. - t.lr[1] = &l; - t.lr[0] = &r; - TEST_ASSERT(4 != checkAscension(&t)); - TEST_ASSERT(3 == getHeight(&t)); - TEST_ASSERT(&t == findBrokenBalanceFactor(&t)); // All zeros, incorrect. - r.lr[1] = nullptr; - std::cout << __LINE__ << ": " << static_cast(getHeight(&t)) << std::endl; - print(&t); - std::cout << __LINE__ << ": " << static_cast(getHeight(&t)) << std::endl; - TEST_ASSERT_EQUAL_size_t(2, getHeight(&t)); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(&t)); // Balanced now as we removed one node. 
-} - -void testRotation() -{ - using N = Node; - // Original state: - // x.left = a - // x.right = z - // z.left = b - // z.right = c - // After left rotation of X: - // x.left = a - // x.right = b - // z.left = x - // z.right = c - N c{{Zz, {Zz, Zz}, 0}, 3}; - N b{{Zz, {Zz, Zz}, 0}, 2}; - N a{{Zz, {Zz, Zz}, 0}, 1}; - N z{{Zz, {&b, &c}, 0}, 8}; - N x{{Zz, {&a, &z}, 1}, 9}; - z.up = &x; - c.up = &z; - b.up = &z; - a.up = &x; - - std::cout << "Before rotation:\n"; - TEST_ASSERT(nullptr == findBrokenAncestry(&x)); - print(&x); - - std::cout << "After left rotation:\n"; - cavlPrivateRotate(&x, false); // z is now the root - TEST_ASSERT(nullptr == findBrokenAncestry(&z)); - print(&z); - TEST_ASSERT(&a == x.lr[0]); - TEST_ASSERT(&b == x.lr[1]); - TEST_ASSERT(&x == z.lr[0]); - TEST_ASSERT(&c == z.lr[1]); - - std::cout << "After right rotation, back into the original configuration:\n"; - cavlPrivateRotate(&z, true); // x is now the root - TEST_ASSERT(nullptr == findBrokenAncestry(&x)); - print(&x); - TEST_ASSERT(&a == x.lr[0]); - TEST_ASSERT(&z == x.lr[1]); - TEST_ASSERT(&b == z.lr[0]); - TEST_ASSERT(&c == z.lr[1]); -} - -void testBalancingA() -{ - using N = Node; - // Double left-right rotation. - // X X Y - // / ` / ` / ` - // Z C => Y C => Z X - // / ` / ` / ` / ` - // D Y Z G D F G C - // / ` / ` - // F G D F - N x{{Zz, {Zz, Zz}, 0}, 1}; // bf = -2 - N z{{&x, {Zz, Zz}, 0}, 2}; // bf = +1 - N c{{&x, {Zz, Zz}, 0}, 3}; - N d{{&z, {Zz, Zz}, 0}, 4}; - N y{{&z, {Zz, Zz}, 0}, 5}; - N f{{&y, {Zz, Zz}, 0}, 6}; - N g{{&y, {Zz, Zz}, 0}, 7}; - x.lr[0] = &z; - x.lr[1] = &c; - z.lr[0] = &d; - z.lr[1] = &y; - y.lr[0] = &f; - y.lr[1] = &g; - print(&x); - TEST_ASSERT(nullptr == findBrokenAncestry(&x)); - TEST_ASSERT(&x == cavlPrivateAdjustBalance(&x, false)); // bf = -1, same topology - TEST_ASSERT(-1 == x.bf); - TEST_ASSERT(&z == cavlPrivateAdjustBalance(&z, true)); // bf = +1, same topology - TEST_ASSERT(+1 == z.bf); - TEST_ASSERT(&y == cavlPrivateAdjustBalance(&x, false)); // bf = -2, rotation needed - print(&y); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(&y)); // Should be balanced now. 
- TEST_ASSERT(nullptr == findBrokenAncestry(&y)); - TEST_ASSERT(&z == y.lr[0]); - TEST_ASSERT(&x == y.lr[1]); - TEST_ASSERT(&d == z.lr[0]); - TEST_ASSERT(&f == z.lr[1]); - TEST_ASSERT(&g == x.lr[0]); - TEST_ASSERT(&c == x.lr[1]); - TEST_ASSERT(Zz == d.lr[0]); - TEST_ASSERT(Zz == d.lr[1]); - TEST_ASSERT(Zz == f.lr[0]); - TEST_ASSERT(Zz == f.lr[1]); - TEST_ASSERT(Zz == g.lr[0]); - TEST_ASSERT(Zz == g.lr[1]); - TEST_ASSERT(Zz == c.lr[0]); - TEST_ASSERT(Zz == c.lr[1]); -} - -void testBalancingB() -{ - using N = Node; - // Without F the handling of Z and Y is more complex; Z flips the sign of its balance factor: - // X X Y - // / ` / ` / ` - // Z C => Y C => Z X - // / ` / ` / / ` - // D Y Z G D G C - // ` / - // G D - N x{}; - N z{}; - N c{}; - N d{}; - N y{}; - N g{}; - x = {{Zz, {&z, &c}, 0}, 1}; // bf = -2 - z = {{&x, {&d, &y}, 0}, 2}; // bf = +1 - c = {{&x, {Zz, Zz}, 0}, 3}; - d = {{&z, {Zz, Zz}, 0}, 4}; - y = {{&z, {Zz, &g}, 0}, 5}; // bf = +1 - g = {{&y, {Zz, Zz}, 0}, 7}; - print(&x); - TEST_ASSERT(nullptr == findBrokenAncestry(&x)); - TEST_ASSERT(&x == cavlPrivateAdjustBalance(&x, false)); // bf = -1, same topology - TEST_ASSERT(-1 == x.bf); - TEST_ASSERT(&z == cavlPrivateAdjustBalance(&z, true)); // bf = +1, same topology - TEST_ASSERT(+1 == z.bf); - TEST_ASSERT(&y == cavlPrivateAdjustBalance(&y, true)); // bf = +1, same topology - TEST_ASSERT(+1 == y.bf); - TEST_ASSERT(&y == cavlPrivateAdjustBalance(&x, false)); // bf = -2, rotation needed - print(&y); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(&y)); // Should be balanced now. - TEST_ASSERT(nullptr == findBrokenAncestry(&y)); - TEST_ASSERT(&z == y.lr[0]); - TEST_ASSERT(&x == y.lr[1]); - TEST_ASSERT(&d == z.lr[0]); - TEST_ASSERT(Zz == z.lr[1]); - TEST_ASSERT(&g == x.lr[0]); - TEST_ASSERT(&c == x.lr[1]); - TEST_ASSERT(Zz == d.lr[0]); - TEST_ASSERT(Zz == d.lr[1]); - TEST_ASSERT(Zz == g.lr[0]); - TEST_ASSERT(Zz == g.lr[1]); - TEST_ASSERT(Zz == c.lr[0]); - TEST_ASSERT(Zz == c.lr[1]); -} - -void testBalancingC() -{ - using N = Node; - // Both X and Z are heavy on the same side. - // X Z - // / ` / ` - // Z C => D X - // / ` / ` / ` - // D Y F G Y C - // / ` - // F G - N x{}; - N z{}; - N c{}; - N d{}; - N y{}; - N f{}; - N g{}; - x = {{Zz, {&z, &c}, 0}, 1}; // bf = -2 - z = {{&x, {&d, &y}, 0}, 2}; // bf = -1 - c = {{&x, {Zz, Zz}, 0}, 3}; - d = {{&z, {&f, &g}, 0}, 4}; - y = {{&z, {Zz, Zz}, 0}, 5}; - f = {{&d, {Zz, Zz}, 0}, 6}; - g = {{&d, {Zz, Zz}, 0}, 7}; - print(&x); - TEST_ASSERT(nullptr == findBrokenAncestry(&x)); - TEST_ASSERT(&x == cavlPrivateAdjustBalance(&x, false)); // bf = -1, same topology - TEST_ASSERT(-1 == x.bf); - TEST_ASSERT(&z == cavlPrivateAdjustBalance(&z, false)); // bf = -1, same topology - TEST_ASSERT(-1 == z.bf); - TEST_ASSERT(&z == cavlPrivateAdjustBalance(&x, false)); - print(&z); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(&z)); - TEST_ASSERT(nullptr == findBrokenAncestry(&z)); - TEST_ASSERT(&d == z.lr[0]); - TEST_ASSERT(&x == z.lr[1]); - TEST_ASSERT(&f == d.lr[0]); - TEST_ASSERT(&g == d.lr[1]); - TEST_ASSERT(&y == x.lr[0]); - TEST_ASSERT(&c == x.lr[1]); - TEST_ASSERT(Zz == f.lr[0]); - TEST_ASSERT(Zz == f.lr[1]); - TEST_ASSERT(Zz == g.lr[0]); - TEST_ASSERT(Zz == g.lr[1]); - TEST_ASSERT(Zz == y.lr[0]); - TEST_ASSERT(Zz == y.lr[1]); - TEST_ASSERT(Zz == c.lr[0]); - TEST_ASSERT(Zz == c.lr[1]); -} - -void testRetracingOnGrowth() -{ - using N = Node; - std::array t{}; - for (std::uint_fast8_t i = 0; i < 100; i++) - { - t[i].value = i; - } - // 50 30 - // / ` / ` - // 30 60? => 20 50 - // / ` / / ` - // 20 40? 
10 40? 60? - // / - // 10 - t[50] = {Zzzzzz, {&t[30], &t[60]}, -1}; - t[30] = {&t[50], {&t[20], &t[40]}, 00}; - t[60] = {&t[50], {Zzzzzz, Zzzzzz}, 00}; - t[20] = {&t[30], {&t[10], Zzzzzz}, 00}; - t[40] = {&t[30], {Zzzzzz, Zzzzzz}, 00}; - t[10] = {&t[20], {Zzzzzz, Zzzzzz}, 00}; - print(&t[50]); // The tree is imbalanced because we just added 1 and are about to retrace it. - TEST_ASSERT(nullptr == findBrokenAncestry(&t[50])); - TEST_ASSERT(6 == checkAscension(&t[50])); - TEST_ASSERT(&t[30] == cavlPrivateRetraceOnGrowth(&t[10])); - std::puts("ADD 10:"); - print(&t[30]); // This is the new root. - TEST_ASSERT(&t[20] == t[30].lr[0]); - TEST_ASSERT(&t[50] == t[30].lr[1]); - TEST_ASSERT(&t[10] == t[20].lr[0]); - TEST_ASSERT(Zzzzzz == t[20].lr[1]); - TEST_ASSERT(&t[40] == t[50].lr[0]); - TEST_ASSERT(&t[60] == t[50].lr[1]); - TEST_ASSERT(Zzzzzz == t[10].lr[0]); - TEST_ASSERT(Zzzzzz == t[10].lr[1]); - TEST_ASSERT(Zzzzzz == t[40].lr[0]); - TEST_ASSERT(Zzzzzz == t[40].lr[1]); - TEST_ASSERT(Zzzzzz == t[60].lr[0]); - TEST_ASSERT(Zzzzzz == t[60].lr[1]); - TEST_ASSERT(-1 == t[20].bf); - TEST_ASSERT(+0 == t[30].bf); - TEST_ASSERT(nullptr == findBrokenAncestry(&t[30])); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(&t[30])); - TEST_ASSERT(6 == checkAscension(&t[30])); - // Add a new child under 20 and ensure that retracing stops at 20 because it becomes perfectly balanced: - // 30 - // / ` - // 20 50 - // / ` / ` - // 10 21 40 60 - TEST_ASSERT(nullptr == findBrokenAncestry(&t[30])); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(&t[30])); - t[21] = {&t[20], {Zzzzzz, Zzzzzz}, 0}; - t[20].lr[1] = &t[21]; - TEST_ASSERT(nullptr == cavlPrivateRetraceOnGrowth(&t[21])); // Root not reached, NULL returned. - std::puts("ADD 21:"); - print(&t[30]); - TEST_ASSERT(0 == t[20].bf); - TEST_ASSERT(0 == t[30].bf); - TEST_ASSERT(nullptr == findBrokenAncestry(&t[30])); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(&t[30])); - TEST_ASSERT(7 == checkAscension(&t[30])); - // 30 - // / ` - // 20 50 - // / ` / ` - // 10 21 40 60 - // ` - // 15 <== first we add this, no balancing needed - // ` - // 17 <== then we add this, forcing left rotation at 10 - // - // After the left rotation of 10, we get: - // - // 30 - // / ` - // 20 50 - // / ` / ` - // 15 21 40 60 - // / ` - // 10 17 - // - // When we add one extra item after 17, we force a double rotation (15 left, 20 right). Before the rotation: - // - // 30 - // / ` - // 20 50 - // / ` / ` - // 15 21 40 60 - // / ` - // 10 17 - // ` - // 18 <== new item causes imbalance - // - // After left rotation of 15: - // - // 30 - // / ` - // 20 50 - // / ` / ` - // 17 21 40 60 - // / ` - // 15 18 - // / - // 10 - // - // After right rotation of 20, this is the final state: - // - // 30 - // / ` - // 17 50 - // / ` / ` - // 15 20 40 60 - // / / ` - // 10 18 21 - std::puts("ADD 15:"); - TEST_ASSERT(nullptr == findBrokenAncestry(&t[30])); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(&t[30])); - TEST_ASSERT(7 == checkAscension(&t[30])); - t[15] = {&t[10], {Zzzzzz, Zzzzzz}, 0}; - t[10].lr[1] = &t[15]; - TEST_ASSERT(&t[30] == cavlPrivateRetraceOnGrowth(&t[15])); // Same root, its balance becomes -1. 
- print(&t[30]); - TEST_ASSERT(+1 == t[10].bf); - TEST_ASSERT(-1 == t[20].bf); - TEST_ASSERT(-1 == t[30].bf); - TEST_ASSERT(nullptr == findBrokenAncestry(&t[30])); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(&t[30])); - TEST_ASSERT(8 == checkAscension(&t[30])); - - std::puts("ADD 17:"); - t[17] = {&t[15], {Zzzzzz, Zzzzzz}, 0}; - t[15].lr[1] = &t[17]; - TEST_ASSERT(nullptr == cavlPrivateRetraceOnGrowth(&t[17])); // Same root, same balance, 10 rotated left. - print(&t[30]); - // Check 10 - TEST_ASSERT(&t[15] == t[10].up); - TEST_ASSERT(0 == t[10].bf); - TEST_ASSERT(nullptr == t[10].lr[0]); - TEST_ASSERT(nullptr == t[10].lr[1]); - // Check 17 - TEST_ASSERT(&t[15] == t[17].up); - TEST_ASSERT(0 == t[17].bf); - TEST_ASSERT(nullptr == t[17].lr[0]); - TEST_ASSERT(nullptr == t[17].lr[1]); - // Check 15 - TEST_ASSERT(&t[20] == t[15].up); - TEST_ASSERT(0 == t[15].bf); - TEST_ASSERT(&t[10] == t[15].lr[0]); - TEST_ASSERT(&t[17] == t[15].lr[1]); - // Check 20 -- leaning left - TEST_ASSERT(&t[30] == t[20].up); - TEST_ASSERT(-1 == t[20].bf); - TEST_ASSERT(&t[15] == t[20].lr[0]); - TEST_ASSERT(&t[21] == t[20].lr[1]); - // Check the root -- still leaning left by one. - TEST_ASSERT(nullptr == t[30].up); - TEST_ASSERT(-1 == t[30].bf); - TEST_ASSERT(&t[20] == t[30].lr[0]); - TEST_ASSERT(&t[50] == t[30].lr[1]); - // Check hard invariants. - TEST_ASSERT(nullptr == findBrokenAncestry(&t[30])); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(&t[30])); - TEST_ASSERT(9 == checkAscension(&t[30])); - - std::puts("ADD 18:"); - t[18] = {&t[17], {Zzzzzz, Zzzzzz}, 0}; - t[17].lr[1] = &t[18]; - TEST_ASSERT(nullptr == cavlPrivateRetraceOnGrowth(&t[18])); // Same root, 15 went left, 20 went right. - print(&t[30]); - // Check 17 - TEST_ASSERT(&t[30] == t[17].up); - TEST_ASSERT(0 == t[17].bf); - TEST_ASSERT(&t[15] == t[17].lr[0]); - TEST_ASSERT(&t[20] == t[17].lr[1]); - // Check 15 - TEST_ASSERT(&t[17] == t[15].up); - TEST_ASSERT(-1 == t[15].bf); - TEST_ASSERT(&t[10] == t[15].lr[0]); - TEST_ASSERT(nullptr == t[15].lr[1]); - // Check 20 - TEST_ASSERT(&t[17] == t[20].up); - TEST_ASSERT(0 == t[20].bf); - TEST_ASSERT(&t[18] == t[20].lr[0]); - TEST_ASSERT(&t[21] == t[20].lr[1]); - // Check 10 - TEST_ASSERT(&t[15] == t[10].up); - TEST_ASSERT(0 == t[10].bf); - TEST_ASSERT(nullptr == t[10].lr[0]); - TEST_ASSERT(nullptr == t[10].lr[1]); - // Check 18 - TEST_ASSERT(&t[20] == t[18].up); - TEST_ASSERT(0 == t[18].bf); - TEST_ASSERT(nullptr == t[18].lr[0]); - TEST_ASSERT(nullptr == t[18].lr[1]); - // Check 21 - TEST_ASSERT(&t[20] == t[21].up); - TEST_ASSERT(0 == t[21].bf); - TEST_ASSERT(nullptr == t[21].lr[0]); - TEST_ASSERT(nullptr == t[21].lr[1]); - // Check hard invariants. - TEST_ASSERT(nullptr == findBrokenAncestry(&t[30])); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(&t[30])); - TEST_ASSERT(10 == checkAscension(&t[30])); -} - -void testSearchTrivial() -{ - using N = Node; - // A - // B C - // D E F G - N a{4}; - N b{2}; - N c{6}; - N d{1}; - N e{3}; - N f{5}; - N g{7}; - N q{9}; - a = {Zz, {&b, &c}, 0}; - b = {&a, {&d, &e}, 0}; - c = {&a, {&f, &g}, 0}; - d = {&b, {Zz, Zz}, 0}; - e = {&b, {Zz, Zz}, 0}; - f = {&c, {Zz, Zz}, 0}; - g = {&c, {Zz, Zz}, 0}; - q = {Zz, {Zz, Zz}, 0}; - TEST_ASSERT(nullptr == findBrokenBalanceFactor(&a)); - TEST_ASSERT(nullptr == findBrokenAncestry(&a)); - TEST_ASSERT(7 == checkAscension(&a)); - N* root = &a; - TEST_ASSERT(nullptr == cavlSearch(reinterpret_cast(&root), nullptr, nullptr, nullptr)); // Bad arguments. 
- TEST_ASSERT(&a == root); - TEST_ASSERT(nullptr == search(&root, [&](const N& v) { return q.value - v.value; })); - TEST_ASSERT(&a == root); - TEST_ASSERT(&e == search(&root, [&](const N& v) { return e.value - v.value; })); - TEST_ASSERT(&b == search(&root, [&](const N& v) { return b.value - v.value; })); - TEST_ASSERT(&a == root); - print(&a); - TEST_ASSERT(nullptr == cavlFindExtremum(nullptr, true)); - TEST_ASSERT(nullptr == cavlFindExtremum(nullptr, false)); - TEST_ASSERT(&g == a.max()); - TEST_ASSERT(&d == a.min()); - TEST_ASSERT(&g == g.max()); - TEST_ASSERT(&g == g.min()); - TEST_ASSERT(&d == d.max()); - TEST_ASSERT(&d == d.min()); -} - -void testRemovalA() -{ - using N = Node; - // 4 - // / ` - // 2 6 - // / ` / ` - // 1 3 5 8 - // / ` - // 7 9 - std::array t{}; - for (std::uint_fast8_t i = 0; i < 10; i++) - { - t[i].value = i; - } - t[1] = {&t[2], {Zzzzz, Zzzzz}, 00}; - t[2] = {&t[4], {&t[1], &t[3]}, 00}; - t[3] = {&t[2], {Zzzzz, Zzzzz}, 00}; - t[4] = {Zzzzz, {&t[2], &t[6]}, +1}; - t[5] = {&t[6], {Zzzzz, Zzzzz}, 00}; - t[6] = {&t[4], {&t[5], &t[8]}, +1}; - t[7] = {&t[8], {Zzzzz, Zzzzz}, 00}; - t[8] = {&t[6], {&t[7], &t[9]}, 00}; - t[9] = {&t[8], {Zzzzz, Zzzzz}, 00}; - N* root = &t[4]; - print(root); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(9 == checkAscension(root)); - - // Remove 9, the easiest case. The rest of the tree remains unchanged. - // 4 - // / ` - // 2 6 - // / ` / ` - // 1 3 5 8 - // / - // 7 - std::puts("REMOVE 9:"); - remove(&root, &t[9]); - TEST_ASSERT(&t[4] == root); - print(root); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(8 == checkAscension(root)); - // 1 - TEST_ASSERT(&t[2] == t[1].up); - TEST_ASSERT(Zzzzz == t[1].lr[0]); - TEST_ASSERT(Zzzzz == t[1].lr[1]); - TEST_ASSERT(00 == t[1].bf); - // 2 - TEST_ASSERT(&t[4] == t[2].up); - TEST_ASSERT(&t[1] == t[2].lr[0]); - TEST_ASSERT(&t[3] == t[2].lr[1]); - TEST_ASSERT(00 == t[2].bf); - // 3 - TEST_ASSERT(&t[2] == t[3].up); - TEST_ASSERT(Zzzzz == t[3].lr[0]); - TEST_ASSERT(Zzzzz == t[3].lr[1]); - TEST_ASSERT(00 == t[3].bf); - // 4 - TEST_ASSERT(Zzzzz == t[4].up); // Nihil Supernum - TEST_ASSERT(&t[2] == t[4].lr[0]); - TEST_ASSERT(&t[6] == t[4].lr[1]); - TEST_ASSERT(+1 == t[4].bf); - // 5 - TEST_ASSERT(&t[6] == t[5].up); - TEST_ASSERT(Zzzzz == t[5].lr[0]); - TEST_ASSERT(Zzzzz == t[5].lr[1]); - TEST_ASSERT(00 == t[5].bf); - // 6 - TEST_ASSERT(&t[4] == t[6].up); - TEST_ASSERT(&t[5] == t[6].lr[0]); - TEST_ASSERT(&t[8] == t[6].lr[1]); - TEST_ASSERT(+1 == t[6].bf); - // 7 - TEST_ASSERT(&t[8] == t[7].up); - TEST_ASSERT(Zzzzz == t[7].lr[0]); - TEST_ASSERT(Zzzzz == t[7].lr[1]); - TEST_ASSERT(00 == t[7].bf); - // 8 - TEST_ASSERT(&t[6] == t[8].up); - TEST_ASSERT(&t[7] == t[8].lr[0]); - TEST_ASSERT(Zzzzz == t[8].lr[1]); - TEST_ASSERT(-1 == t[8].bf); - - // Remove 8, 7 takes its place (the one-child case). The rest of the tree remains unchanged. 
- // 4 - // / ` - // 2 6 - // / ` / ` - // 1 3 5 7 - std::puts("REMOVE 8:"); - remove(&root, &t[8]); - TEST_ASSERT(&t[4] == root); - print(root); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(7 == checkAscension(root)); - // 1 - TEST_ASSERT(&t[2] == t[1].up); - TEST_ASSERT(Zzzzz == t[1].lr[0]); - TEST_ASSERT(Zzzzz == t[1].lr[1]); - TEST_ASSERT(00 == t[1].bf); - // 2 - TEST_ASSERT(&t[4] == t[2].up); - TEST_ASSERT(&t[1] == t[2].lr[0]); - TEST_ASSERT(&t[3] == t[2].lr[1]); - TEST_ASSERT(00 == t[2].bf); - // 3 - TEST_ASSERT(&t[2] == t[3].up); - TEST_ASSERT(Zzzzz == t[3].lr[0]); - TEST_ASSERT(Zzzzz == t[3].lr[1]); - TEST_ASSERT(00 == t[3].bf); - // 4 - TEST_ASSERT(Zzzzz == t[4].up); // Nihil Supernum - TEST_ASSERT(&t[2] == t[4].lr[0]); - TEST_ASSERT(&t[6] == t[4].lr[1]); - TEST_ASSERT(00 == t[4].bf); - // 5 - TEST_ASSERT(&t[6] == t[5].up); - TEST_ASSERT(Zzzzz == t[5].lr[0]); - TEST_ASSERT(Zzzzz == t[5].lr[1]); - TEST_ASSERT(00 == t[5].bf); - // 6 - TEST_ASSERT(&t[4] == t[6].up); - TEST_ASSERT(&t[5] == t[6].lr[0]); - TEST_ASSERT(&t[7] == t[6].lr[1]); - TEST_ASSERT(00 == t[6].bf); - // 7 - TEST_ASSERT(&t[6] == t[7].up); - TEST_ASSERT(Zzzzz == t[7].lr[0]); - TEST_ASSERT(Zzzzz == t[7].lr[1]); - TEST_ASSERT(00 == t[7].bf); - - // Remove the root node 4, 5 takes its place. The overall structure remains unchanged except that 5 is now the root. - // 5 - // / ` - // 2 6 - // / ` ` - // 1 3 7 - std::puts("REMOVE 4:"); - remove(&root, &t[4]); - print(root); - TEST_ASSERT(&t[5] == root); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(6 == checkAscension(root)); - // 1 - TEST_ASSERT(&t[2] == t[1].up); - TEST_ASSERT(Zzzzz == t[1].lr[0]); - TEST_ASSERT(Zzzzz == t[1].lr[1]); - TEST_ASSERT(00 == t[1].bf); - // 2 - TEST_ASSERT(&t[5] == t[2].up); - TEST_ASSERT(&t[1] == t[2].lr[0]); - TEST_ASSERT(&t[3] == t[2].lr[1]); - TEST_ASSERT(00 == t[2].bf); - // 3 - TEST_ASSERT(&t[2] == t[3].up); - TEST_ASSERT(Zzzzz == t[3].lr[0]); - TEST_ASSERT(Zzzzz == t[3].lr[1]); - TEST_ASSERT(00 == t[3].bf); - // 5 - TEST_ASSERT(Zzzzz == t[5].up); // Nihil Supernum - TEST_ASSERT(&t[2] == t[5].lr[0]); - TEST_ASSERT(&t[6] == t[5].lr[1]); - TEST_ASSERT(00 == t[5].bf); - // 6 - TEST_ASSERT(&t[5] == t[6].up); - TEST_ASSERT(Zzzzz == t[6].lr[0]); - TEST_ASSERT(&t[7] == t[6].lr[1]); - TEST_ASSERT(+1 == t[6].bf); - // 7 - TEST_ASSERT(&t[6] == t[7].up); - TEST_ASSERT(Zzzzz == t[7].lr[0]); - TEST_ASSERT(Zzzzz == t[7].lr[1]); - TEST_ASSERT(00 == t[7].bf); - - // Remove the root node 5, 6 takes its place. 
- // 6 - // / ` - // 2 7 - // / ` - // 1 3 - std::puts("REMOVE 5:"); - remove(&root, &t[5]); - TEST_ASSERT(&t[6] == root); - print(root); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(5 == checkAscension(root)); - // 1 - TEST_ASSERT(&t[2] == t[1].up); - TEST_ASSERT(Zzzzz == t[1].lr[0]); - TEST_ASSERT(Zzzzz == t[1].lr[1]); - TEST_ASSERT(00 == t[1].bf); - // 2 - TEST_ASSERT(&t[6] == t[2].up); - TEST_ASSERT(&t[1] == t[2].lr[0]); - TEST_ASSERT(&t[3] == t[2].lr[1]); - TEST_ASSERT(00 == t[2].bf); - // 3 - TEST_ASSERT(&t[2] == t[3].up); - TEST_ASSERT(Zzzzz == t[3].lr[0]); - TEST_ASSERT(Zzzzz == t[3].lr[1]); - TEST_ASSERT(00 == t[3].bf); - // 6 - TEST_ASSERT(Zzzzz == t[6].up); // Nihil Supernum - TEST_ASSERT(&t[2] == t[6].lr[0]); - TEST_ASSERT(&t[7] == t[6].lr[1]); - TEST_ASSERT(-1 == t[6].bf); - // 7 - TEST_ASSERT(&t[6] == t[7].up); - TEST_ASSERT(Zzzzz == t[7].lr[0]); - TEST_ASSERT(Zzzzz == t[7].lr[1]); - TEST_ASSERT(00 == t[7].bf); - - // Remove the root node 6, 7 takes its place, then right rotation is done to restore balance, 2 is the new root. - // 2 - // / ` - // 1 7 - // / - // 3 - std::puts("REMOVE 6:"); - remove(&root, &t[6]); - TEST_ASSERT(&t[2] == root); - print(root); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(4 == checkAscension(root)); - // 1 - TEST_ASSERT(&t[2] == t[1].up); - TEST_ASSERT(Zzzzz == t[1].lr[0]); - TEST_ASSERT(Zzzzz == t[1].lr[1]); - TEST_ASSERT(00 == t[1].bf); - // 2 - TEST_ASSERT(Zzzzz == t[2].up); // Nihil Supernum - TEST_ASSERT(&t[1] == t[2].lr[0]); - TEST_ASSERT(&t[7] == t[2].lr[1]); - TEST_ASSERT(+1 == t[2].bf); - // 3 - TEST_ASSERT(&t[7] == t[3].up); - TEST_ASSERT(Zzzzz == t[3].lr[0]); - TEST_ASSERT(Zzzzz == t[3].lr[1]); - TEST_ASSERT(00 == t[3].bf); - // 7 - TEST_ASSERT(&t[2] == t[7].up); - TEST_ASSERT(&t[3] == t[7].lr[0]); - TEST_ASSERT(Zzzzz == t[7].lr[1]); - TEST_ASSERT(-1 == t[7].bf); - - // Remove 1, then balancing makes 3 the new root node. - // 3 - // / ` - // 2 7 - std::puts("REMOVE 1:"); - remove(&root, &t[1]); - TEST_ASSERT(&t[3] == root); - print(root); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(3 == checkAscension(root)); - // 2 - TEST_ASSERT(&t[3] == t[2].up); - TEST_ASSERT(Zzzzz == t[2].lr[0]); - TEST_ASSERT(Zzzzz == t[2].lr[1]); - TEST_ASSERT(0 == t[2].bf); - // 3 - TEST_ASSERT(Zzzzz == t[3].up); // Nihil Supernum - TEST_ASSERT(&t[2] == t[3].lr[0]); - TEST_ASSERT(&t[7] == t[3].lr[1]); - TEST_ASSERT(00 == t[3].bf); - // 7 - TEST_ASSERT(&t[3] == t[7].up); - TEST_ASSERT(Zzzzz == t[7].lr[0]); - TEST_ASSERT(Zzzzz == t[7].lr[1]); - TEST_ASSERT(00 == t[7].bf); - - // Remove 7. - // 3 - // / - // 2 - std::puts("REMOVE 7:"); - remove(&root, &t[7]); - TEST_ASSERT(&t[3] == root); - print(root); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(2 == checkAscension(root)); - // 2 - TEST_ASSERT(&t[3] == t[2].up); - TEST_ASSERT(Zzzzz == t[2].lr[0]); - TEST_ASSERT(Zzzzz == t[2].lr[1]); - TEST_ASSERT(0 == t[2].bf); - // 3 - TEST_ASSERT(Zzzzz == t[3].up); // Nihil Supernum - TEST_ASSERT(&t[2] == t[3].lr[0]); - TEST_ASSERT(Zzzzz == t[3].lr[1]); - TEST_ASSERT(-1 == t[3].bf); - - // Remove 3. Only 2 is left, which is now obviously the root. 
- std::puts("REMOVE 3:"); - remove(&root, &t[3]); - TEST_ASSERT(&t[2] == root); - print(root); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(1 == checkAscension(root)); - // 2 - TEST_ASSERT(Zzzzz == t[2].up); - TEST_ASSERT(Zzzzz == t[2].lr[0]); - TEST_ASSERT(Zzzzz == t[2].lr[1]); - TEST_ASSERT(0 == t[2].bf); - - // Remove 2. The tree is now empty, make sure the root pointer is updated accordingly. - std::puts("REMOVE 2:"); - remove(&root, &t[2]); - TEST_ASSERT(nullptr == root); -} - -void testMutationManual() -{ - using N = Node; - // Build a tree with 31 elements from 1 to 31 inclusive by adding new elements successively: - // 16 - // / ` - // 8 24 - // / ` / ` - // 4 12 20 28 - // / ` / ` / ` / ` - // 2 6 10 14 18 22 26 30 - // / ` / ` / ` / ` / ` / ` / ` / ` - // 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31 - std::array t{}; - for (std::uint_fast8_t i = 0; i < 32; i++) - { - t[i].value = i; - } - // Build the actual tree. - N* root = nullptr; - for (std::uint_fast8_t i = 1; i < 32; i++) - { - const auto pred = [&](const N& v) { return t.at(i).value - v.value; }; - TEST_ASSERT(nullptr == search(&root, pred)); - TEST_ASSERT(&t[i] == search(&root, pred, [&]() { return &t.at(i); })); - TEST_ASSERT(&t[i] == search(&root, pred)); - // Validate the tree after every mutation. - TEST_ASSERT(nullptr != root); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(i == checkAscension(root)); - } - print(root); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(31 == checkAscension(root)); - // Check composition -- ensure that every element is in the tree and it is there exactly once. 
- { - std::array<bool, 32> seen{}; - traverse(root, [&](const N* const n) { - TEST_ASSERT(!seen.at(n->value)); - seen[n->value] = true; - }); - TEST_ASSERT(std::all_of(&seen[1], &seen[31], [](bool x) { return x; })); - } - - // REMOVE 24 - // 16 - // / ` - // 8 25 - // / ` / ` - // 4 12 20 28 - // / ` / ` / ` / ` - // 2 6 10 14 18 22 26 30 - // / ` / ` / ` / ` / ` / ` ` / ` - // 1 3 5 7 9 11 13 15 17 19 21 23 27 29 31 - std::puts("REMOVE 24:"); - TEST_ASSERT(t[24].checkLinkageUpLeftRightBF(&t[16], &t[20], &t[28], 00)); - remove(&root, &t[24]); - TEST_ASSERT(&t[16] == root); - print(root); - TEST_ASSERT(t[25].checkLinkageUpLeftRightBF(&t[16], &t[20], &t[28], 00)); - TEST_ASSERT(t[26].checkLinkageUpLeftRightBF(&t[28], Zzzzzz, &t[27], +1)); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(30 == checkAscension(root)); - - // REMOVE 25 - // 16 - // / ` - // 8 26 - // / ` / ` - // 4 12 20 28 - // / ` / ` / ` / ` - // 2 6 10 14 18 22 27 30 - // / ` / ` / ` / ` / ` / ` / ` - // 1 3 5 7 9 11 13 15 17 19 21 23 29 31 - std::puts("REMOVE 25:"); - TEST_ASSERT(t[25].checkLinkageUpLeftRightBF(&t[16], &t[20], &t[28], 00)); - remove(&root, &t[25]); - TEST_ASSERT(&t[16] == root); - print(root); - TEST_ASSERT(t[26].checkLinkageUpLeftRightBF(&t[16], &t[20], &t[28], 00)); - TEST_ASSERT(t[28].checkLinkageUpLeftRightBF(&t[26], &t[27], &t[30], +1)); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(29 == checkAscension(root)); - - // REMOVE 26 - // 16 - // / ` - // 8 27 - // / ` / ` - // 4 12 20 30 - // / ` / ` / ` / ` - // 2 6 10 14 18 22 28 31 - // / ` / ` / ` / ` / ` / ` ` - // 1 3 5 7 9 11 13 15 17 19 21 23 29 - std::puts("REMOVE 26:"); - TEST_ASSERT(t[26].checkLinkageUpLeftRightBF(&t[16], &t[20], &t[28], 00)); - remove(&root, &t[26]); - TEST_ASSERT(&t[16] == root); - print(root); - TEST_ASSERT(t[27].checkLinkageUpLeftRightBF(&t[16], &t[20], &t[30], 00)); - TEST_ASSERT(t[30].checkLinkageUpLeftRightBF(&t[27], &t[28], &t[31], -1)); - TEST_ASSERT(t[28].checkLinkageUpLeftRightBF(&t[30], Zzzzzz, &t[29], +1)); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(28 == checkAscension(root)); - - // REMOVE 20 - // 16 - // / ` - // 8 27 - // / ` / ` - // 4 12 21 30 - // / ` / ` / ` / ` - // 2 6 10 14 18 22 28 31 - // / ` / ` / ` / ` / ` ` ` - // 1 3 5 7 9 11 13 15 17 19 23 29 - std::puts("REMOVE 20:"); - TEST_ASSERT(t[20].checkLinkageUpLeftRightBF(&t[27], &t[18], &t[22], 00)); - remove(&root, &t[20]); - TEST_ASSERT(&t[16] == root); - print(root); - TEST_ASSERT(t[21].checkLinkageUpLeftRightBF(&t[27], &t[18], &t[22], 00)); - TEST_ASSERT(t[22].checkLinkageUpLeftRightBF(&t[21], Zzzzzz, &t[23], +1)); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(27 == checkAscension(root)); - - // REMOVE 27 - // 16 - // / ` - // 8 28 - // / ` / ` - // 4 12 21 30 - // / ` / ` / ` / ` - // 2 6 10 14 18 22 29 31 - // / ` / ` / ` / ` / ` ` - // 1 3 5 7 9 11 13 15 17 19 23 - std::puts("REMOVE 27:"); - TEST_ASSERT(t[27].checkLinkageUpLeftRightBF(&t[16], &t[21], &t[30], 00)); - remove(&root, &t[27]); - TEST_ASSERT(&t[16] == root); - print(root); - TEST_ASSERT(t[28].checkLinkageUpLeftRightBF(&t[16], &t[21], &t[30], -1)); - TEST_ASSERT(t[30].checkLinkageUpLeftRightBF(&t[28], &t[29], &t[31], 00)); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); -
TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(26 == checkAscension(root)); - - // REMOVE 28 - // 16 - // / ` - // 8 29 - // / ` / ` - // 4 12 21 30 - // / ` / ` / ` ` - // 2 6 10 14 18 22 31 - // / ` / ` / ` / ` / ` ` - // 1 3 5 7 9 11 13 15 17 19 23 - std::puts("REMOVE 28:"); - TEST_ASSERT(t[28].checkLinkageUpLeftRightBF(&t[16], &t[21], &t[30], -1)); - remove(&root, &t[28]); - TEST_ASSERT(&t[16] == root); - print(root); - TEST_ASSERT(t[29].checkLinkageUpLeftRightBF(&t[16], &t[21], &t[30], -1)); - TEST_ASSERT(t[30].checkLinkageUpLeftRightBF(&t[29], Zzzzzz, &t[31], +1)); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(25 == checkAscension(root)); - - // REMOVE 29; UNBALANCED TREE BEFORE ROTATION: - // 16 - // / ` - // 8 30 - // / ` / ` - // 4 12 21 31 - // / ` / ` / ` - // 2 6 10 14 18 22 - // / ` / ` / ` / ` / ` ` - // 1 3 5 7 9 11 13 15 17 19 23 - // - // FINAL STATE AFTER ROTATION: - // 16 - // / ` - // 8 21 - // / ` / ` - // 4 12 18 30 - // / ` / ` / ` / ` - // 2 6 10 14 17 19 22 31 - // / ` / ` / ` / ` ` - // 1 3 5 7 9 11 13 15 23 - std::puts("REMOVE 29:"); - TEST_ASSERT(t[29].checkLinkageUpLeftRightBF(&t[16], &t[21], &t[30], -1)); - remove(&root, &t[29]); - TEST_ASSERT(&t[16] == root); - print(root); - TEST_ASSERT(t[21].checkLinkageUpLeftRightBF(&t[16], &t[18], &t[30], +1)); - TEST_ASSERT(t[18].checkLinkageUpLeftRightBF(&t[21], &t[17], &t[19], 00)); - TEST_ASSERT(t[30].checkLinkageUpLeftRightBF(&t[21], &t[22], &t[31], -1)); - TEST_ASSERT(t[22].checkLinkageUpLeftRightBF(&t[30], Zzzzzz, &t[23], +1)); - TEST_ASSERT(t[16].checkLinkageUpLeftRightBF(Zzzzzz, &t[8], &t[21], 00)); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(24 == checkAscension(root)); - - // REMOVE 8 - // 16 - // / ` - // 9 21 - // / ` / ` - // 4 12 18 30 - // / ` / ` / ` / ` - // 2 6 10 14 17 19 22 31 - // / ` / ` ` / ` ` - // 1 3 5 7 11 13 15 23 - std::puts("REMOVE 8:"); - TEST_ASSERT(t[8].checkLinkageUpLeftRightBF(&t[16], &t[4], &t[12], 00)); - remove(&root, &t[8]); - TEST_ASSERT(&t[16] == root); - print(root); - TEST_ASSERT(t[9].checkLinkageUpLeftRightBF(&t[16], &t[4], &t[12], 00)); - TEST_ASSERT(t[10].checkLinkageUpLeftRightBF(&t[12], Zzzzz, &t[11], +1)); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(23 == checkAscension(root)); - - // REMOVE 9 - // 16 - // / ` - // 10 21 - // / ` / ` - // 4 12 18 30 - // / ` / ` / ` / ` - // 2 6 11 14 17 19 22 31 - // / ` / ` / ` ` - // 1 3 5 7 13 15 23 - std::puts("REMOVE 9:"); - TEST_ASSERT(t[9].checkLinkageUpLeftRightBF(&t[16], &t[4], &t[12], 00)); - remove(&root, &t[9]); - TEST_ASSERT(&t[16] == root); - print(root); - TEST_ASSERT(t[10].checkLinkageUpLeftRightBF(&t[16], &t[4], &t[12], 00)); - TEST_ASSERT(t[12].checkLinkageUpLeftRightBF(&t[10], &t[11], &t[14], +1)); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(22 == checkAscension(root)); - - // REMOVE 1 - // 16 - // / ` - // 10 21 - // / ` / ` - // 4 12 18 30 - // / ` / ` / ` / ` - // 2 6 11 14 17 19 22 31 - // ` / ` / ` ` - // 3 5 7 13 15 23 - std::puts("REMOVE 1:"); - TEST_ASSERT(t[1].checkLinkageUpLeftRightBF(&t[2], Zzzzz, Zzzzz, 00)); - remove(&root, &t[1]); - TEST_ASSERT(&t[16] == root); - print(root); - TEST_ASSERT(t[2].checkLinkageUpLeftRightBF(&t[4], Zzzzz, &t[3], +1)); - TEST_ASSERT(nullptr == 
findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(21 == checkAscension(root)); -} - -auto getRandomByte() -{ - return static_cast<std::uint8_t>((0xFFLL * std::rand()) / RAND_MAX); -} - -void testMutationRandomized() -{ - using N = Node; - std::array<N, 256> t{}; - for (auto i = 0U; i < 256U; i++) - { - t.at(i).value = static_cast<std::uint8_t>(i); - } - std::array<bool, 256> mask{}; - std::size_t size = 0; - N* root = nullptr; - - std::uint64_t cnt_addition = 0; - std::uint64_t cnt_removal = 0; - - const auto validate = [&] { - TEST_ASSERT(size == std::accumulate(mask.begin(), mask.end(), 0U, [](const std::size_t a, const std::size_t b) { - return a + b; - })); - TEST_ASSERT(nullptr == findBrokenBalanceFactor(root)); - TEST_ASSERT(nullptr == findBrokenAncestry(root)); - TEST_ASSERT(size == checkAscension(root)); - std::array<bool, 256> new_mask{}; - traverse(root, [&](const N* node) { new_mask.at(node->value) = true; }); - TEST_ASSERT(mask == new_mask); // Otherwise, the contents of the tree do not match our expectations. - }; - validate(); - - const auto add = [&](const std::uint_fast8_t x) { - const auto predicate = [&](const N& v) { return x - v.value; }; - if (N* const existing = search(&root, predicate)) - { - TEST_ASSERT(mask.at(x)); - TEST_ASSERT(x == existing->value); - TEST_ASSERT(x == search(&root, predicate, []() -> N* { - throw std::logic_error("Attempted to create a new node when there is one already"); - })->value); - } - else - { - TEST_ASSERT(!mask.at(x)); - bool factory_called = false; - TEST_ASSERT(x == search(&root, predicate, [&]() -> N* { - factory_called = true; // NOLINT(bugprone-assignment-in-if-condition) - return &t.at(x); - })->value); - TEST_ASSERT(factory_called); - size++; - cnt_addition++; - mask.at(x) = true; - } - }; - - const auto drop = [&](const std::uint_fast8_t x) { - const auto predicate = [&](const N& v) { return x - v.value; }; - if (N* const existing = search(&root, predicate)) - { - TEST_ASSERT(mask.at(x)); - TEST_ASSERT(x == existing->value); - remove(&root, existing); - size--; - cnt_removal++; - mask.at(x) = false; - TEST_ASSERT(nullptr == search(&root, predicate)); - } - else - { - TEST_ASSERT(!mask.at(x)); - } - }; - - std::puts("Running the randomized test..."); - for (std::uint32_t iteration = 0U; iteration < 10'000U; iteration++) - { - if ((getRandomByte() % 2U) != 0) - { - add(getRandomByte()); - } - else - { - drop(getRandomByte()); - } - validate(); - } - - std::cout << "Randomized test finished. Final state:\n" // - << "\tsize: " << size // - << "\tcnt_addition: " << cnt_addition // - << "\tcnt_removal: " << cnt_removal // - << std::endl; - if (root != nullptr) - { - std::cout << "\tmin/max: " << static_cast<int>(root->min()->value) // - << "/" << static_cast<int>(root->max()->value) // - << std::endl; - } - validate(); -} - -} // namespace - -void setUp() {} - -void tearDown() {} - -int main(const int argc, const char* const argv[]) -{ - const auto seed = static_cast<unsigned>((argc > 1) ?
std::atoll(argv[1]) : std::time(nullptr)); // NOLINT - std::cout << "Randomness seed: " << seed << std::endl; - std::srand(seed); - UNITY_BEGIN(); - RUN_TEST(testCheckAscension); - RUN_TEST(testRotation); - RUN_TEST(testBalancingA); - RUN_TEST(testBalancingB); - RUN_TEST(testBalancingC); - RUN_TEST(testRetracingOnGrowth); - RUN_TEST(testSearchTrivial); - RUN_TEST(testRemovalA); - RUN_TEST(testMutationManual); - RUN_TEST(testMutationRandomized); - return UNITY_END(); -} diff --git a/tests/src/test_e2e.cpp b/tests/src/test_e2e.cpp deleted file mode 100644 index 8ea7800..0000000 --- a/tests/src/test_e2e.cpp +++ /dev/null @@ -1,623 +0,0 @@ -/// This software is distributed under the terms of the MIT License. -/// Copyright (C) OpenCyphal Development Team <opencyphal.org> -/// Copyright Amazon.com Inc. or its affiliates. -/// SPDX-License-Identifier: MIT - -#include <udpard.h> -#include "helpers.h" -#include <unity.h> -#include <array> -#include <cstdint> -#include <cstring> -#include <string_view> - -namespace -{ - -UdpardPayload makePayload(const std::string_view& payload) -{ - return {.size = payload.size(), .data = payload.data()}; -} - -/// A wrapper over udpardRxSubscriptionReceive() that copies the datagram payload into a newly allocated buffer. -[[nodiscard]] int_fast8_t rxSubscriptionReceive(UdpardRxSubscription* const self, - InstrumentedAllocator& payload_memory, - const UdpardMicrosecond timestamp_usec, - const UdpardMutablePayload datagram_payload, - const uint_fast8_t redundant_iface_index, - UdpardRxTransfer* const out_transfer) -{ - return udpardRxSubscriptionReceive(self, - timestamp_usec, - { - .size = datagram_payload.size, - .data = std::memmove(instrumentedAllocatorAllocate(&payload_memory, - datagram_payload.size), - datagram_payload.data, - datagram_payload.size), - }, - redundant_iface_index, - out_transfer); -} - -/// A wrapper over udpardRxRPCDispatcherReceive() that copies the datagram payload into a newly allocated buffer. -[[nodiscard]] int_fast8_t rxRPCDispatcherReceive(UdpardRxRPCDispatcher* const self, - InstrumentedAllocator& payload_memory, - const UdpardMicrosecond timestamp_usec, - const UdpardMutablePayload datagram_payload, - const uint_fast8_t redundant_iface_index, - UdpardRxRPCPort** const out_port, - UdpardRxRPCTransfer* const out_transfer) -{ - return udpardRxRPCDispatcherReceive(self, - timestamp_usec, - { - .size = datagram_payload.size, - .data = std::memmove(instrumentedAllocatorAllocate(&payload_memory, - datagram_payload.size), - datagram_payload.data, - datagram_payload.size), - }, - redundant_iface_index, - out_port, - out_transfer); -} - -void testPubSub() -{ - InstrumentedAllocator alloc_tx; - InstrumentedAllocator alloc_rx_session; - InstrumentedAllocator alloc_rx_fragment; - InstrumentedAllocator alloc_rx_payload; - instrumentedAllocatorNew(&alloc_tx); - instrumentedAllocatorNew(&alloc_rx_session); - instrumentedAllocatorNew(&alloc_rx_fragment); - instrumentedAllocatorNew(&alloc_rx_payload); - const UdpardTxMemoryResources mem_tx{ - .fragment = instrumentedAllocatorMakeMemoryResource(&alloc_tx), - .payload = instrumentedAllocatorMakeMemoryResource(&alloc_tx), - }; - const UdpardRxMemoryResources mem_rx{ - .session = instrumentedAllocatorMakeMemoryResource(&alloc_rx_session), - .fragment = instrumentedAllocatorMakeMemoryResource(&alloc_rx_fragment), - .payload = instrumentedAllocatorMakeMemoryDeleter(&alloc_rx_payload), - }; - // Initialize the TX pipeline. Set the MTU to a low value to ensure that we test multi-frame transfers.
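- // (With tx.mtu = 100, the roughly 360-byte "Eden" payload below cannot fit into one datagram and must be split into four frames, which is why the node-ID-qualified udpardTxPublish() call is asserted to return 4.)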
- UdpardTx tx{}; - UdpardNodeID node_id = UDPARD_NODE_ID_UNSET; - TEST_ASSERT_EQUAL(0, udpardTxInit(&tx, &node_id, 7, mem_tx)); - tx.mtu = 100; - for (auto i = 0U; i <= UDPARD_PRIORITY_MAX; i++) - { - tx.dscp_value_per_priority[i] = static_cast<std::uint8_t>(0xA0U + i); - } - // Initialize the subscriptions. - std::array<UdpardRxSubscription, 3> sub{}; - TEST_ASSERT_EQUAL(0, udpardRxSubscriptionInit(&sub.at(0), 5000, 300, mem_rx)); - TEST_ASSERT_EQUAL(0, udpardRxSubscriptionInit(&sub.at(1), 5001, 200, mem_rx)); - TEST_ASSERT_EQUAL(0, udpardRxSubscriptionInit(&sub.at(2), 5002, 100, mem_rx)); - - // Publish something on subject 5000. - std::array<UdpardTransferID, 3> transfer_id{}; - TEST_ASSERT_EQUAL(1, // Single-frame anonymous = success. - udpardTxPublish(&tx, - 10'000'000, - UdpardPrioritySlow, - 5000, - transfer_id.at(0)++, - makePayload("Last night, I had a dream."), - nullptr)); - const std::string_view Eden = - "After speaking with Scott, Lan Xi halted his busy work amid chaotic feelings, and stopped to think, as the " - "colonel had advised. Faster than he had imagined, Eden's cold, slippery vipers crawled into his " - "consciousness. He found the fruit of knowledge and ate it, and the last rays of sunshine in his soul " - "disappeared forever as everything plunged into darkness."; - TEST_ASSERT_EQUAL(-UDPARD_ERROR_ANONYMOUS, - udpardTxPublish(&tx, - 10'001'000, - UdpardPriorityNominal, - 5000, - transfer_id.at(0), - makePayload(Eden), - nullptr)); - node_id = 42; // Change the node-ID to allow multi-frame transfers, then try again. - TEST_ASSERT_EQUAL(4, - udpardTxPublish(&tx, - 10'002'000, - UdpardPriorityOptional, - 5000, - transfer_id.at(0)++, - makePayload(Eden), - nullptr)); - TEST_ASSERT_EQUAL(5, tx.queue_size); - - // Publish something on subject 5001. The priority here is higher so it should be delivered earlier. - node_id = 43; // Change the node-ID. - const std::string_view Later = "Two days later, the captain of Ultimate Law committed suicide."; - TEST_ASSERT_EQUAL(1, - udpardTxPublish(&tx, - 10'003'000, - UdpardPriorityNominal, - 5001, - transfer_id.at(1)++, - makePayload(Later), - nullptr)); - TEST_ASSERT_EQUAL(6, tx.queue_size); - - // Publish something on subject 5002. The priority here is the same. - const std::string_view Dark = "'Dark. It's so fucking dark,' the captain murmured, and then shot himself."; - TEST_ASSERT_EQUAL(1, - udpardTxPublish(&tx, - 10'004'000, - UdpardPriorityNominal, - 5002, - transfer_id.at(2)++, - makePayload(Dark), - nullptr)); - TEST_ASSERT_EQUAL(7, tx.queue_size); - TEST_ASSERT_EQUAL(7 * 2ULL, alloc_tx.allocated_fragments); - - // Transmit the enqueued frames by pushing them into the subscribers. - // Here we pop the frames one by one ensuring that they come out in the correct order. - UdpardRxTransfer transfer{}; - // First transfer.
- TEST_ASSERT_EQUAL(0, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments); - UdpardTxItem* tx_item = udpardTxPeek(&tx); - TEST_ASSERT_NOT_NULL(tx_item); - TEST_ASSERT_EQUAL(sub.at(1).udp_ip_endpoint.ip_address, tx_item->destination.ip_address); - TEST_ASSERT_NULL(tx_item->next_in_transfer); - TEST_ASSERT_EQUAL(10'003'000, tx_item->deadline_usec); - TEST_ASSERT_EQUAL(0xA4, tx_item->dscp); - TEST_ASSERT_EQUAL(1, - rxSubscriptionReceive(&sub.at(1), - alloc_rx_payload, - 10'005'000, - tx_item->datagram_payload, - 0, - &transfer)); - TEST_ASSERT_EQUAL(1, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_rx_payload.allocated_fragments); - // Check the received transfer. - TEST_ASSERT_EQUAL(10'005'000, transfer.timestamp_usec); - TEST_ASSERT_EQUAL(UdpardPriorityNominal, transfer.priority); - TEST_ASSERT_EQUAL(43, transfer.source_node_id); - TEST_ASSERT_EQUAL(0, transfer.transfer_id); - TEST_ASSERT_EQUAL(Later.size(), transfer.payload_size); - TEST_ASSERT_EQUAL(Later.size(), transfer.payload.view.size); - TEST_ASSERT_EQUAL_MEMORY(Later.data(), transfer.payload.view.data, transfer.payload.view.size); - TEST_ASSERT_NULL(transfer.payload.next); - // Free the transfer payload. - udpardRxFragmentFree(transfer.payload, mem_rx.fragment, mem_rx.payload); - TEST_ASSERT_EQUAL(1, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments); - // Send duplicates. - TEST_ASSERT_EQUAL(0, // Duplicate on same iface. - rxSubscriptionReceive(&sub.at(1), - alloc_rx_payload, - 10'005'100, - tx_item->datagram_payload, - 0, - &transfer)); - TEST_ASSERT_EQUAL(0, // Duplicate on another iface. - rxSubscriptionReceive(&sub.at(1), - alloc_rx_payload, - 10'005'200, - tx_item->datagram_payload, - 1, - &transfer)); - // Ensure the duplicates do not alter memory usage. - TEST_ASSERT_EQUAL(1, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments); - // Free the TX item. - udpardTxFree(mem_tx, udpardTxPop(&tx, tx_item)); - TEST_ASSERT_EQUAL(6 * 2ULL, alloc_tx.allocated_fragments); - - // Second transfer. - tx_item = udpardTxPeek(&tx); - TEST_ASSERT_NOT_NULL(tx_item); - TEST_ASSERT_EQUAL(sub.at(2).udp_ip_endpoint.ip_address, tx_item->destination.ip_address); - TEST_ASSERT_NULL(tx_item->next_in_transfer); - TEST_ASSERT_EQUAL(10'004'000, tx_item->deadline_usec); - TEST_ASSERT_EQUAL(0xA4, tx_item->dscp); - TEST_ASSERT_EQUAL(1, - rxSubscriptionReceive(&sub.at(2), - alloc_rx_payload, - 10'006'000, - tx_item->datagram_payload, - 1, - &transfer)); - TEST_ASSERT_EQUAL(2, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_rx_payload.allocated_fragments); - // Check the received transfer.
- TEST_ASSERT_EQUAL(10'006'000, transfer.timestamp_usec); - TEST_ASSERT_EQUAL(UdpardPriorityNominal, transfer.priority); - TEST_ASSERT_EQUAL(43, transfer.source_node_id); - TEST_ASSERT_EQUAL(0, transfer.transfer_id); - TEST_ASSERT_EQUAL(Dark.size(), transfer.payload_size); - TEST_ASSERT_EQUAL(Dark.size(), transfer.payload.view.size); - TEST_ASSERT_EQUAL_MEMORY(Dark.data(), transfer.payload.view.data, transfer.payload.view.size); - TEST_ASSERT_NULL(transfer.payload.next); - // Free the transfer payload. - udpardRxFragmentFree(transfer.payload, mem_rx.fragment, mem_rx.payload); - TEST_ASSERT_EQUAL(2, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments); - // Free the TX item. - udpardTxFree(mem_tx, udpardTxPop(&tx, tx_item)); - TEST_ASSERT_EQUAL(5 * 2ULL, alloc_tx.allocated_fragments); - - // Third transfer. This one is anonymous. - tx_item = udpardTxPeek(&tx); - TEST_ASSERT_NOT_NULL(tx_item); - TEST_ASSERT_EQUAL(sub.at(0).udp_ip_endpoint.ip_address, tx_item->destination.ip_address); - TEST_ASSERT_NULL(tx_item->next_in_transfer); - TEST_ASSERT_EQUAL(10'000'000, tx_item->deadline_usec); - TEST_ASSERT_EQUAL(0xA6, tx_item->dscp); - TEST_ASSERT_EQUAL(1, - rxSubscriptionReceive(&sub.at(0), - alloc_rx_payload, - 10'007'000, - tx_item->datagram_payload, - 2, - &transfer)); - TEST_ASSERT_EQUAL(2, alloc_rx_session.allocated_fragments); // No increment, anonymous transfers are stateless. - TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_rx_payload.allocated_fragments); - // Check the received transfer. - TEST_ASSERT_EQUAL(10'007'000, transfer.timestamp_usec); - TEST_ASSERT_EQUAL(UdpardPrioritySlow, transfer.priority); - TEST_ASSERT_EQUAL(UDPARD_NODE_ID_UNSET, transfer.source_node_id); - TEST_ASSERT_EQUAL(0, transfer.transfer_id); - TEST_ASSERT_EQUAL(26, transfer.payload_size); - TEST_ASSERT_EQUAL(26, transfer.payload.view.size); - TEST_ASSERT_EQUAL_MEMORY("Last night, I had a dream.", transfer.payload.view.data, transfer.payload.view.size); - TEST_ASSERT_NULL(transfer.payload.next); - // Free the transfer payload. - udpardRxFragmentFree(transfer.payload, mem_rx.fragment, mem_rx.payload); - TEST_ASSERT_EQUAL(2, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments); - // Free the TX item. - udpardTxFree(mem_tx, udpardTxPop(&tx, tx_item)); - TEST_ASSERT_EQUAL(4 * 2ULL, alloc_tx.allocated_fragments); - - // Fourth transfer. This one contains multiple frames. We process them one-by-one. - // Frame #0. - tx_item = udpardTxPeek(&tx); - TEST_ASSERT_NOT_NULL(tx_item); - const UdpardTxItem* prev_next = tx_item->next_in_transfer; - TEST_ASSERT_NOT_NULL(prev_next); - TEST_ASSERT_EQUAL(sub.at(0).udp_ip_endpoint.ip_address, tx_item->destination.ip_address); - TEST_ASSERT_EQUAL(10'002'000, tx_item->deadline_usec); - TEST_ASSERT_EQUAL(0xA7, tx_item->dscp); - TEST_ASSERT_EQUAL(0, - rxSubscriptionReceive(&sub.at(0), - alloc_rx_payload, - 10'008'000, - tx_item->datagram_payload, - 0, - &transfer)); - TEST_ASSERT_EQUAL(3, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_rx_payload.allocated_fragments); - // Free the TX item. - udpardTxFree(mem_tx, udpardTxPop(&tx, tx_item)); - TEST_ASSERT_EQUAL(3 * 2ULL, alloc_tx.allocated_fragments); - // Frame #1. 
- tx_item = udpardTxPeek(&tx); - TEST_ASSERT_NOT_NULL(tx_item); - TEST_ASSERT_EQUAL_PTR(prev_next, tx_item); - prev_next = tx_item->next_in_transfer; - TEST_ASSERT_NOT_NULL(prev_next); - TEST_ASSERT_EQUAL(sub.at(0).udp_ip_endpoint.ip_address, tx_item->destination.ip_address); - TEST_ASSERT_EQUAL(10'002'000, tx_item->deadline_usec); - TEST_ASSERT_EQUAL(0xA7, tx_item->dscp); - TEST_ASSERT_EQUAL(0, - rxSubscriptionReceive(&sub.at(0), - alloc_rx_payload, - 10'008'001, - tx_item->datagram_payload, - 0, - &transfer)); - TEST_ASSERT_EQUAL(3, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(2, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(2, alloc_rx_payload.allocated_fragments); - // Free the TX item. - udpardTxFree(mem_tx, udpardTxPop(&tx, tx_item)); - TEST_ASSERT_EQUAL(2 * 2ULL, alloc_tx.allocated_fragments); - // Frame #2. - tx_item = udpardTxPeek(&tx); - TEST_ASSERT_NOT_NULL(tx_item); - TEST_ASSERT_EQUAL_PTR(prev_next, tx_item); - prev_next = tx_item->next_in_transfer; - TEST_ASSERT_NOT_NULL(prev_next); - TEST_ASSERT_EQUAL(sub.at(0).udp_ip_endpoint.ip_address, tx_item->destination.ip_address); - TEST_ASSERT_EQUAL(10'002'000, tx_item->deadline_usec); - TEST_ASSERT_EQUAL(0xA7, tx_item->dscp); - TEST_ASSERT_EQUAL(0, - rxSubscriptionReceive(&sub.at(0), - alloc_rx_payload, - 10'008'002, - tx_item->datagram_payload, - 0, - &transfer)); - TEST_ASSERT_EQUAL(3, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(3, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(3, alloc_rx_payload.allocated_fragments); - // Free the TX item. - udpardTxFree(mem_tx, udpardTxPop(&tx, tx_item)); - TEST_ASSERT_EQUAL(1 * 2ULL, alloc_tx.allocated_fragments); - // Frame #3. This is the last frame of the transfer. The payload is truncated, see the extent. - tx_item = udpardTxPeek(&tx); - TEST_ASSERT_NOT_NULL(tx_item); - TEST_ASSERT_EQUAL_PTR(prev_next, tx_item); - prev_next = tx_item->next_in_transfer; - TEST_ASSERT_NULL(prev_next); - TEST_ASSERT_EQUAL(sub.at(0).udp_ip_endpoint.ip_address, tx_item->destination.ip_address); - TEST_ASSERT_EQUAL(10'002'000, tx_item->deadline_usec); - TEST_ASSERT_EQUAL(0xA7, tx_item->dscp); - TEST_ASSERT_EQUAL(1, - rxSubscriptionReceive(&sub.at(0), - alloc_rx_payload, - 10'008'003, - tx_item->datagram_payload, - 0, - &transfer)); - TEST_ASSERT_EQUAL(3, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(2, alloc_rx_fragment.allocated_fragments); // Extent truncation + head optimization. - TEST_ASSERT_EQUAL(3, alloc_rx_payload.allocated_fragments); // Extent truncation. - // Check the received transfer. - TEST_ASSERT_EQUAL(10'008'000, transfer.timestamp_usec); - TEST_ASSERT_EQUAL(UdpardPriorityOptional, transfer.priority); - TEST_ASSERT_EQUAL(42, transfer.source_node_id); - TEST_ASSERT_EQUAL(1, transfer.transfer_id); - TEST_ASSERT_EQUAL(300, transfer.payload_size); // Defined by the configured extent setting for this sub. - TEST_ASSERT_EQUAL(100, transfer.payload.view.size); // Defined by the MTU setting. - std::array<std::uint8_t, 1024> rx_eden{}; - TEST_ASSERT_EQUAL(300, udpardGather(transfer.payload, rx_eden.size(), rx_eden.data())); - TEST_ASSERT_EQUAL_MEMORY(Eden.data(), rx_eden.data(), 300); - // Free the transfer payload. - udpardRxFragmentFree(transfer.payload, mem_rx.fragment, mem_rx.payload); - TEST_ASSERT_EQUAL(3, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments); - // Free the TX item.
- udpardTxFree(mem_tx, udpardTxPop(&tx, tx_item)); - TEST_ASSERT_EQUAL(0, alloc_tx.allocated_fragments); - - // Close the subscriptions and ensure the memory is freed. - udpardRxSubscriptionFree(&sub.at(0)); - udpardRxSubscriptionFree(&sub.at(1)); - udpardRxSubscriptionFree(&sub.at(2)); - - // Final memory check. - TEST_ASSERT_EQUAL(0, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_tx.allocated_fragments); -} - -void testRPC() -{ - InstrumentedAllocator alloc_tx; - InstrumentedAllocator alloc_rx_session; - InstrumentedAllocator alloc_rx_fragment; - InstrumentedAllocator alloc_rx_payload; - instrumentedAllocatorNew(&alloc_tx); - instrumentedAllocatorNew(&alloc_rx_session); - instrumentedAllocatorNew(&alloc_rx_fragment); - instrumentedAllocatorNew(&alloc_rx_payload); - const UdpardTxMemoryResources mem_tx{ - .fragment = instrumentedAllocatorMakeMemoryResource(&alloc_tx), - .payload = instrumentedAllocatorMakeMemoryResource(&alloc_tx), - }; - const UdpardRxMemoryResources mem_rx{ - .session = instrumentedAllocatorMakeMemoryResource(&alloc_rx_session), - .fragment = instrumentedAllocatorMakeMemoryResource(&alloc_rx_fragment), - .payload = instrumentedAllocatorMakeMemoryDeleter(&alloc_rx_payload), - }; - // Initialize the TX pipeline. - UdpardTx tx{}; - const UdpardNodeID tx_node_id = 1234; - TEST_ASSERT_EQUAL(0, udpardTxInit(&tx, &tx_node_id, 2, mem_tx)); - tx.mtu = 500; - for (auto i = 0U; i <= UDPARD_PRIORITY_MAX; i++) - { - tx.dscp_value_per_priority[i] = static_cast<std::uint8_t>(0xA0U + i); - } - // Initialize the RPC dispatcher and the RPC services. - UdpardRxRPCDispatcher dispatcher{}; - TEST_ASSERT_EQUAL(0, udpardRxRPCDispatcherInit(&dispatcher, mem_rx)); - UdpardUDPIPEndpoint udp_ip_endpoint{}; - TEST_ASSERT_EQUAL(0, udpardRxRPCDispatcherStart(&dispatcher, 4321, &udp_ip_endpoint)); - UdpardRxRPCPort port_foo_a{}; - UdpardRxRPCPort port_foo_q{}; - TEST_ASSERT_EQUAL(1, udpardRxRPCDispatcherListen(&dispatcher, &port_foo_a, 200, false, 500)); - TEST_ASSERT_EQUAL(1, udpardRxRPCDispatcherListen(&dispatcher, &port_foo_q, 200, true, 500)); - - // Send a request. - UdpardTransferID transfer_id_shared = 0; - const std::string_view Entry = "But this simple world held a perplexing riddle: The entire galaxy was a vast " - "empty desert, but a highly intelligent civilization had appeared on the star " - "nearest to us. In this mystery, his thoughts found an entry point."; - TEST_ASSERT_EQUAL_INT32(1, - udpardTxRequest(&tx, - 10'000'000, - UdpardPriorityFast, - 200, - 4321, - transfer_id_shared++, - makePayload(Entry), - nullptr)); - TEST_ASSERT_EQUAL(1, tx.queue_size); - TEST_ASSERT_EQUAL(1, transfer_id_shared); - - // Send a response. - const std::string_view Forest = "In the dead, lonely, cold blackness, he saw the truth of the universe."; - TEST_ASSERT_EQUAL_INT32(1, - udpardTxRespond(&tx, - 10'001'000, - UdpardPriorityImmediate, - 200, - 4321, - transfer_id_shared, - makePayload(Forest), - nullptr)); - TEST_ASSERT_EQUAL(2, tx.queue_size); - - // Transmit the enqueued frames by pushing them into the RPC dispatcher. - UdpardRxRPCTransfer transfer{}; - UdpardRxRPCPort* active_port = nullptr; - // First transfer.
- TEST_ASSERT_EQUAL(0, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments); - UdpardTxItem* tx_item = udpardTxPeek(&tx); - TEST_ASSERT_NOT_NULL(tx_item); - TEST_ASSERT_EQUAL(udp_ip_endpoint.ip_address, tx_item->destination.ip_address); - TEST_ASSERT_NULL(tx_item->next_in_transfer); - TEST_ASSERT_EQUAL(10'001'000, tx_item->deadline_usec); - TEST_ASSERT_EQUAL(0xA1, tx_item->dscp); - TEST_ASSERT_EQUAL(1, - rxRPCDispatcherReceive(&dispatcher, - alloc_rx_payload, - 10'000'000, - tx_item->datagram_payload, - 0, - &active_port, - &transfer)); - TEST_ASSERT_EQUAL(1, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_rx_payload.allocated_fragments); - // Check the received transfer. - TEST_ASSERT_EQUAL(&port_foo_a, active_port); - TEST_ASSERT_EQUAL(200, transfer.service_id); - TEST_ASSERT_EQUAL(false, transfer.is_request); - TEST_ASSERT_EQUAL(10'000'000, transfer.base.timestamp_usec); - TEST_ASSERT_EQUAL(UdpardPriorityImmediate, transfer.base.priority); - TEST_ASSERT_EQUAL(1234, transfer.base.source_node_id); - TEST_ASSERT_EQUAL(1, transfer.base.transfer_id); - TEST_ASSERT_EQUAL(Forest.size(), transfer.base.payload_size); - TEST_ASSERT_EQUAL(Forest.size(), transfer.base.payload.view.size); - TEST_ASSERT_EQUAL_MEMORY(Forest.data(), transfer.base.payload.view.data, transfer.base.payload.view.size); - TEST_ASSERT_NULL(transfer.base.payload.next); - // Free the transfer payload. - udpardRxFragmentFree(transfer.base.payload, mem_rx.fragment, mem_rx.payload); - TEST_ASSERT_EQUAL(1, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments); - // Send duplicates. - TEST_ASSERT_EQUAL(0, // Duplicate on the same iface. - rxRPCDispatcherReceive(&dispatcher, - alloc_rx_payload, - 10'000'100, - tx_item->datagram_payload, - 0, - &active_port, - &transfer)); - TEST_ASSERT_EQUAL(0, // Duplicate on another iface. - rxRPCDispatcherReceive(&dispatcher, - alloc_rx_payload, - 10'000'200, - tx_item->datagram_payload, - 2, - &active_port, - &transfer)); - // Ensure the duplicates do not alter memory usage. - TEST_ASSERT_EQUAL(1, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments); - // Free the TX item. - udpardTxFree(mem_tx, udpardTxPop(&tx, tx_item)); - TEST_ASSERT_EQUAL(1 * 2ULL, alloc_tx.allocated_fragments); - - // Second transfer. - tx_item = udpardTxPeek(&tx); - TEST_ASSERT_NOT_NULL(tx_item); - TEST_ASSERT_EQUAL(udp_ip_endpoint.ip_address, tx_item->destination.ip_address); - TEST_ASSERT_NULL(tx_item->next_in_transfer); - TEST_ASSERT_EQUAL(10'000'000, tx_item->deadline_usec); - TEST_ASSERT_EQUAL(0xA2, tx_item->dscp); - TEST_ASSERT_EQUAL(1, - rxRPCDispatcherReceive(&dispatcher, - alloc_rx_payload, - 10'001'000, - tx_item->datagram_payload, - 1, - &active_port, - &transfer)); - TEST_ASSERT_EQUAL(2, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_rx_payload.allocated_fragments); - // Check the received transfer.
- TEST_ASSERT_EQUAL(&port_foo_q, active_port); - TEST_ASSERT_EQUAL(200, transfer.service_id); - TEST_ASSERT_EQUAL(true, transfer.is_request); - TEST_ASSERT_EQUAL(10'001'000, transfer.base.timestamp_usec); - TEST_ASSERT_EQUAL(UdpardPriorityFast, transfer.base.priority); - TEST_ASSERT_EQUAL(1234, transfer.base.source_node_id); - TEST_ASSERT_EQUAL(0, transfer.base.transfer_id); - TEST_ASSERT_EQUAL(Entry.size(), transfer.base.payload_size); - TEST_ASSERT_EQUAL(Entry.size(), transfer.base.payload.view.size); - TEST_ASSERT_EQUAL_MEMORY(Entry.data(), transfer.base.payload.view.data, transfer.base.payload.view.size); - TEST_ASSERT_NULL(transfer.base.payload.next); - // Free the transfer payload. - udpardRxFragmentFree(transfer.base.payload, mem_rx.fragment, mem_rx.payload); - TEST_ASSERT_EQUAL(2, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments); - // Send duplicates. - TEST_ASSERT_EQUAL(0, // Duplicate on the same iface. - rxRPCDispatcherReceive(&dispatcher, - alloc_rx_payload, - 10'001'100, - tx_item->datagram_payload, - 0, - &active_port, - &transfer)); - TEST_ASSERT_EQUAL(0, // Duplicate on another iface. - rxRPCDispatcherReceive(&dispatcher, - alloc_rx_payload, - 10'001'200, - tx_item->datagram_payload, - 2, - &active_port, - &transfer)); - // Ensure the duplicates do not alter memory usage. - TEST_ASSERT_EQUAL(2, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments); - // Free the TX item. - udpardTxFree(mem_tx, udpardTxPop(&tx, tx_item)); - TEST_ASSERT_EQUAL(0, alloc_tx.allocated_fragments); - - // Destroy the ports. - udpardRxRPCDispatcherCancel(&dispatcher, 200, false); - udpardRxRPCDispatcherCancel(&dispatcher, 200, true); - - // Final memory check. - TEST_ASSERT_EQUAL(0, alloc_rx_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_rx_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_tx.allocated_fragments); -} - -} // namespace - -void setUp() {} - -void tearDown() {} - -int main() -{ - UNITY_BEGIN(); - RUN_TEST(testPubSub); - RUN_TEST(testRPC); - return UNITY_END(); -} diff --git a/tests/src/test_e2e_api.cpp b/tests/src/test_e2e_api.cpp new file mode 100644 index 0000000..520cb4e --- /dev/null +++ b/tests/src/test_e2e_api.cpp @@ -0,0 +1,450 @@ +/// This software is distributed under the terms of the MIT License. +/// Copyright (C) OpenCyphal Development Team <opencyphal.org> +/// Copyright Amazon.com Inc. or its affiliates. +/// SPDX-License-Identifier: MIT + +// ReSharper disable CppPassValueParameterByConstReference + +#include <udpard.h> +#include "helpers.h" +#include <unity.h> +#include <array> +#include <vector> + +namespace { + +struct CapturedFrame +{ + udpard_bytes_mut_t datagram; + uint_fast8_t iface_index; +}; + +struct FeedbackState +{ + size_t count = 0; + uint16_t acknowledgements = 0; +}; + +struct RxContext +{ + std::vector<std::uint8_t> expected; + std::array<udpard_udpip_ep_t, UDPARD_IFACE_COUNT_MAX> sources{}; + uint64_t remote_uid = 0; + size_t received = 0; + size_t collisions = 0; +}; + +// Refcount helpers keep captured datagrams alive. +void tx_refcount_free(void* const user, const size_t size, void* const payload) +{ + (void)user; + udpard_tx_refcount_dec(udpard_bytes_t{ .size = size, .data = payload }); +} + +// Shared deleter for captured TX frames.
+constexpr udpard_deleter_vtable_t tx_refcount_deleter_vt{ .free = &tx_refcount_free }; + +bool capture_tx_frame_impl(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) +{ + auto* frames = static_cast<std::vector<CapturedFrame>*>(tx->user); + if (frames == nullptr) { + return false; + } + udpard_tx_refcount_inc(ejection->datagram); + void* const data = const_cast<void*>(ejection->datagram.data); // NOLINT(cppcoreguidelines-pro-type-const-cast) + frames->push_back(CapturedFrame{ .datagram = { .size = ejection->datagram.size, .data = data }, + .iface_index = ejection->iface_index }); + return true; +} + +bool capture_tx_frame_subject(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) +{ + return capture_tx_frame_impl(tx, ejection); +} + +bool capture_tx_frame_p2p(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t /*dest*/) +{ + return capture_tx_frame_impl(tx, ejection); +} + +void drop_frame(const CapturedFrame& frame) +{ + udpard_tx_refcount_dec(udpard_bytes_t{ .size = frame.datagram.size, .data = frame.datagram.data }); +} + +void fill_random(std::vector<std::uint8_t>& data) +{ + for (auto& byte : data) { + byte = static_cast<std::uint8_t>(rand()) & 0xFFU; + } +} + +constexpr udpard_tx_vtable_t tx_vtable{ .eject_subject = &capture_tx_frame_subject, + .eject_p2p = &capture_tx_frame_p2p }; + +// Feedback callback records completion. +void record_feedback(udpard_tx_t*, const udpard_tx_feedback_t fb) +{ + auto* st = static_cast<FeedbackState*>(fb.user.ptr[0]); + if (st != nullptr) { + st->count++; + st->acknowledgements = fb.acknowledgements; + } +} + +// RX callbacks validate payload and sender. +void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) +{ + auto* ctx = static_cast<RxContext*>(rx->user); + TEST_ASSERT_EQUAL_UINT64(ctx->remote_uid, transfer.remote.uid); + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + if ((transfer.remote.endpoints[i].ip != 0U) || (transfer.remote.endpoints[i].port != 0U)) { + TEST_ASSERT_EQUAL_UINT32(ctx->sources[i].ip, transfer.remote.endpoints[i].ip); + TEST_ASSERT_EQUAL_UINT16(ctx->sources[i].port, transfer.remote.endpoints[i].port); + } + } + std::vector<std::uint8_t> assembled(transfer.payload_size_stored); + const udpard_fragment_t* cursor = transfer.payload; + const size_t gathered = udpard_fragment_gather(&cursor, 0, transfer.payload_size_stored, assembled.data()); + TEST_ASSERT_EQUAL_size_t(transfer.payload_size_stored, gathered); + TEST_ASSERT_EQUAL_size_t(ctx->expected.size(), transfer.payload_size_wire); + if (!ctx->expected.empty()) { + TEST_ASSERT_EQUAL_MEMORY(ctx->expected.data(), assembled.data(), transfer.payload_size_stored); + } + udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); + ctx->received++; +} + +void on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const /*port*/, const udpard_remote_t /*remote*/) +{ + auto* ctx = static_cast<RxContext*>(rx->user); + ctx->collisions++; +} +constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message, .on_collision = &on_collision }; + +// Ack port frees responses. +void on_ack_response(udpard_rx_t*, udpard_rx_port_t* port, const udpard_rx_transfer_t tr) +{ + udpard_fragment_free_all(tr.payload, udpard_make_deleter(port->memory.fragment)); +} +constexpr udpard_rx_port_vtable_t ack_callbacks{ .on_message = &on_ack_response, .on_collision = &on_collision }; + +// Reliable delivery must survive data and ack loss. +// Each node uses exactly one TX and one RX instance as per the library design.
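+// The loop in the test below emulates a lossy two-node link: frames ejected by the publisher TX are pushed into the subscriber RX port, ACKs flow back the other way, and the first data frame plus the first ACK are dropped deliberately so that delivery only completes after a retransmission round.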
+void test_reliable_delivery_under_losses() +{ + seed_prng(); + + // Allocators - one TX and one RX per node. + // Publisher node allocators. + instrumented_allocator_t pub_tx_alloc_transfer{}; + instrumented_allocator_t pub_tx_alloc_payload{}; + instrumented_allocator_t pub_rx_alloc_frag{}; + instrumented_allocator_t pub_rx_alloc_session{}; + instrumented_allocator_new(&pub_tx_alloc_transfer); + instrumented_allocator_new(&pub_tx_alloc_payload); + instrumented_allocator_new(&pub_rx_alloc_frag); + instrumented_allocator_new(&pub_rx_alloc_session); + + // Subscriber node allocators. + instrumented_allocator_t sub_tx_alloc_transfer{}; + instrumented_allocator_t sub_tx_alloc_payload{}; + instrumented_allocator_t sub_rx_alloc_frag{}; + instrumented_allocator_t sub_rx_alloc_session{}; + instrumented_allocator_new(&sub_tx_alloc_transfer); + instrumented_allocator_new(&sub_tx_alloc_payload); + instrumented_allocator_new(&sub_rx_alloc_frag); + instrumented_allocator_new(&sub_rx_alloc_session); + + // Memory resources. + udpard_tx_mem_resources_t pub_tx_mem{}; + pub_tx_mem.transfer = instrumented_allocator_make_resource(&pub_tx_alloc_transfer); + for (auto& res : pub_tx_mem.payload) { + res = instrumented_allocator_make_resource(&pub_tx_alloc_payload); + } + const udpard_rx_mem_resources_t pub_rx_mem{ .session = instrumented_allocator_make_resource(&pub_rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&pub_rx_alloc_frag) }; + + udpard_tx_mem_resources_t sub_tx_mem{}; + sub_tx_mem.transfer = instrumented_allocator_make_resource(&sub_tx_alloc_transfer); + for (auto& res : sub_tx_mem.payload) { + res = instrumented_allocator_make_resource(&sub_tx_alloc_payload); + } + const udpard_rx_mem_resources_t sub_rx_mem{ .session = instrumented_allocator_make_resource(&sub_rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&sub_rx_alloc_frag) }; + + // Publisher node: single TX, single RX (linked to TX for ACK processing). + constexpr uint64_t pub_uid = 0x1111222233334444ULL; + udpard_tx_t pub_tx{}; + std::vector<CapturedFrame> pub_frames; + TEST_ASSERT_TRUE(udpard_tx_new(&pub_tx, pub_uid, 10U, 64, pub_tx_mem, &tx_vtable)); + pub_tx.user = &pub_frames; + pub_tx.ack_baseline_timeout = 8000; + + udpard_rx_t pub_rx{}; + udpard_rx_new(&pub_rx, &pub_tx); + udpard_rx_port_t pub_p2p_port{}; + TEST_ASSERT_TRUE( + udpard_rx_port_new(&pub_p2p_port, pub_uid, 16, udpard_rx_unordered, 0, pub_rx_mem, &ack_callbacks)); + + // Subscriber node: single TX, single RX (linked to TX for sending ACKs). + constexpr uint64_t sub_uid = 0xABCDEF0012345678ULL; + udpard_tx_t sub_tx{}; + std::vector<CapturedFrame> sub_frames; + TEST_ASSERT_TRUE(udpard_tx_new(&sub_tx, sub_uid, 77U, 8, sub_tx_mem, &tx_vtable)); + sub_tx.user = &sub_frames; + + udpard_rx_t sub_rx{}; + udpard_rx_new(&sub_rx, &sub_tx); + udpard_rx_port_t sub_port{}; + const uint64_t topic_hash = 0x0123456789ABCDEFULL; + TEST_ASSERT_TRUE(udpard_rx_port_new(&sub_port, topic_hash, 6000, udpard_rx_unordered, 0, sub_rx_mem, &callbacks)); + + // Endpoints. + const std::array<udpard_udpip_ep_t, UDPARD_IFACE_COUNT_MAX> publisher_sources{ + udpard_udpip_ep_t{ .ip = 0x0A000001U, .port = 7400U }, + udpard_udpip_ep_t{ .ip = 0x0A000002U, .port = 7401U }, + udpard_udpip_ep_t{ .ip = 0x0A000003U, .port = 7402U }, + }; + const std::array<udpard_udpip_ep_t, UDPARD_IFACE_COUNT_MAX> subscriber_sources{ + udpard_udpip_ep_t{ .ip = 0x0A000010U, .port = 7600U }, + udpard_udpip_ep_t{ .ip = 0x0A000011U, .port = 7601U }, + udpard_udpip_ep_t{ .ip = 0x0A000012U, .port = 7602U }, + }; + // Payload and context.
+ std::vector<std::uint8_t> payload(4096); + fill_random(payload); + RxContext ctx{}; + ctx.expected = payload; + ctx.sources = publisher_sources; + ctx.remote_uid = pub_uid; + sub_rx.user = &ctx; + + // Reliable transfer with staged losses. + FeedbackState fb{}; + const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); + pub_tx.mtu[0] = 600; + pub_tx.mtu[1] = 900; + pub_tx.mtu[2] = 500; + const udpard_us_t start = 0; + const udpard_us_t deadline = start + 200000; + const uint16_t iface_bitmap_all = UDPARD_IFACE_BITMAP_ALL; + const udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr }; + TEST_ASSERT_TRUE(udpard_tx_push(&pub_tx, + start, + deadline, + iface_bitmap_all, + udpard_prio_fast, + topic_hash, + 1U, + payload_view, + &record_feedback, + make_user_context(&fb))); + + // Send until acked; drop first data frame and first ack. + bool first_round = true; + udpard_us_t now = start; + size_t attempts = 0; + const size_t attempt_cap = 6; + while ((fb.count == 0) && (attempts < attempt_cap)) { + // Publisher transmits topic message. + pub_frames.clear(); + udpard_tx_poll(&pub_tx, now, UDPARD_IFACE_BITMAP_ALL); + bool data_loss_done = false; + for (const auto& frame : pub_frames) { + const bool drop = first_round && !data_loss_done && (frame.iface_index == 1U); + if (drop) { + drop_frame(frame); + data_loss_done = true; + continue; + } + TEST_ASSERT_TRUE(udpard_rx_port_push(&sub_rx, + &sub_port, + now, + publisher_sources[frame.iface_index], + frame.datagram, + tx_payload_deleter, + frame.iface_index)); + } + udpard_rx_poll(&sub_rx, now); + + // Subscriber transmits ACKs (via sub_tx since sub_rx is linked to it). + sub_frames.clear(); + udpard_tx_poll(&sub_tx, now, UDPARD_IFACE_BITMAP_ALL); + bool ack_sent = false; + for (const auto& ack : sub_frames) { + const bool drop_ack = first_round && !ack_sent; + if (drop_ack) { + drop_frame(ack); + continue; + } + ack_sent = true; + TEST_ASSERT_TRUE(udpard_rx_port_push(&pub_rx, + &pub_p2p_port, + now, + subscriber_sources[ack.iface_index], + ack.datagram, + tx_payload_deleter, + ack.iface_index)); + } + udpard_rx_poll(&pub_rx, now); + first_round = false; + attempts++; + now += pub_tx.ack_baseline_timeout + 5000; + } + + TEST_ASSERT_EQUAL_size_t(1, fb.count); + TEST_ASSERT_EQUAL_UINT32(1, fb.acknowledgements); + TEST_ASSERT_EQUAL_size_t(1, ctx.received); + TEST_ASSERT_EQUAL_size_t(0, ctx.collisions); + + // Cleanup.
+ udpard_rx_port_free(&sub_rx, &sub_port); + udpard_rx_port_free(&pub_rx, &pub_p2p_port); + udpard_tx_free(&pub_tx); + udpard_tx_free(&sub_tx); + + TEST_ASSERT_EQUAL_size_t(0, pub_tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, pub_tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, pub_rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, pub_rx_alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, sub_tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, sub_tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, sub_rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, sub_rx_alloc_session.allocated_fragments); + + instrumented_allocator_reset(&pub_tx_alloc_transfer); + instrumented_allocator_reset(&pub_tx_alloc_payload); + instrumented_allocator_reset(&pub_rx_alloc_frag); + instrumented_allocator_reset(&pub_rx_alloc_session); + instrumented_allocator_reset(&sub_tx_alloc_transfer); + instrumented_allocator_reset(&sub_tx_alloc_payload); + instrumented_allocator_reset(&sub_rx_alloc_frag); + instrumented_allocator_reset(&sub_rx_alloc_session); +} + +// Counters must reflect expired deliveries and ack failures. +void test_reliable_stats_and_failures() +{ + seed_prng(); + + // Expiration path. + instrumented_allocator_t exp_alloc_transfer{}; + instrumented_allocator_t exp_alloc_payload{}; + instrumented_allocator_new(&exp_alloc_transfer); + instrumented_allocator_new(&exp_alloc_payload); + udpard_tx_mem_resources_t exp_mem{}; + exp_mem.transfer = instrumented_allocator_make_resource(&exp_alloc_transfer); + for (auto& res : exp_mem.payload) { + res = instrumented_allocator_make_resource(&exp_alloc_payload); + } + udpard_tx_t exp_tx{}; + std::vector<CapturedFrame> exp_frames; + TEST_ASSERT_TRUE(udpard_tx_new(&exp_tx, 0x9999000011112222ULL, 2U, 4, exp_mem, &tx_vtable)); + exp_tx.user = &exp_frames; + FeedbackState fb_fail{}; + const uint16_t iface_bitmap_1 = (1U << 0U); + const udpard_bytes_scattered_t exp_payload = make_scattered("ping", 4); + TEST_ASSERT_TRUE(udpard_tx_push(&exp_tx, + 0, + 10, + iface_bitmap_1, + udpard_prio_fast, + 0xABCULL, + 5U, + exp_payload, + &record_feedback, + make_user_context(&fb_fail))); + udpard_tx_poll(&exp_tx, 0, UDPARD_IFACE_BITMAP_ALL); + for (const auto& f : exp_frames) { + drop_frame(f); + } + exp_frames.clear(); + udpard_tx_poll(&exp_tx, 20, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_EQUAL_size_t(1, fb_fail.count); + TEST_ASSERT_EQUAL_UINT32(0, fb_fail.acknowledgements); + TEST_ASSERT_GREATER_THAN_UINT64(0, exp_tx.errors_expiration); + udpard_tx_free(&exp_tx); + TEST_ASSERT_EQUAL_size_t(0, exp_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, exp_alloc_payload.allocated_fragments); + instrumented_allocator_reset(&exp_alloc_transfer); + instrumented_allocator_reset(&exp_alloc_payload); + + // Ack push failure increments counters.
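+ // (The receiver below is created via udpard_rx_new(&rx, nullptr), i.e. without a linked TX instance, so it has no way to emit an ACK for the reliable transfer: the message is still delivered, but errors_ack_tx is expected to grow.)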
+ instrumented_allocator_t rx_alloc_frag{}; + instrumented_allocator_t rx_alloc_session{}; + instrumented_allocator_t src_alloc_transfer{}; + instrumented_allocator_t src_alloc_payload{}; + instrumented_allocator_new(&rx_alloc_frag); + instrumented_allocator_new(&rx_alloc_session); + instrumented_allocator_new(&src_alloc_transfer); + instrumented_allocator_new(&src_alloc_payload); + const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; + udpard_tx_mem_resources_t src_mem{}; + src_mem.transfer = instrumented_allocator_make_resource(&src_alloc_transfer); + for (auto& res : src_mem.payload) { + res = instrumented_allocator_make_resource(&src_alloc_payload); + } + + udpard_tx_t src_tx{}; + std::vector<CapturedFrame> src_frames; + TEST_ASSERT_TRUE(udpard_tx_new(&src_tx, 0x5555AAAABBBBCCCCULL, 3U, 4, src_mem, &tx_vtable)); + src_tx.user = &src_frames; + udpard_rx_t rx{}; + udpard_rx_port_t port{}; + RxContext ctx{}; + ctx.remote_uid = src_tx.local_uid; + ctx.sources = { udpard_udpip_ep_t{ .ip = 0x0A000021U, .port = 7700U }, udpard_udpip_ep_t{}, udpard_udpip_ep_t{} }; + ctx.expected.assign({ 1U, 2U, 3U, 4U }); + udpard_rx_new(&rx, nullptr); + rx.user = &ctx; + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 0x12340000ULL, 64, udpard_rx_unordered, 0, rx_mem, &callbacks)); + + const udpard_bytes_scattered_t src_payload = make_scattered(ctx.expected.data(), ctx.expected.size()); + FeedbackState fb_ignore{}; + TEST_ASSERT_TRUE(udpard_tx_push(&src_tx, + 0, + 1000, + iface_bitmap_1, + udpard_prio_fast, + port.topic_hash, + 7U, + src_payload, + &record_feedback, + make_user_context(&fb_ignore))); + udpard_tx_poll(&src_tx, 0, UDPARD_IFACE_BITMAP_ALL); + const udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr }; + for (const auto& f : src_frames) { + TEST_ASSERT_TRUE(udpard_rx_port_push( + &rx, &port, 0, ctx.sources[f.iface_index], f.datagram, tx_payload_deleter, f.iface_index)); + } + udpard_rx_poll(&rx, 0); + TEST_ASSERT_GREATER_THAN_UINT64(0, rx.errors_ack_tx); + TEST_ASSERT_EQUAL_size_t(1, ctx.received); + + udpard_rx_port_free(&rx, &port); + udpard_tx_free(&src_tx); + TEST_ASSERT_EQUAL_size_t(0, rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, rx_alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, src_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, src_alloc_payload.allocated_fragments); + instrumented_allocator_reset(&rx_alloc_frag); + instrumented_allocator_reset(&rx_alloc_session); + instrumented_allocator_reset(&src_alloc_transfer); + instrumented_allocator_reset(&src_alloc_payload); +} + +} // namespace + +extern "C" void setUp() {} + +extern "C" void tearDown() {} + +int main() +{ + UNITY_BEGIN(); + RUN_TEST(test_reliable_delivery_under_losses); + RUN_TEST(test_reliable_stats_and_failures); + return UNITY_END(); +} diff --git a/tests/src/test_e2e_edge.cpp b/tests/src/test_e2e_edge.cpp new file mode 100644 index 0000000..c161a54 --- /dev/null +++ b/tests/src/test_e2e_edge.cpp @@ -0,0 +1,915 @@ +/// This software is distributed under the terms of the MIT License. +/// Copyright (C) OpenCyphal Development Team <opencyphal.org> +/// Copyright Amazon.com Inc. or its affiliates.
+/// SPDX-License-Identifier: MIT + +// ReSharper disable CppPassValueParameterByConstReference + +#include <udpard.h> +#include "helpers.h" +#include <unity.h> +#include <array> +#include <vector> + +namespace { + +void on_message(udpard_rx_t* rx, udpard_rx_port_t* port, udpard_rx_transfer_t transfer); +void on_collision(udpard_rx_t* rx, udpard_rx_port_t* port, udpard_remote_t remote); +constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message, .on_collision = &on_collision }; + +struct FbState +{ + size_t count = 0; + uint16_t acknowledgements = 0; +}; + +struct CapturedFrame +{ + udpard_bytes_mut_t datagram; + uint_fast8_t iface_index; +}; + +void tx_refcount_free(void* const user, const size_t size, void* const payload) +{ + (void)user; + udpard_tx_refcount_dec(udpard_bytes_t{ .size = size, .data = payload }); +} + +// Shared deleter for captured TX frames. +constexpr udpard_deleter_vtable_t tx_refcount_deleter_vt{ .free = &tx_refcount_free }; + +bool capture_tx_frame_impl(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) +{ + auto* frames = static_cast<std::vector<CapturedFrame>*>(tx->user); + if (frames == nullptr) { + return false; + } + udpard_tx_refcount_inc(ejection->datagram); + void* const data = const_cast<void*>(ejection->datagram.data); // NOLINT(cppcoreguidelines-pro-type-const-cast) + frames->push_back(CapturedFrame{ .datagram = { .size = ejection->datagram.size, .data = data }, + .iface_index = ejection->iface_index }); + return true; +} + +bool capture_tx_frame_subject(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) +{ + return capture_tx_frame_impl(tx, ejection); +} + +bool capture_tx_frame_p2p(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t /*dest*/) +{ + return capture_tx_frame_impl(tx, ejection); +} + +constexpr udpard_tx_vtable_t tx_vtable{ .eject_subject = &capture_tx_frame_subject, + .eject_p2p = &capture_tx_frame_p2p }; + +void fb_record(udpard_tx_t*, const udpard_tx_feedback_t fb) +{ + auto* st = static_cast<FbState*>(fb.user.ptr[0]); + if (st != nullptr) { + st->count++; + st->acknowledgements = fb.acknowledgements; + } +} + +void release_frames(std::vector<CapturedFrame>& frames) +{ + for (const auto& [datagram, iface_index] : frames) { + udpard_tx_refcount_dec(udpard_bytes_t{ .size = datagram.size, .data = datagram.data }); + } + frames.clear(); +} + +struct Context +{ + std::vector<std::uint64_t> ids; + size_t collisions = 0; + uint64_t expected_uid = 0; + udpard_udpip_ep_t source{}; +}; + +struct Fixture +{ + instrumented_allocator_t tx_alloc_transfer{}; + instrumented_allocator_t tx_alloc_payload{}; + instrumented_allocator_t rx_alloc_frag{}; + instrumented_allocator_t rx_alloc_session{}; + udpard_tx_t tx{}; + udpard_rx_t rx{}; + udpard_rx_port_t port{}; + udpard_deleter_t tx_payload_deleter{}; + std::vector<CapturedFrame> frames; + Context ctx{}; + udpard_udpip_ep_t dest{}; + udpard_udpip_ep_t source{}; + uint64_t topic_hash{ 0x90AB12CD34EF5678ULL }; + + Fixture(const Fixture&) = delete; + Fixture& operator=(const Fixture&) = delete; + Fixture(Fixture&&) = delete; + Fixture& operator=(Fixture&&) = delete; + + explicit Fixture(const udpard_rx_mode_t mode, const udpard_us_t reordering_window) + { + instrumented_allocator_new(&tx_alloc_transfer); + instrumented_allocator_new(&tx_alloc_payload); + instrumented_allocator_new(&rx_alloc_frag); + instrumented_allocator_new(&rx_alloc_session); + udpard_tx_mem_resources_t tx_mem{}; + tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); + for (auto& res : tx_mem.payload) { + res = instrumented_allocator_make_resource(&tx_alloc_payload); + } + const
udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; + tx_payload_deleter = udpard_deleter_t{ .vtable = &tx_refcount_deleter_vt, .context = nullptr }; + source = { .ip = 0x0A000001U, .port = 7501U }; + dest = udpard_make_subject_endpoint(222U); + + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0A0B0C0D0E0F1011ULL, 42U, 16, tx_mem, &tx_vtable)); + tx.user = &frames; + udpard_rx_new(&rx, nullptr); + ctx.expected_uid = tx.local_uid; + ctx.source = source; + rx.user = &ctx; + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, topic_hash, 1024, mode, reordering_window, rx_mem, &callbacks)); + } + + ~Fixture() + { + udpard_rx_port_free(&rx, &port); + udpard_tx_free(&tx); + TEST_ASSERT_EQUAL_size_t(0, rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, rx_alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, tx_alloc_payload.allocated_fragments); + instrumented_allocator_reset(&rx_alloc_frag); + instrumented_allocator_reset(&rx_alloc_session); + instrumented_allocator_reset(&tx_alloc_transfer); + instrumented_allocator_reset(&tx_alloc_payload); + } + + void push_single(const udpard_us_t ts, const uint64_t transfer_id) + { + frames.clear(); + std::array payload_buf{}; + for (size_t i = 0; i < payload_buf.size(); i++) { + payload_buf[i] = static_cast(transfer_id >> (i * 8U)); + } + const udpard_bytes_scattered_t payload = make_scattered(payload_buf.data(), payload_buf.size()); + const udpard_us_t deadline = ts + 1000000; + for (auto& mtu_value : tx.mtu) { + mtu_value = UDPARD_MTU_DEFAULT; + } + constexpr uint16_t iface_bitmap_1 = (1U << 0U); + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + ts, + deadline, + iface_bitmap_1, + udpard_prio_slow, + topic_hash, + transfer_id, + payload, + nullptr, + UDPARD_USER_CONTEXT_NULL)); + udpard_tx_poll(&tx, ts, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_GREATER_THAN_UINT32(0U, static_cast(frames.size())); + for (const auto& [datagram, iface_index] : frames) { + TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &port, ts, source, datagram, tx_payload_deleter, iface_index)); + } + } +}; + +/// Callbacks keep the payload memory under control. +void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) +{ + auto* const ctx = static_cast(rx->user); + ctx->ids.push_back(transfer.transfer_id); + TEST_ASSERT_EQUAL_UINT64(ctx->expected_uid, transfer.remote.uid); + TEST_ASSERT_EQUAL_UINT32(ctx->source.ip, transfer.remote.endpoints[0].ip); + TEST_ASSERT_EQUAL_UINT16(ctx->source.port, transfer.remote.endpoints[0].port); + udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); +} + +void on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const /*port*/, const udpard_remote_t /*remote*/) +{ + auto* const ctx = static_cast(rx->user); + ctx->collisions++; +} + +/// UNORDERED mode should drop duplicates while keeping arrival order. 
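+///
+/// For illustration: with the arrival sequence {100, 20000, 10100, 5000, 20000, 100} used
+/// below, the repeated 20000 and 100 are suppressed as duplicates, so the callback sees
+/// exactly {100, 20000, 10100, 5000} in arrival order.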
+void test_udpard_rx_unordered_duplicates() +{ + Fixture fix{ udpard_rx_unordered, 0 }; + udpard_us_t now = 0; + + constexpr std::array ids{ 100, 20000, 10100, 5000, 20000, 100 }; + for (const auto id : ids) { + fix.push_single(now, id); + udpard_rx_poll(&fix.rx, now); + now++; + } + udpard_rx_poll(&fix.rx, now + 100); + + constexpr std::array expected{ 100, 20000, 10100, 5000 }; + TEST_ASSERT_EQUAL_size_t(expected.size(), fix.ctx.ids.size()); + for (size_t i = 0; i < expected.size(); i++) { + TEST_ASSERT_EQUAL_UINT64(expected[i], fix.ctx.ids[i]); + } + TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions); +} + +/// ORDERED mode waits for the window, then rejects late arrivals. +void test_udpard_rx_ordered_out_of_order() +{ + Fixture fix{ udpard_rx_ordered, 50 }; + udpard_us_t now = 0; + + // First batch builds the ordered baseline. + fix.push_single(now, 100); + udpard_rx_poll(&fix.rx, now); + fix.push_single(++now, 300); + udpard_rx_poll(&fix.rx, now); + fix.push_single(++now, 200); + udpard_rx_poll(&fix.rx, now); + + // Let the reordering window close for the early transfers. + now = 60; + udpard_rx_poll(&fix.rx, now); + + // Queue far-future IDs while keeping the head at 300. + fix.push_single(now + 1, 10100); + udpard_rx_poll(&fix.rx, now + 1); + fix.push_single(now + 2, 10200); + udpard_rx_poll(&fix.rx, now + 2); + + // Late arrivals inside the window shall be dropped. + fix.push_single(now + 3, 250); + udpard_rx_poll(&fix.rx, now + 3); + fix.push_single(now + 4, 150); + udpard_rx_poll(&fix.rx, now + 4); + + // Allow the window to expire so the remaining interned transfers eject. + udpard_rx_poll(&fix.rx, now + 70); + + constexpr std::array expected{ 100, 200, 300, 10100, 10200 }; + TEST_ASSERT_EQUAL_size_t(expected.size(), fix.ctx.ids.size()); + for (size_t i = 0; i < expected.size(); i++) { + TEST_ASSERT_EQUAL_UINT64(expected[i], fix.ctx.ids[i]); + } + TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions); +} + +/// ORDERED mode after head advance should reject late IDs arriving after window expiry. +void test_udpard_rx_ordered_head_advanced_late() +{ + Fixture fix{ udpard_rx_ordered, 50 }; + udpard_us_t now = 0; + + fix.push_single(now, 100); + udpard_rx_poll(&fix.rx, now); + fix.push_single(++now, 300); + udpard_rx_poll(&fix.rx, now); + fix.push_single(++now, 200); + udpard_rx_poll(&fix.rx, now); + now = 60; + udpard_rx_poll(&fix.rx, now); // head -> 300 + + fix.push_single(++now, 420); + udpard_rx_poll(&fix.rx, now); + fix.push_single(++now, 450); + udpard_rx_poll(&fix.rx, now); + now = 120; + udpard_rx_poll(&fix.rx, now); // head -> 450 + + fix.push_single(++now, 320); + udpard_rx_poll(&fix.rx, now); + fix.push_single(++now, 310); + udpard_rx_poll(&fix.rx, now); + + constexpr std::array expected{ 100, 200, 300, 420, 450 }; + TEST_ASSERT_EQUAL_size_t(expected.size(), fix.ctx.ids.size()); + for (size_t i = 0; i < expected.size(); i++) { + TEST_ASSERT_EQUAL_UINT64(expected[i], fix.ctx.ids[i]); + } + TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions); +} + +/// ORDERED mode rejects transfer-IDs far behind the recent history window. 
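+///
+/// A sketch of the accept/reject rule this test assumes; `history_depth` below is
+/// hypothetical (the real depth is internal to the library), but it models why an ID
+/// slightly behind the head is dropped as a late duplicate while an ID far behind it
+/// falls outside the tracked history and is accepted as a fresh restart:
+///
+///     constexpr uint64_t history_depth = 10000U; // hypothetical value
+///     constexpr bool rejected_as_late(const uint64_t head, const uint64_t tid)
+///     {
+///         return (tid < head) && ((head - tid) <= history_depth);
+///     }
+///     static_assert(rejected_as_late(200000U, 199000U));   // close behind: dropped
+///     static_assert(!rejected_as_late(200000U, 100000U));  // far behind: accepted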
+void test_udpard_rx_ordered_reject_far_past() +{ + Fixture fix{ udpard_rx_ordered, 50 }; + udpard_us_t now = 0; + + fix.push_single(now, 200000); + udpard_rx_poll(&fix.rx, now); + + now = 60; + udpard_rx_poll(&fix.rx, now); + + const uint64_t late_tid_close = 200000 - 1000; + fix.push_single(++now, late_tid_close); + udpard_rx_poll(&fix.rx, now); + udpard_rx_poll(&fix.rx, now + 100); + + const uint64_t far_past_tid = 200000 - 100000; + fix.push_single(++now, far_past_tid); + udpard_rx_poll(&fix.rx, now); + udpard_rx_poll(&fix.rx, now + 50); + + const uint64_t recent_tid = 200001; + fix.push_single(++now, recent_tid); + udpard_rx_poll(&fix.rx, now); + udpard_rx_poll(&fix.rx, now + 50); + + constexpr std::array expected{ 200000, far_past_tid, recent_tid }; + TEST_ASSERT_EQUAL_size_t(expected.size(), fix.ctx.ids.size()); + for (size_t i = 0; i < expected.size(); i++) { + TEST_ASSERT_EQUAL_UINT64(expected[i], fix.ctx.ids[i]); + } + TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions); +} + +// Feedback must fire regardless of disposal path. +void test_udpard_tx_feedback_always_called() +{ + instrumented_allocator_t tx_alloc_transfer{}; + instrumented_allocator_t tx_alloc_payload{}; + instrumented_allocator_new(&tx_alloc_transfer); + instrumented_allocator_new(&tx_alloc_payload); + udpard_tx_mem_resources_t tx_mem{}; + tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); + for (auto& res : tx_mem.payload) { + res = instrumented_allocator_make_resource(&tx_alloc_payload); + } + constexpr uint16_t iface_bitmap_1 = (1U << 0U); + + // Expiration path triggers feedback=false. + { + std::vector frames; + udpard_tx_t tx{}; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 1U, 1U, 4, tx_mem, &tx_vtable)); + tx.user = &frames; + FbState fb{}; + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 10, + 10, + iface_bitmap_1, + udpard_prio_fast, + 1, + 11, + make_scattered(nullptr, 0), + fb_record, + make_user_context(&fb))); + udpard_tx_poll(&tx, 11, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_EQUAL_size_t(1, fb.count); + TEST_ASSERT_EQUAL_UINT32(0, fb.acknowledgements); + release_frames(frames); + udpard_tx_free(&tx); + } + + // Sacrifice path should also emit feedback. + { + std::vector frames; + udpard_tx_t tx{}; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 2U, 1U, 1, tx_mem, &tx_vtable)); + tx.user = &frames; + FbState fb_old{}; + FbState fb_new{}; + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 1000, + iface_bitmap_1, + udpard_prio_fast, + 2, + 21, + make_scattered(nullptr, 0), + fb_record, + make_user_context(&fb_old))); + (void)udpard_tx_push(&tx, + 0, + 1000, + iface_bitmap_1, + udpard_prio_fast, + 3, + 22, + make_scattered(nullptr, 0), + fb_record, + make_user_context(&fb_new)); + TEST_ASSERT_EQUAL_size_t(1, fb_old.count); + TEST_ASSERT_EQUAL_UINT32(0, fb_old.acknowledgements); + TEST_ASSERT_GREATER_OR_EQUAL_UINT64(1, tx.errors_sacrifice); + TEST_ASSERT_EQUAL_size_t(0, fb_new.count); + release_frames(frames); + udpard_tx_free(&tx); + } + + // Destroying a TX with pending transfers still calls feedback. 
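+    // (The block below relies on udpard_tx_free() flushing undelivered transfers through the
+    // same feedback path as expiry and sacrifice, reporting zero acknowledgements for each.)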
+ { + std::vector frames; + udpard_tx_t tx{}; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 3U, 1U, 4, tx_mem, &tx_vtable)); + tx.user = &frames; + FbState fb{}; + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 1000, + iface_bitmap_1, + udpard_prio_fast, + 4, + 33, + make_scattered(nullptr, 0), + fb_record, + make_user_context(&fb))); + udpard_tx_free(&tx); + TEST_ASSERT_EQUAL_size_t(1, fb.count); + TEST_ASSERT_EQUAL_UINT32(0, fb.acknowledgements); + release_frames(frames); + } + + instrumented_allocator_reset(&tx_alloc_transfer); + instrumented_allocator_reset(&tx_alloc_payload); +} + +/// P2P helper should emit frames with auto transfer-ID and proper addressing. +void test_udpard_tx_push_p2p() +{ + instrumented_allocator_t tx_alloc_transfer{}; + instrumented_allocator_t tx_alloc_payload{}; + instrumented_allocator_t rx_alloc_frag{}; + instrumented_allocator_t rx_alloc_session{}; + instrumented_allocator_new(&tx_alloc_transfer); + instrumented_allocator_new(&tx_alloc_payload); + instrumented_allocator_new(&rx_alloc_frag); + instrumented_allocator_new(&rx_alloc_session); + udpard_tx_mem_resources_t tx_mem{}; + tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); + for (auto& res : tx_mem.payload) { + res = instrumented_allocator_make_resource(&tx_alloc_payload); + } + udpard_tx_t tx{}; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x1122334455667788ULL, 5U, 8, tx_mem, &tx_vtable)); + std::vector frames; + tx.user = &frames; + + const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; + udpard_rx_t rx{}; + udpard_rx_port_t port{}; + Context ctx{}; + const udpard_udpip_ep_t source{ .ip = 0x0A0000AAU, .port = 7600U }; + const udpard_udpip_ep_t dest{ .ip = 0x0A000010U, .port = 7400U }; + const uint64_t local_uid = 0xCAFEBABECAFED00DULL; + ctx.expected_uid = tx.local_uid; + ctx.source = source; + rx.user = &ctx; + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, local_uid, 1024, udpard_rx_unordered, 0, rx_mem, &callbacks)); + + udpard_remote_t remote{}; + remote.uid = local_uid; + remote.endpoints[0U] = dest; + + const std::array user_payload{ 0xAAU, 0xBBU, 0xCCU }; + const udpard_bytes_scattered_t payload = make_scattered(user_payload.data(), user_payload.size()); + const udpard_us_t now = 0; + uint64_t out_tid = 0; + TEST_ASSERT_TRUE(udpard_tx_push_p2p( + &tx, now, now + 1000000, udpard_prio_nominal, remote, payload, nullptr, UDPARD_USER_CONTEXT_NULL, &out_tid)); + udpard_tx_poll(&tx, now, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_FALSE(frames.empty()); + + const udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr }; + for (const auto& f : frames) { + TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &port, now, source, f.datagram, tx_payload_deleter, f.iface_index)); + } + udpard_rx_poll(&rx, now); + TEST_ASSERT_EQUAL_size_t(1, ctx.ids.size()); + TEST_ASSERT_EQUAL_UINT64(out_tid, ctx.ids[0]); + TEST_ASSERT_EQUAL_size_t(0, ctx.collisions); + + udpard_rx_port_free(&rx, &port); + udpard_tx_free(&tx); + TEST_ASSERT_EQUAL(0, tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL(0, tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL(0, rx_alloc_session.allocated_fragments); + instrumented_allocator_reset(&tx_alloc_transfer); + instrumented_allocator_reset(&tx_alloc_payload); + instrumented_allocator_reset(&rx_alloc_frag); + instrumented_allocator_reset(&rx_alloc_session); +} + +/// 
Test TX with minimum MTU to verify fragmentation at the edge. +void test_udpard_tx_minimum_mtu() +{ + instrumented_allocator_t tx_alloc_transfer{}; + instrumented_allocator_t tx_alloc_payload{}; + instrumented_allocator_t rx_alloc_frag{}; + instrumented_allocator_t rx_alloc_session{}; + instrumented_allocator_new(&tx_alloc_transfer); + instrumented_allocator_new(&tx_alloc_payload); + instrumented_allocator_new(&rx_alloc_frag); + instrumented_allocator_new(&rx_alloc_session); + + udpard_tx_mem_resources_t tx_mem{}; + tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); + for (auto& res : tx_mem.payload) { + res = instrumented_allocator_make_resource(&tx_alloc_payload); + } + const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; + + udpard_tx_t tx{}; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0xDEADBEEF12345678ULL, 100U, 256, tx_mem, &tx_vtable)); + std::vector frames; + tx.user = &frames; + + // Set MTU to minimum value + for (auto& mtu : tx.mtu) { + mtu = UDPARD_MTU_MIN; + } + + udpard_rx_t rx{}; + udpard_rx_port_t port{}; + Context ctx{}; + const uint64_t topic_hash = 0x1234567890ABCDEFULL; + ctx.expected_uid = tx.local_uid; + ctx.source = { .ip = 0x0A000001U, .port = 7501U }; + udpard_rx_new(&rx, nullptr); + rx.user = &ctx; + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, topic_hash, 4096, udpard_rx_unordered, 0, rx_mem, &callbacks)); + + // Send a payload that will require fragmentation at minimum MTU + std::array payload{}; + for (size_t i = 0; i < payload.size(); i++) { + payload[i] = static_cast(i & 0xFFU); + } + + const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); + constexpr uint16_t iface_bitmap_1 = (1U << 0U); + + const udpard_us_t now = 0; + frames.clear(); + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + now, + now + 1000000, + iface_bitmap_1, + udpard_prio_nominal, + topic_hash, + 1U, + payload_view, + nullptr, + UDPARD_USER_CONTEXT_NULL)); + udpard_tx_poll(&tx, now, UDPARD_IFACE_BITMAP_ALL); + + // With minimum MTU, we should have multiple frames + TEST_ASSERT_TRUE(frames.size() > 1); + + // Deliver frames to RX + const udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr }; + for (const auto& f : frames) { + TEST_ASSERT_TRUE( + udpard_rx_port_push(&rx, &port, now, ctx.source, f.datagram, tx_payload_deleter, f.iface_index)); + } + udpard_rx_poll(&rx, now); + + // Verify the transfer was received correctly + TEST_ASSERT_EQUAL_size_t(1, ctx.ids.size()); + TEST_ASSERT_EQUAL_UINT64(1U, ctx.ids[0]); + + // Cleanup + udpard_rx_port_free(&rx, &port); + udpard_tx_free(&tx); + TEST_ASSERT_EQUAL(0, tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL(0, tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL(0, rx_alloc_session.allocated_fragments); + instrumented_allocator_reset(&tx_alloc_transfer); + instrumented_allocator_reset(&tx_alloc_payload); + instrumented_allocator_reset(&rx_alloc_frag); + instrumented_allocator_reset(&rx_alloc_session); +} + +/// Test with transfer-ID at uint64 boundary values (0, large values) +void test_udpard_transfer_id_boundaries() +{ + Fixture fix{ udpard_rx_unordered, 0 }; + + // Test transfer-ID = 0 (first valid value) + fix.push_single(0, 0); + udpard_rx_poll(&fix.rx, 0); + TEST_ASSERT_EQUAL_size_t(1, fix.ctx.ids.size()); + TEST_ASSERT_EQUAL_UINT64(0U, fix.ctx.ids[0]); + + 
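+    // Note: 2^63 - 1 and 2^63 sit on either side of the sign bit; accepting both back to back
+    // checks that the duplicate-history arithmetic spans the full unsigned 64-bit range.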
// Test a large transfer-ID value + fix.push_single(1, 0x7FFFFFFFFFFFFFFFULL); // Large but not at the extreme edge + udpard_rx_poll(&fix.rx, 1); + TEST_ASSERT_EQUAL_size_t(2, fix.ctx.ids.size()); + TEST_ASSERT_EQUAL_UINT64(0x7FFFFFFFFFFFFFFFULL, fix.ctx.ids[1]); + + // Test another large value to verify the history doesn't reject it + fix.push_single(2, 0x8000000000000000ULL); + udpard_rx_poll(&fix.rx, 2); + TEST_ASSERT_EQUAL_size_t(3, fix.ctx.ids.size()); + TEST_ASSERT_EQUAL_UINT64(0x8000000000000000ULL, fix.ctx.ids[2]); + + TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions); +} + +/// Test zero extent handling - should accept transfers but truncate payload +void test_udpard_rx_zero_extent() +{ + instrumented_allocator_t tx_alloc_transfer{}; + instrumented_allocator_t tx_alloc_payload{}; + instrumented_allocator_t rx_alloc_frag{}; + instrumented_allocator_t rx_alloc_session{}; + instrumented_allocator_new(&tx_alloc_transfer); + instrumented_allocator_new(&tx_alloc_payload); + instrumented_allocator_new(&rx_alloc_frag); + instrumented_allocator_new(&rx_alloc_session); + + udpard_tx_mem_resources_t tx_mem{}; + tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); + for (auto& res : tx_mem.payload) { + res = instrumented_allocator_make_resource(&tx_alloc_payload); + } + const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; + + udpard_tx_t tx{}; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0xAAAABBBBCCCCDDDDULL, 200U, 64, tx_mem, &tx_vtable)); + std::vector frames; + tx.user = &frames; + + udpard_rx_t rx{}; + udpard_rx_port_t port{}; + const uint64_t topic_hash = 0xFEDCBA9876543210ULL; + udpard_rx_new(&rx, nullptr); + + // Create port with zero extent + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, topic_hash, 0, udpard_rx_unordered, 0, rx_mem, &callbacks)); + + // Track received transfers + struct ZeroExtentContext + { + size_t count = 0; + size_t payload_size_stored = 0; + size_t payload_size_wire = 0; + }; + ZeroExtentContext zctx{}; + + // Custom callback for zero extent test + struct ZeroExtentCallbacks + { + static void on_message(udpard_rx_t* const rx_arg, + udpard_rx_port_t* const port_arg, + const udpard_rx_transfer_t transfer) + { + auto* z = static_cast(rx_arg->user); + z->count++; + z->payload_size_stored = transfer.payload_size_stored; + z->payload_size_wire = transfer.payload_size_wire; + udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port_arg->memory.fragment)); + } + static void on_collision(udpard_rx_t*, udpard_rx_port_t*, udpard_remote_t) {} + }; + static constexpr udpard_rx_port_vtable_t zero_callbacks{ .on_message = &ZeroExtentCallbacks::on_message, + .on_collision = &ZeroExtentCallbacks::on_collision }; + port.vtable = &zero_callbacks; + rx.user = &zctx; + + // Send a small single-frame transfer + std::array payload{}; + for (size_t i = 0; i < payload.size(); i++) { + payload[i] = static_cast(i); + } + + const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); + constexpr uint16_t iface_bitmap_1 = (1U << 0U); + const udpard_udpip_ep_t source{ .ip = 0x0A000002U, .port = 7502U }; + + const udpard_us_t now = 0; + frames.clear(); + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + now, + now + 1000000, + iface_bitmap_1, + udpard_prio_nominal, + topic_hash, + 5U, + payload_view, + nullptr, + UDPARD_USER_CONTEXT_NULL)); + udpard_tx_poll(&tx, now, UDPARD_IFACE_BITMAP_ALL); + 
TEST_ASSERT_FALSE(frames.empty()); + + // Deliver to RX with zero extent + const udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr }; + for (const auto& f : frames) { + TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &port, now, source, f.datagram, tx_payload_deleter, f.iface_index)); + } + udpard_rx_poll(&rx, now); + + // Transfer should be received - zero extent means minimal/no truncation for single-frame + // The library may still store some payload for single-frame transfers even with zero extent + TEST_ASSERT_EQUAL_size_t(1, zctx.count); + TEST_ASSERT_TRUE(zctx.payload_size_stored <= payload.size()); // At most the original size + TEST_ASSERT_EQUAL_size_t(payload.size(), zctx.payload_size_wire); // Wire size is original + + // Cleanup + udpard_rx_port_free(&rx, &port); + udpard_tx_free(&tx); + TEST_ASSERT_EQUAL(0, tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL(0, tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL(0, rx_alloc_session.allocated_fragments); + instrumented_allocator_reset(&tx_alloc_transfer); + instrumented_allocator_reset(&tx_alloc_payload); + instrumented_allocator_reset(&rx_alloc_frag); + instrumented_allocator_reset(&rx_alloc_session); +} + +/// Test empty payload transfer (zero-size payload) +void test_udpard_empty_payload() +{ + Fixture fix{ udpard_rx_unordered, 0 }; + + // Send an empty payload + fix.frames.clear(); + const udpard_bytes_scattered_t empty_payload = make_scattered(nullptr, 0); + const udpard_us_t deadline = 1000000; + constexpr uint16_t iface_bitmap_1 = (1U << 0U); + + TEST_ASSERT_TRUE(udpard_tx_push(&fix.tx, + 0, + deadline, + iface_bitmap_1, + udpard_prio_nominal, + fix.topic_hash, + 10U, + empty_payload, + nullptr, + UDPARD_USER_CONTEXT_NULL)); + udpard_tx_poll(&fix.tx, 0, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_FALSE(fix.frames.empty()); + + // Deliver to RX + for (const auto& f : fix.frames) { + TEST_ASSERT_TRUE( + udpard_rx_port_push(&fix.rx, &fix.port, 0, fix.source, f.datagram, fix.tx_payload_deleter, f.iface_index)); + } + udpard_rx_poll(&fix.rx, 0); + + // Empty transfer should be received + TEST_ASSERT_EQUAL_size_t(1, fix.ctx.ids.size()); + TEST_ASSERT_EQUAL_UINT64(10U, fix.ctx.ids[0]); +} + +/// Test priority levels from exceptional (0) to optional (7) +void test_udpard_all_priority_levels() +{ + Fixture fix{ udpard_rx_unordered, 0 }; + udpard_us_t now = 0; + + constexpr uint16_t iface_bitmap_1 = (1U << 0U); + + // Test all 8 priority levels + for (uint8_t prio = 0; prio < UDPARD_PRIORITY_COUNT; prio++) { + fix.frames.clear(); + std::array payload{}; + payload[0] = prio; + const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); + + TEST_ASSERT_TRUE(udpard_tx_push(&fix.tx, + now, + now + 1000000, + iface_bitmap_1, + static_cast(prio), + fix.topic_hash, + 100U + prio, + payload_view, + nullptr, + UDPARD_USER_CONTEXT_NULL)); + udpard_tx_poll(&fix.tx, now, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_FALSE(fix.frames.empty()); + + for (const auto& f : fix.frames) { + TEST_ASSERT_TRUE(udpard_rx_port_push( + &fix.rx, &fix.port, now, fix.source, f.datagram, fix.tx_payload_deleter, f.iface_index)); + } + udpard_rx_poll(&fix.rx, now); + now++; + } + + // All 8 transfers should be received + TEST_ASSERT_EQUAL_size_t(UDPARD_PRIORITY_COUNT, fix.ctx.ids.size()); + for (uint8_t prio = 0; prio < UDPARD_PRIORITY_COUNT; prio++) { + TEST_ASSERT_EQUAL_UINT64(100U + prio, fix.ctx.ids[prio]); + } +} + +/// Test 
collision detection (topic hash mismatch) +void test_udpard_topic_hash_collision() +{ + instrumented_allocator_t tx_alloc_transfer{}; + instrumented_allocator_t tx_alloc_payload{}; + instrumented_allocator_t rx_alloc_frag{}; + instrumented_allocator_t rx_alloc_session{}; + instrumented_allocator_new(&tx_alloc_transfer); + instrumented_allocator_new(&tx_alloc_payload); + instrumented_allocator_new(&rx_alloc_frag); + instrumented_allocator_new(&rx_alloc_session); + + udpard_tx_mem_resources_t tx_mem{}; + tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); + for (auto& res : tx_mem.payload) { + res = instrumented_allocator_make_resource(&tx_alloc_payload); + } + const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; + + udpard_tx_t tx{}; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x1111222233334444ULL, 300U, 64, tx_mem, &tx_vtable)); + std::vector frames; + tx.user = &frames; + + udpard_rx_t rx{}; + udpard_rx_port_t port{}; + Context ctx{}; + const uint64_t rx_topic_hash = 0xAAAAAAAAAAAAAAAAULL; // Different from TX + const uint64_t tx_topic_hash = 0xBBBBBBBBBBBBBBBBULL; // Different from RX + ctx.expected_uid = tx.local_uid; + ctx.source = { .ip = 0x0A000003U, .port = 7503U }; + udpard_rx_new(&rx, nullptr); + rx.user = &ctx; + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, rx_topic_hash, 1024, udpard_rx_unordered, 0, rx_mem, &callbacks)); + + // Send with mismatched topic hash + std::array payload{}; + const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); + constexpr uint16_t iface_bitmap_1 = (1U << 0U); + + const udpard_us_t now = 0; + frames.clear(); + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + now, + now + 1000000, + iface_bitmap_1, + udpard_prio_nominal, + tx_topic_hash, // Different from port's topic_hash + 1U, + payload_view, + nullptr, + UDPARD_USER_CONTEXT_NULL)); + udpard_tx_poll(&tx, now, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_FALSE(frames.empty()); + + // Deliver to RX - should trigger collision callback + const udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr }; + for (const auto& f : frames) { + TEST_ASSERT_TRUE( + udpard_rx_port_push(&rx, &port, now, ctx.source, f.datagram, tx_payload_deleter, f.iface_index)); + } + udpard_rx_poll(&rx, now); + + // No transfers received, but collision detected + TEST_ASSERT_EQUAL_size_t(0, ctx.ids.size()); + TEST_ASSERT_EQUAL_size_t(1, ctx.collisions); + + // Cleanup + udpard_rx_port_free(&rx, &port); + udpard_tx_free(&tx); + TEST_ASSERT_EQUAL(0, tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL(0, tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL(0, rx_alloc_session.allocated_fragments); + instrumented_allocator_reset(&tx_alloc_transfer); + instrumented_allocator_reset(&tx_alloc_payload); + instrumented_allocator_reset(&rx_alloc_frag); + instrumented_allocator_reset(&rx_alloc_session); +} + +} // namespace + +extern "C" void setUp() {} + +extern "C" void tearDown() {} + +int main() +{ + UNITY_BEGIN(); + RUN_TEST(test_udpard_rx_unordered_duplicates); + RUN_TEST(test_udpard_rx_ordered_out_of_order); + RUN_TEST(test_udpard_rx_ordered_head_advanced_late); + RUN_TEST(test_udpard_rx_ordered_reject_far_past); + RUN_TEST(test_udpard_tx_feedback_always_called); + RUN_TEST(test_udpard_tx_push_p2p); + RUN_TEST(test_udpard_tx_minimum_mtu); + 
+    RUN_TEST(test_udpard_transfer_id_boundaries);
+    RUN_TEST(test_udpard_rx_zero_extent);
+    RUN_TEST(test_udpard_empty_payload);
+    RUN_TEST(test_udpard_all_priority_levels);
+    RUN_TEST(test_udpard_topic_hash_collision);
+    return UNITY_END();
+}
diff --git a/tests/src/test_e2e_random.cpp b/tests/src/test_e2e_random.cpp
new file mode 100644
index 0000000..7dc6926
--- /dev/null
+++ b/tests/src/test_e2e_random.cpp
@@ -0,0 +1,418 @@
+/// This software is distributed under the terms of the MIT License.
+/// Copyright (C) OpenCyphal Development Team
+/// Copyright Amazon.com Inc. or its affiliates.
+/// SPDX-License-Identifier: MIT
+
+// ReSharper disable CppPassValueParameterByConstReference
+
+#include <udpard.h>
+#include "helpers.h"
+#include <unity.h>
+#include <array>
+#include <cstdlib>
+#include <unordered_map>
+#include <vector>
+
+namespace {
+
+struct TransferKey
+{
+    uint64_t transfer_id;
+    uint64_t topic_hash;
+    bool operator==(const TransferKey& other) const
+    {
+        return (transfer_id == other.transfer_id) && (topic_hash == other.topic_hash);
+    }
+};
+
+struct TransferKeyHash
+{
+    size_t operator()(const TransferKey& key) const
+    {
+        return (std::hash<uint64_t>{}(key.transfer_id) << 1U) ^ std::hash<uint64_t>{}(key.topic_hash);
+    }
+};
+
+struct ExpectedPayload
+{
+    std::vector<uint8_t> payload;
+    size_t payload_size_wire;
+};
+
+struct Context
+{
+    std::unordered_map<TransferKey, ExpectedPayload, TransferKeyHash> expected;
+    size_t received = 0;
+    size_t collisions = 0;
+    size_t truncated = 0;
+    uint64_t remote_uid = 0;
+    size_t reliable_feedback_success = 0;
+    size_t reliable_feedback_failure = 0;
+    std::array<udpard_udpip_ep_t, UDPARD_IFACE_COUNT_MAX> remote_endpoints{};
+};
+
+struct Arrival
+{
+    udpard_bytes_mut_t datagram;
+    uint_fast8_t iface_index;
+};
+
+struct CapturedFrame
+{
+    udpard_bytes_mut_t datagram;
+    uint_fast8_t iface_index;
+};
+
+size_t random_range(const size_t min, const size_t max)
+{
+    const size_t span = max - min + 1U;
+    return min + (static_cast<size_t>(rand()) % span);
+}
+
+void fill_random(std::vector<uint8_t>& data)
+{
+    for (auto& byte : data) {
+        byte = static_cast<uint8_t>(random_range(0, UINT8_MAX));
+    }
+}
+
+void shuffle_frames(std::vector<Arrival>& frames)
+{
+    for (size_t i = frames.size(); i > 1; i--) {
+        const size_t j = random_range(0, i - 1);
+        std::swap(frames[i - 1U], frames[j]);
+    }
+}
+
+void tx_refcount_free(void* const user, const size_t size, void* const payload)
+{
+    (void)user;
+    udpard_tx_refcount_dec(udpard_bytes_t{ .size = size, .data = payload });
+}
+
+// Shared deleter for captured TX frames.
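+// Captured datagrams alias refcounted TX payload memory, so the deleter decrements the
+// reference count rather than freeing the buffer outright; the last holder releases it.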
+constexpr udpard_deleter_vtable_t tx_refcount_deleter_vt{ .free = &tx_refcount_free }; + +bool capture_tx_frame_impl(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) +{ + auto* frames = static_cast*>(tx->user); + if (frames == nullptr) { + return false; + } + udpard_tx_refcount_inc(ejection->datagram); + void* const data = const_cast(ejection->datagram.data); // NOLINT(cppcoreguidelines-pro-type-const-cast) + frames->push_back(CapturedFrame{ .datagram = { .size = ejection->datagram.size, .data = data }, + .iface_index = ejection->iface_index }); + return true; +} + +bool capture_tx_frame_subject(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) +{ + return capture_tx_frame_impl(tx, ejection); +} + +bool capture_tx_frame_p2p(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t /*dest*/) +{ + return capture_tx_frame_impl(tx, ejection); +} + +constexpr udpard_tx_vtable_t tx_vtable{ .eject_subject = &capture_tx_frame_subject, + .eject_p2p = &capture_tx_frame_p2p }; + +void record_feedback(udpard_tx_t*, const udpard_tx_feedback_t fb) +{ + auto* ctx = static_cast(fb.user.ptr[0]); + if (ctx != nullptr) { + if (fb.acknowledgements > 0U) { + ctx->reliable_feedback_success++; + } else { + ctx->reliable_feedback_failure++; + } + } +} + +void on_ack_response(udpard_rx_t*, udpard_rx_port_t* port, const udpard_rx_transfer_t tr) +{ + udpard_fragment_free_all(tr.payload, udpard_make_deleter(port->memory.fragment)); +} +constexpr udpard_rx_port_vtable_t ack_callbacks{ .on_message = &on_ack_response, .on_collision = nullptr }; + +void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) +{ + auto* const ctx = static_cast(rx->user); + + // Match the incoming transfer against the expected table keyed by topic hash and transfer-ID. + const TransferKey key{ .transfer_id = transfer.transfer_id, .topic_hash = port->topic_hash }; + const auto it = ctx->expected.find(key); + if (it == ctx->expected.end()) { + udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); + return; + } + + // Gather fragments into a contiguous buffer so we can compare the stored prefix (payload may be truncated). + std::vector assembled(transfer.payload_size_stored); + const udpard_fragment_t* payload_cursor = transfer.payload; + const size_t gathered = udpard_fragment_gather(&payload_cursor, 0, transfer.payload_size_stored, assembled.data()); + TEST_ASSERT_EQUAL_size_t(transfer.payload_size_stored, gathered); + TEST_ASSERT_TRUE(transfer.payload_size_stored <= it->second.payload.size()); + TEST_ASSERT_EQUAL_size_t(it->second.payload_size_wire, transfer.payload_size_wire); + if (transfer.payload_size_stored > 0U) { + TEST_ASSERT_EQUAL_MEMORY(it->second.payload.data(), assembled.data(), transfer.payload_size_stored); + } + + // Verify remote and the return path discovery. + TEST_ASSERT_EQUAL_UINT64(ctx->remote_uid, transfer.remote.uid); + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + if ((transfer.remote.endpoints[i].ip != 0U) || (transfer.remote.endpoints[i].port != 0U)) { + TEST_ASSERT_EQUAL_UINT32(ctx->remote_endpoints[i].ip, transfer.remote.endpoints[i].ip); + TEST_ASSERT_EQUAL_UINT16(ctx->remote_endpoints[i].port, transfer.remote.endpoints[i].port); + } + } + if (transfer.payload_size_stored < transfer.payload_size_wire) { + ctx->truncated++; + } + + // Clean up. 
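+    // (Ownership of the fragment chain passed to this callback; releasing it here is what
+    // keeps the instrumented allocators' leak checks at the end of the test at zero.)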
+ udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); + ctx->expected.erase(it); + ctx->received++; +} + +void on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_remote_t remote) +{ + auto* ctx = static_cast(rx->user); + (void)port; + (void)remote; + ctx->collisions++; +} +constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message, .on_collision = &on_collision }; + +/// Randomized end-to-end TX/RX covering fragmentation, reordering, and extent-driven truncation. +void test_udpard_tx_rx_end_to_end() +{ + seed_prng(); + + // TX allocator setup and pipeline initialization. + instrumented_allocator_t tx_alloc_transfer{}; + instrumented_allocator_new(&tx_alloc_transfer); + instrumented_allocator_t tx_alloc_payload{}; + instrumented_allocator_new(&tx_alloc_payload); + udpard_tx_mem_resources_t tx_mem{}; + tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); + for (auto& res : tx_mem.payload) { + res = instrumented_allocator_make_resource(&tx_alloc_payload); + } + udpard_tx_t tx{}; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0A0B0C0D0E0F1011ULL, 123U, 256, tx_mem, &tx_vtable)); + instrumented_allocator_t ack_alloc_transfer{}; + instrumented_allocator_t ack_alloc_payload{}; + instrumented_allocator_new(&ack_alloc_transfer); + instrumented_allocator_new(&ack_alloc_payload); + udpard_tx_mem_resources_t ack_mem{}; + ack_mem.transfer = instrumented_allocator_make_resource(&ack_alloc_transfer); + for (auto& res : ack_mem.payload) { + res = instrumented_allocator_make_resource(&ack_alloc_payload); + } + udpard_tx_t ack_tx{}; + TEST_ASSERT_TRUE(udpard_tx_new(&ack_tx, 0x1020304050607080ULL, 321U, 256, ack_mem, &tx_vtable)); + + // RX allocator setup and shared RX instance with callbacks. + instrumented_allocator_t rx_alloc_frag{}; + instrumented_allocator_new(&rx_alloc_frag); + instrumented_allocator_t rx_alloc_session{}; + instrumented_allocator_new(&rx_alloc_session); + const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; + udpard_rx_t rx; + udpard_rx_new(&rx, &ack_tx); + instrumented_allocator_t ack_rx_alloc_frag{}; + instrumented_allocator_t ack_rx_alloc_session{}; + instrumented_allocator_new(&ack_rx_alloc_frag); + instrumented_allocator_new(&ack_rx_alloc_session); + const udpard_rx_mem_resources_t ack_rx_mem{ .session = instrumented_allocator_make_resource(&ack_rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&ack_rx_alloc_frag) }; + udpard_rx_t ack_rx{}; + udpard_rx_port_t ack_port{}; + udpard_rx_new(&ack_rx, &tx); + + // Test parameters. + constexpr std::array topic_hashes{ 0x123456789ABCDEF0ULL, + 0x0FEDCBA987654321ULL, + 0x00ACE00ACE00ACEULL }; + constexpr std::array modes{ udpard_rx_ordered, udpard_rx_unordered, udpard_rx_ordered }; + constexpr std::array windows{ 2000, 0, 5000 }; + constexpr std::array extents{ 1000, 5000, SIZE_MAX }; + + // Configure ports with varied extents and reordering windows to cover truncation and different RX modes. + std::array ports{}; + for (size_t i = 0; i < ports.size(); i++) { + TEST_ASSERT_TRUE( + udpard_rx_port_new(&ports[i], topic_hashes[i], extents[i], modes[i], windows[i], rx_mem, &callbacks)); + } + + // Setup the context. 
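+    // The per-interface endpoints recorded here are what on_message() expects return-path
+    // discovery to report back via transfer.remote.endpoints.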
+ Context ctx{}; + ctx.remote_uid = tx.local_uid; + for (size_t i = 0; i < ports.size(); i++) { + ctx.remote_endpoints[i] = { .ip = static_cast(0x0A000001U + i), + .port = static_cast(7400U + i) }; + } + rx.user = &ctx; + constexpr udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr }; + // Ack path wiring. + std::vector frames; + tx.user = &frames; + std::vector ack_frames; + ack_tx.user = &ack_frames; + TEST_ASSERT_TRUE( + udpard_rx_port_new(&ack_port, tx.local_uid, 16, udpard_rx_unordered, 0, ack_rx_mem, &ack_callbacks)); + std::array ack_sources{}; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + ack_sources[i] = { .ip = static_cast(0x0A000020U + i), .port = static_cast(7700U + i) }; + } + + // Main test loop: generate transfers, push into TX, drain and shuffle frames, push into RX. + std::array transfer_ids{ static_cast(rand()), + static_cast(rand()), + static_cast(rand()) }; + size_t reliable_total = 0; + udpard_us_t now = 0; + for (size_t transfer_index = 0; transfer_index < 1000; transfer_index++) { + now += static_cast(random_range(1000, 5000)); + frames.clear(); + + // Pick a port, build a random payload, and remember what to expect on that topic. + const size_t port_index = random_range(0, ports.size() - 1U); + const uint64_t transfer_id = transfer_ids[port_index]++; + const size_t payload_size = random_range(0, 10000); + std::vector payload(payload_size); + fill_random(payload); + const bool reliable = (random_range(0, 3) == 0); // About a quarter reliable. + if (reliable) { + reliable_total++; + } + + // Each transfer is sent on all redundant interfaces with different MTUs to exercise fragmentation variety. + const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); + const auto priority = static_cast(random_range(0, UDPARD_PRIORITY_COUNT - 1U)); + const TransferKey key{ .transfer_id = transfer_id, .topic_hash = topic_hashes[port_index] }; + const bool inserted = + ctx.expected.emplace(key, ExpectedPayload{ .payload = payload, .payload_size_wire = payload.size() }).second; + TEST_ASSERT_TRUE(inserted); + + // Generate MTUs per redundant interface. + std::array mtu_values{}; + for (auto& x : mtu_values) { + x = random_range(UDPARD_MTU_MIN, 3000U); + } + for (size_t iface = 0; iface < UDPARD_IFACE_COUNT_MAX; iface++) { + tx.mtu[iface] = mtu_values[iface]; + } + // Enqueue one transfer spanning all interfaces. + const udpard_us_t deadline = now + 1000000; + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + now, + deadline, + UDPARD_IFACE_BITMAP_ALL, + priority, + topic_hashes[port_index], + transfer_id, + payload_view, + reliable ? &record_feedback : nullptr, + reliable ? make_user_context(&ctx) : UDPARD_USER_CONTEXT_NULL)); + udpard_tx_poll(&tx, now, UDPARD_IFACE_BITMAP_ALL); + + // Shuffle and push frames into the RX pipeline, simulating out-of-order redundant arrival. + std::vector arrivals; + arrivals.reserve(frames.size()); + for (const auto& [datagram, iface_index] : frames) { + arrivals.push_back(Arrival{ .datagram = datagram, .iface_index = iface_index }); + } + shuffle_frames(arrivals); + const size_t keep_iface = reliable ? random_range(0, UDPARD_IFACE_COUNT_MAX - 1U) : 0U; + const size_t loss_iface = reliable ? 
((keep_iface + 1U) % UDPARD_IFACE_COUNT_MAX) : UDPARD_IFACE_COUNT_MAX; + const size_t ack_loss_iface = loss_iface; + for (const auto& [datagram, iface_index] : arrivals) { + const bool drop = reliable && (iface_index == loss_iface) && ((rand() % 3) == 0); + if (drop) { + udpard_tx_refcount_dec(udpard_bytes_t{ .size = datagram.size, .data = datagram.data }); + } else { + TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, + &ports[port_index], + now, + ctx.remote_endpoints[iface_index], + datagram, + tx_payload_deleter, + iface_index)); + } + now += 1; + } + + // Let the RX pipeline purge timeouts and deliver ready transfers. + udpard_rx_poll(&rx, now); + ack_frames.clear(); + udpard_tx_poll(&ack_tx, now, UDPARD_IFACE_BITMAP_ALL); + bool ack_delivered = false; + for (const auto& [datagram, iface_index] : ack_frames) { + const bool drop_ack = reliable && (iface_index == ack_loss_iface); + if (drop_ack) { + udpard_tx_refcount_dec(udpard_bytes_t{ .size = datagram.size, .data = datagram.data }); + continue; + } + ack_delivered = true; + TEST_ASSERT_TRUE(udpard_rx_port_push( + &ack_rx, &ack_port, now, ack_sources[iface_index], datagram, tx_payload_deleter, iface_index)); + } + if (reliable && !ack_delivered && !ack_frames.empty()) { + const auto& [datagram, iface_index] = ack_frames.front(); + TEST_ASSERT_TRUE(udpard_rx_port_push( + &ack_rx, &ack_port, now, ack_sources[iface_index], datagram, tx_payload_deleter, iface_index)); + } + udpard_rx_poll(&ack_rx, now); + } + + // Final poll/validation and cleanup. + udpard_rx_poll(&rx, now + 1000000); + udpard_rx_poll(&ack_rx, now + 1000000); + TEST_ASSERT_TRUE(ctx.expected.empty()); + TEST_ASSERT_EQUAL_size_t(1000, ctx.received); + TEST_ASSERT_TRUE(ctx.truncated > 0); + TEST_ASSERT_EQUAL_size_t(0, ctx.collisions); + TEST_ASSERT_EQUAL_size_t(reliable_total, ctx.reliable_feedback_success); + TEST_ASSERT_EQUAL_size_t(0, ctx.reliable_feedback_failure); + for (auto& port : ports) { + udpard_rx_port_free(&rx, &port); + } + udpard_rx_port_free(&ack_rx, &ack_port); + udpard_tx_free(&tx); + udpard_tx_free(&ack_tx); + TEST_ASSERT_EQUAL_size_t(0, rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, rx_alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, ack_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, ack_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, ack_rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, ack_rx_alloc_session.allocated_fragments); + instrumented_allocator_reset(&rx_alloc_frag); + instrumented_allocator_reset(&rx_alloc_session); + instrumented_allocator_reset(&tx_alloc_transfer); + instrumented_allocator_reset(&tx_alloc_payload); + instrumented_allocator_reset(&ack_alloc_transfer); + instrumented_allocator_reset(&ack_alloc_payload); + instrumented_allocator_reset(&ack_rx_alloc_frag); + instrumented_allocator_reset(&ack_rx_alloc_session); +} + +} // namespace + +extern "C" void setUp() {} + +extern "C" void tearDown() {} + +int main() +{ + UNITY_BEGIN(); + RUN_TEST(test_udpard_tx_rx_end_to_end); + return UNITY_END(); +} diff --git a/tests/src/test_e2e_reliable_ordered.cpp b/tests/src/test_e2e_reliable_ordered.cpp new file mode 100644 index 0000000..009c1d7 --- /dev/null +++ b/tests/src/test_e2e_reliable_ordered.cpp @@ -0,0 +1,463 @@ +/// This software is distributed under the terms of the MIT License. 
+/// Copyright (C) OpenCyphal Development Team
+/// Copyright Amazon.com Inc. or its affiliates.
+/// SPDX-License-Identifier: MIT
+/// This test validates reliable delivery with ORDERED mode under packet loss and reordering.
+
+#include <udpard.h>
+#include "helpers.h"
+#include <unity.h>
+#include <array>
+#include <vector>
+
+namespace {
+
+constexpr size_t CyphalHeaderSize = 48; // Cyphal/UDP header size
+
+struct CapturedFrame
+{
+    udpard_bytes_mut_t datagram;
+    uint_fast8_t iface_index;
+};
+
+void tx_refcount_free(void* const user, const size_t size, void* const payload)
+{
+    (void)user;
+    udpard_tx_refcount_dec(udpard_bytes_t{ .size = size, .data = payload });
+}
+
+constexpr udpard_deleter_vtable_t tx_refcount_deleter_vt{ .free = &tx_refcount_free };
+constexpr udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr };
+
+void drop_frame(const CapturedFrame& frame)
+{
+    udpard_tx_refcount_dec(udpard_bytes_t{ .size = frame.datagram.size, .data = frame.datagram.data });
+}
+
+// Extract the transfer_id from the Cyphal/UDP header (bytes 16-23 of the datagram).
+uint64_t extract_transfer_id(const udpard_bytes_mut_t& datagram)
+{
+    if (datagram.size < 24) {
+        return 0;
+    }
+    const auto* p = static_cast<const uint8_t*>(datagram.data);
+    uint64_t tid = 0;
+    for (size_t i = 0; i < 8; i++) {
+        tid |= static_cast<uint64_t>(p[16 + i]) << (i * 8U);
+    }
+    return tid;
+}
+
+// Extract the transfer_id being ACKed from the ACK payload.
+// ACK payload format: topic_hash(8) + transfer_id(8).
+uint64_t extract_acked_transfer_id(const udpard_bytes_mut_t& datagram)
+{
+    constexpr size_t p2p_tid_offset = CyphalHeaderSize + 8;
+    if (datagram.size < p2p_tid_offset + 8) {
+        return 0;
+    }
+    const auto* p = static_cast<const uint8_t*>(datagram.data);
+    uint64_t tid = 0;
+    for (size_t i = 0; i < 8; i++) {
+        tid |= static_cast<uint64_t>(p[p2p_tid_offset + i]) << (i * 8U);
+    }
+    return tid;
+}
+
+bool capture_tx_frame_impl(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection)
+{
+    auto* frames = static_cast<std::vector<CapturedFrame>*>(tx->user);
+    if (frames == nullptr) {
+        return false;
+    }
+    udpard_tx_refcount_inc(ejection->datagram);
+    void* const data = const_cast<void*>(ejection->datagram.data); // NOLINT
+    const udpard_bytes_mut_t dgram{ .size = ejection->datagram.size, .data = data };
+    frames->push_back(CapturedFrame{ .datagram = dgram, .iface_index = ejection->iface_index });
+    return true;
+}
+
+bool capture_tx_frame_subject(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection)
+{
+    return capture_tx_frame_impl(tx, ejection);
+}
+
+bool capture_tx_frame_p2p(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t /*dest*/)
+{
+    return capture_tx_frame_impl(tx, ejection);
+}
+
+constexpr udpard_tx_vtable_t tx_vtable{ .eject_subject = &capture_tx_frame_subject,
+                                        .eject_p2p = &capture_tx_frame_p2p };
+
+struct FeedbackState
+{
+    size_t count = 0;
+    uint16_t acknowledgements = 0;
+};
+
+void record_feedback(udpard_tx_t*, const udpard_tx_feedback_t fb)
+{
+    auto* st = static_cast<FeedbackState*>(fb.user.ptr[0]);
+    if (st != nullptr) {
+        st->count++;
+        st->acknowledgements = fb.acknowledgements;
+    }
+}
+
+struct ReceiverContext
+{
+    std::vector<uint64_t> received_transfer_ids;
+};
+
+void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer)
+{
+    auto* ctx = static_cast<ReceiverContext*>(rx->user);
+    ctx->received_transfer_ids.push_back(transfer.transfer_id);
+    udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment));
+}
+
+void on_collision(udpard_rx_t*, udpard_rx_port_t*, udpard_remote_t) {}
+
+constexpr udpard_rx_port_vtable_t
topic_callbacks{ .on_message = &on_message, .on_collision = &on_collision }; + +void on_ack_only(udpard_rx_t*, udpard_rx_port_t* port, const udpard_rx_transfer_t tr) +{ + udpard_fragment_free_all(tr.payload, udpard_make_deleter(port->memory.fragment)); +} + +constexpr udpard_rx_port_vtable_t ack_only_callbacks{ .on_message = &on_ack_only, .on_collision = &on_collision }; + +/// Test scenario: +/// - Sender publishes messages A, B, C (tid=100, 101, 102) in reliable mode, in quick succession. +/// - A is delivered successfully, establishing the session baseline for the receiver in ORDERED mode. +/// - First attempt to deliver B fails (lost). +/// - Every first ACK for B and C is lost, forcing sender to retransmit. +/// +/// The receiver first sees A, then C (tid=102), which gets interned waiting for lower transfer IDs. +/// When B (tid=101) arrives via retransmission, it gets delivered first, then C is ejected in order. +/// +/// Transmission sequence: +/// 1. A (tid=100) delivered successfully -- establishes ordered session +/// 2. B (tid=101) lost +/// 3. C (tid=102) delivered but ACK lost -- interned, waiting for tid < 102 +/// 4. B (tid=101) delivered but ACK lost -- delivered first, then C ejected +/// 5. C (tid=102) re-delivered, duplicate ignored, ACK delivered +/// 6. B (tid=101) re-delivered, duplicate ignored, ACK delivered +/// +/// Receiver must validate: receives A, then B, then C, in correct order without duplicates. +void test_reliable_ordered_with_loss_and_reordering() +{ + seed_prng(); + + // Allocators + instrumented_allocator_t sender_tx_alloc_transfer{}; + instrumented_allocator_t sender_tx_alloc_payload{}; + instrumented_allocator_t receiver_rx_alloc_frag{}; + instrumented_allocator_t receiver_rx_alloc_session{}; + instrumented_allocator_t receiver_tx_alloc_transfer{}; + instrumented_allocator_t receiver_tx_alloc_payload{}; + instrumented_allocator_t sender_rx_alloc_frag{}; + instrumented_allocator_t sender_rx_alloc_session{}; + instrumented_allocator_new(&sender_tx_alloc_transfer); + instrumented_allocator_new(&sender_tx_alloc_payload); + instrumented_allocator_new(&receiver_rx_alloc_frag); + instrumented_allocator_new(&receiver_rx_alloc_session); + instrumented_allocator_new(&receiver_tx_alloc_transfer); + instrumented_allocator_new(&receiver_tx_alloc_payload); + instrumented_allocator_new(&sender_rx_alloc_frag); + instrumented_allocator_new(&sender_rx_alloc_session); + + // Memory resources + udpard_tx_mem_resources_t sender_tx_mem{}; + sender_tx_mem.transfer = instrumented_allocator_make_resource(&sender_tx_alloc_transfer); + for (auto& res : sender_tx_mem.payload) { + res = instrumented_allocator_make_resource(&sender_tx_alloc_payload); + } + const udpard_rx_mem_resources_t sender_rx_mem{ .session = + instrumented_allocator_make_resource(&sender_rx_alloc_session), + .fragment = + instrumented_allocator_make_resource(&sender_rx_alloc_frag) }; + + udpard_tx_mem_resources_t receiver_tx_mem{}; + receiver_tx_mem.transfer = instrumented_allocator_make_resource(&receiver_tx_alloc_transfer); + for (auto& res : receiver_tx_mem.payload) { + res = instrumented_allocator_make_resource(&receiver_tx_alloc_payload); + } + const udpard_rx_mem_resources_t receiver_rx_mem{ .session = + instrumented_allocator_make_resource(&receiver_rx_alloc_session), + .fragment = + instrumented_allocator_make_resource(&receiver_rx_alloc_frag) }; + + // Node identifiers + constexpr uint64_t sender_uid = 0xAAAA1111BBBB2222ULL; + constexpr uint64_t receiver_uid = 0xCCCC3333DDDD4444ULL; + const 
udpard_udpip_ep_t sender_source{ .ip = 0x0A000001U, .port = 7400U }; + const udpard_udpip_ep_t receiver_source{ .ip = 0x0A000011U, .port = 7500U }; + constexpr uint64_t topic_hash = 0x0123456789ABCDEFULL; + constexpr uint64_t tid_a = 100; + constexpr uint64_t tid_b = 101; + constexpr uint64_t tid_c = 102; + constexpr uint16_t iface_bitmap_1 = (1U << 0U); + + // Use a large reordering window to ensure retransmissions arrive within the window. + // With exponential backoff, retransmissions can take significant time. + constexpr udpard_us_t reordering_window = 1000000; // 1 second + constexpr udpard_us_t ack_timeout = 10000; // 10ms baseline + + // Sender TX/RX + udpard_tx_t sender_tx{}; + std::vector sender_frames; + TEST_ASSERT_TRUE(udpard_tx_new(&sender_tx, sender_uid, 100, 64, sender_tx_mem, &tx_vtable)); + sender_tx.user = &sender_frames; + sender_tx.ack_baseline_timeout = ack_timeout; + + udpard_rx_t sender_rx{}; + udpard_rx_new(&sender_rx, &sender_tx); + + udpard_rx_port_t sender_p2p_port{}; + TEST_ASSERT_TRUE( + udpard_rx_port_new(&sender_p2p_port, sender_uid, 16, udpard_rx_unordered, 0, sender_rx_mem, &ack_only_callbacks)); + + // Receiver TX/RX + udpard_tx_t receiver_tx{}; + std::vector receiver_frames; + TEST_ASSERT_TRUE(udpard_tx_new(&receiver_tx, receiver_uid, 200, 64, receiver_tx_mem, &tx_vtable)); + receiver_tx.user = &receiver_frames; + receiver_tx.ack_baseline_timeout = ack_timeout; + + udpard_rx_t receiver_rx{}; + ReceiverContext receiver_ctx{}; + udpard_rx_new(&receiver_rx, &receiver_tx); + receiver_rx.user = &receiver_ctx; + + udpard_rx_port_t receiver_topic_port{}; + TEST_ASSERT_TRUE(udpard_rx_port_new( + &receiver_topic_port, topic_hash, 4096, udpard_rx_ordered, reordering_window, receiver_rx_mem, &topic_callbacks)); + + // Payloads + const std::array payload_a{ 0xAA, 0xAA, 0xAA, 0xAA }; + const std::array payload_b{ 0xBB, 0xBB, 0xBB, 0xBB }; + const std::array payload_c{ 0xCC, 0xCC, 0xCC, 0xCC }; + + // Feedback states + FeedbackState fb_a{}; + FeedbackState fb_b{}; + FeedbackState fb_c{}; + + udpard_us_t now = 0; + const udpard_us_t deadline = now + 2000000; // 2 second deadline + + // Step 1: Send transfer A that is delivered successfully (establishes the session baseline). 
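+    // The full round trip for one reliable transfer: tx_push -> tx_poll ejects frames ->
+    // rx_port_push on the receiver -> rx_poll delivers the message and queues the ACK ->
+    // the receiver's TX ejects the P2P ACK -> rx_port_push on the sender's P2P port ->
+    // the sender's feedback callback fires with acknowledgements > 0.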
+ TEST_ASSERT_TRUE(udpard_tx_push(&sender_tx, + now, + deadline, + iface_bitmap_1, + udpard_prio_nominal, + topic_hash, + tid_a, + make_scattered(payload_a.data(), payload_a.size()), + &record_feedback, + make_user_context(&fb_a))); + + // Deliver A + sender_frames.clear(); + udpard_tx_poll(&sender_tx, now, UDPARD_IFACE_BITMAP_ALL); + for (const auto& frame : sender_frames) { + TEST_ASSERT_TRUE(udpard_rx_port_push(&receiver_rx, + &receiver_topic_port, + now, + sender_source, + frame.datagram, + tx_payload_deleter, + frame.iface_index)); + } + sender_frames.clear(); + udpard_rx_poll(&receiver_rx, now); + + // Deliver A's ACK back to sender + receiver_frames.clear(); + udpard_tx_poll(&receiver_tx, now, UDPARD_IFACE_BITMAP_ALL); + for (const auto& frame : receiver_frames) { + TEST_ASSERT_TRUE(udpard_rx_port_push( + &sender_rx, &sender_p2p_port, now, receiver_source, frame.datagram, tx_payload_deleter, frame.iface_index)); + } + receiver_frames.clear(); + udpard_rx_poll(&sender_rx, now); + + // Verify A was received + TEST_ASSERT_EQUAL_size_t(1, receiver_ctx.received_transfer_ids.size()); + TEST_ASSERT_EQUAL_UINT64(tid_a, receiver_ctx.received_transfer_ids[0]); + TEST_ASSERT_EQUAL_size_t(1, fb_a.count); + TEST_ASSERT_EQUAL_UINT32(1, fb_a.acknowledgements); + + // Step 2: Push transfers B and C + TEST_ASSERT_TRUE(udpard_tx_push(&sender_tx, + now, + deadline, + iface_bitmap_1, + udpard_prio_nominal, + topic_hash, + tid_b, + make_scattered(payload_b.data(), payload_b.size()), + &record_feedback, + make_user_context(&fb_b))); + + TEST_ASSERT_TRUE(udpard_tx_push(&sender_tx, + now, + deadline, + iface_bitmap_1, + udpard_prio_nominal, + topic_hash, + tid_c, + make_scattered(payload_c.data(), payload_c.size()), + &record_feedback, + make_user_context(&fb_c))); + + // Simulation state tracking + bool b_first_tx_dropped = false; + bool c_first_tx_done = false; + bool b_first_ack_dropped = false; + bool c_first_ack_dropped = false; + size_t iterations = 0; + constexpr size_t max_iterations = 100; + + // Main simulation loop + while (iterations < max_iterations) { + iterations++; + + // Sender transmits frames + sender_frames.clear(); + udpard_tx_poll(&sender_tx, now, UDPARD_IFACE_BITMAP_ALL); + + for (const auto& frame : sender_frames) { + const uint64_t tid = extract_transfer_id(frame.datagram); + + // First transmission of B is lost + if ((tid == tid_b) && !b_first_tx_dropped) { + b_first_tx_dropped = true; + drop_frame(frame); + continue; + } + + // Track first transmission of C + if ((tid == tid_c) && !c_first_tx_done) { + c_first_tx_done = true; + } + + // Deliver frame to receiver + TEST_ASSERT_TRUE(udpard_rx_port_push(&receiver_rx, + &receiver_topic_port, + now, + sender_source, + frame.datagram, + tx_payload_deleter, + frame.iface_index)); + } + sender_frames.clear(); + udpard_rx_poll(&receiver_rx, now); + + // Receiver transmits ACKs + receiver_frames.clear(); + udpard_tx_poll(&receiver_tx, now, UDPARD_IFACE_BITMAP_ALL); + + for (const auto& frame : receiver_frames) { + const uint64_t acked_tid = extract_acked_transfer_id(frame.datagram); + + // First ACK for B is lost + if ((acked_tid == tid_b) && !b_first_ack_dropped) { + b_first_ack_dropped = true; + drop_frame(frame); + continue; + } + + // First ACK for C is lost + if ((acked_tid == tid_c) && !c_first_ack_dropped) { + c_first_ack_dropped = true; + drop_frame(frame); + continue; + } + + // Deliver ACK to sender + TEST_ASSERT_TRUE(udpard_rx_port_push(&sender_rx, + &sender_p2p_port, + now, + receiver_source, + frame.datagram, + 
tx_payload_deleter, + frame.iface_index)); + } + receiver_frames.clear(); + udpard_rx_poll(&sender_rx, now); + + // Check termination condition: both B and C feedbacks received + if ((fb_b.count > 0) && (fb_c.count > 0)) { + break; + } + + // Advance time to trigger retransmission (2x baseline timeout) + now += ack_timeout * 2; + } + + // Wait for reordering window to close and eject pending transfers + now += reordering_window + 10000; + udpard_rx_poll(&receiver_rx, now); + + // Verify the simulation exercised all loss paths + TEST_ASSERT_TRUE(b_first_tx_dropped); + TEST_ASSERT_TRUE(c_first_tx_done); + TEST_ASSERT_TRUE(b_first_ack_dropped); + TEST_ASSERT_TRUE(c_first_ack_dropped); + TEST_ASSERT_LESS_THAN_size_t(max_iterations, iterations); + + // Verify sender received ACKs for all transfers + TEST_ASSERT_EQUAL_size_t(1, fb_b.count); + TEST_ASSERT_EQUAL_UINT32(1, fb_b.acknowledgements); + + TEST_ASSERT_EQUAL_size_t(1, fb_c.count); + TEST_ASSERT_EQUAL_UINT32(1, fb_c.acknowledgements); + + // CRITICAL: Verify receiver got exactly 3 transfers in correct order: A, B, then C + // This validates that ORDERED mode correctly reorders out-of-order arrivals. + TEST_ASSERT_EQUAL_size_t(3, receiver_ctx.received_transfer_ids.size()); + TEST_ASSERT_EQUAL_UINT64(tid_a, receiver_ctx.received_transfer_ids[0]); + TEST_ASSERT_EQUAL_UINT64(tid_b, receiver_ctx.received_transfer_ids[1]); + TEST_ASSERT_EQUAL_UINT64(tid_c, receiver_ctx.received_transfer_ids[2]); + + // Cleanup + udpard_rx_port_free(&receiver_rx, &receiver_topic_port); + udpard_rx_port_free(&sender_rx, &sender_p2p_port); + udpard_tx_free(&sender_tx); + udpard_tx_free(&receiver_tx); + + TEST_ASSERT_EQUAL_size_t(0, sender_tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, sender_tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, sender_rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, sender_rx_alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, receiver_tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, receiver_tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, receiver_rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, receiver_rx_alloc_session.allocated_fragments); + + instrumented_allocator_reset(&sender_tx_alloc_transfer); + instrumented_allocator_reset(&sender_tx_alloc_payload); + instrumented_allocator_reset(&sender_rx_alloc_frag); + instrumented_allocator_reset(&sender_rx_alloc_session); + instrumented_allocator_reset(&receiver_tx_alloc_transfer); + instrumented_allocator_reset(&receiver_tx_alloc_payload); + instrumented_allocator_reset(&receiver_rx_alloc_frag); + instrumented_allocator_reset(&receiver_rx_alloc_session); +} + +} // namespace + +extern "C" void setUp() {} + +extern "C" void tearDown() {} + +int main() +{ + UNITY_BEGIN(); + RUN_TEST(test_reliable_ordered_with_loss_and_reordering); + return UNITY_END(); +} diff --git a/tests/src/test_e2e_responses.cpp b/tests/src/test_e2e_responses.cpp new file mode 100644 index 0000000..708a269 --- /dev/null +++ b/tests/src/test_e2e_responses.cpp @@ -0,0 +1,784 @@ +/// This software is distributed under the terms of the MIT License. +/// Copyright (C) OpenCyphal Development Team +/// Copyright Amazon.com Inc. or its affiliates. 
+/// SPDX-License-Identifier: MIT
+
+#include <udpard.h>
+#include "helpers.h"
+#include <unity.h>
+#include <array>
+#include <cstdint>
+#include <vector>
+
+namespace {
+
+// --------------------------------------------------------------------------------------------------------------------
+// COMMON INFRASTRUCTURE
+// --------------------------------------------------------------------------------------------------------------------
+
+struct CapturedFrame
+{
+    udpard_bytes_mut_t datagram;
+    uint_fast8_t       iface_index;
+};
+
+void tx_refcount_free(void* const user, const size_t size, void* const payload)
+{
+    (void)user;
+    udpard_tx_refcount_dec(udpard_bytes_t{ .size = size, .data = payload });
+}
+
+bool capture_tx_frame_impl(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection)
+{
+    auto* frames = static_cast<std::vector<CapturedFrame>*>(tx->user);
+    if (frames == nullptr) {
+        return false;
+    }
+    udpard_tx_refcount_inc(ejection->datagram);
+    void* const data = const_cast<void*>(ejection->datagram.data); // NOLINT
+    frames->push_back(CapturedFrame{ .datagram    = { .size = ejection->datagram.size, .data = data },
+                                     .iface_index = ejection->iface_index });
+    return true;
+}
+
+bool capture_tx_frame_subject(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection)
+{
+    return capture_tx_frame_impl(tx, ejection);
+}
+
+bool capture_tx_frame_p2p(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t /*dest*/)
+{
+    return capture_tx_frame_impl(tx, ejection);
+}
+
+void drop_frame(const CapturedFrame& frame)
+{
+    udpard_tx_refcount_dec(udpard_bytes_t{ .size = frame.datagram.size, .data = frame.datagram.data });
+}
+
+constexpr udpard_tx_vtable_t tx_vtable{ .eject_subject = &capture_tx_frame_subject,
+                                        .eject_p2p     = &capture_tx_frame_p2p };
+// Shared deleter for captured TX frames.
+constexpr udpard_deleter_vtable_t tx_refcount_deleter_vt{ .free = &tx_refcount_free };
+constexpr udpard_deleter_t        tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr };
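+
+// For contrast with the capturing vtable above: outside of a test harness, an ejection
+// callback would normally hand the datagram to a socket instead of retaining it.
+// A minimal sketch only (hypothetical; assumes a connected POSIX UDP socket fd stashed
+// in tx->user, and omits error handling -- not part of this test suite):
+//
+//   bool eject_to_socket(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection)
+//   {
+//       const int fd = *static_cast<const int*>(tx->user);
+//       // No udpard_tx_refcount_inc() here: the datagram is consumed, not retained.
+//       return ::send(fd, ejection->datagram.data, ejection->datagram.size, 0) >= 0;
+//   }
+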
+// Check the ACK flag in the Cyphal/UDP header.
+constexpr size_t HeaderSizeBytes = 48U;
+bool is_ack_frame(const udpard_bytes_mut_t& datagram)
+{
+    if (datagram.size < HeaderSizeBytes) {
+        return false;
+    }
+    const auto* p = static_cast<const uint8_t*>(datagram.data);
+    return (p[1] & 0x02U) != 0U;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// FEEDBACK AND CONTEXT STRUCTURES
+// --------------------------------------------------------------------------------------------------------------------
+
+struct FeedbackState
+{
+    size_t   count            = 0;
+    uint16_t acknowledgements = 0;
+};
+
+void record_feedback(udpard_tx_t*, const udpard_tx_feedback_t fb)
+{
+    auto* st = static_cast<FeedbackState*>(fb.user.ptr[0]);
+    if (st != nullptr) {
+        st->count++;
+        st->acknowledgements = fb.acknowledgements;
+    }
+}
+
+struct NodeBTopicContext
+{
+    std::vector<uint8_t>                                  received_payload;
+    std::array<udpard_udpip_ep_t, UDPARD_IFACE_COUNT_MAX> sender_sources{};
+    uint64_t                                              sender_uid     = 0;
+    uint64_t                                              received_topic = 0;
+    uint64_t                                              received_tid   = 0;
+    size_t                                                message_count  = 0;
+};
+
+struct NodeAResponseContext
+{
+    std::vector<uint8_t> received_response;
+    uint64_t             transfer_id    = 0;
+    size_t               response_count = 0;
+};
+
+// Combined context for a node's RX instance
+struct NodeContext
+{
+    NodeBTopicContext*    topic_ctx    = nullptr;
+    NodeAResponseContext* response_ctx = nullptr;
+};
+
+// --------------------------------------------------------------------------------------------------------------------
+// CALLBACK IMPLEMENTATIONS
+// --------------------------------------------------------------------------------------------------------------------
+
+// Node B's message reception callback - receives the topic message from A
+void node_b_on_topic_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer)
+{
+    auto* node_ctx = static_cast<NodeContext*>(rx->user);
+    auto* ctx      = node_ctx->topic_ctx;
+    if (ctx == nullptr) {
+        udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment));
+        return;
+    }
+    ctx->message_count++;
+    ctx->sender_uid     = transfer.remote.uid;
+    ctx->sender_sources = {};
+    for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+        ctx->sender_sources[i] = transfer.remote.endpoints[i];
+    }
+    ctx->received_topic = port->topic_hash;
+    ctx->received_tid   = transfer.transfer_id;
+
+    ctx->received_payload.resize(transfer.payload_size_stored);
+    const udpard_fragment_t* cursor = transfer.payload;
+    (void)udpard_fragment_gather(&cursor, 0, transfer.payload_size_stored, ctx->received_payload.data());
+
+    udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment));
+}
+
+void on_collision(udpard_rx_t* const, udpard_rx_port_t* const, const udpard_remote_t) {}
+
+constexpr udpard_rx_port_vtable_t topic_callbacks{ .on_message   = &node_b_on_topic_message,
+                                                   .on_collision = &on_collision };
+
+// Node A's P2P response reception callback - receives the response from B
+void node_a_on_p2p_response(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer)
+{
+    auto* node_ctx = static_cast<NodeContext*>(rx->user);
+    auto* ctx      = node_ctx->response_ctx;
+    if (ctx == nullptr) {
+        udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment));
+        return;
+    }
+    ctx->response_count++;
+    ctx->transfer_id = transfer.transfer_id;
+
+    ctx->received_response.resize(transfer.payload_size_stored);
+    const udpard_fragment_t* cursor = transfer.payload;
+    (void)udpard_fragment_gather(&cursor, 0, transfer.payload_size_stored, ctx->received_response.data());
+
+    udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment));
+}
+
+constexpr udpard_rx_port_vtable_t p2p_response_callbacks{ .on_message   = &node_a_on_p2p_response,
+                                                          .on_collision = &on_collision };
+
+// ACK-only P2P port callback (for receiving ACKs, which have no user payload)
+void on_ack_only(udpard_rx_t*, udpard_rx_port_t* port, const udpard_rx_transfer_t tr)
+{
+    udpard_fragment_free_all(tr.payload, udpard_make_deleter(port->memory.fragment));
+}
+
+constexpr udpard_rx_port_vtable_t ack_only_callbacks{ .on_message = &on_ack_only, .on_collision = &on_collision };
+
+// --------------------------------------------------------------------------------------------------------------------
+// TEST: Basic topic message with P2P response flow
+// --------------------------------------------------------------------------------------------------------------------
+
+/// Node A publishes a reliable topic message, Node B receives it and sends a reliable P2P response.
+/// Both nodes verify that their delivery callbacks are correctly invoked.
+/// Each node uses exactly one TX and one RX instance.
+void test_topic_with_p2p_response()
+{
+    seed_prng();
+
+    // ================================================================================================================
+    // ALLOCATORS - One TX and one RX per node
+    // ================================================================================================================
+    instrumented_allocator_t a_tx_alloc_transfer{};
+    instrumented_allocator_t a_tx_alloc_payload{};
+    instrumented_allocator_t a_rx_alloc_frag{};
+    instrumented_allocator_t a_rx_alloc_session{};
+    instrumented_allocator_new(&a_tx_alloc_transfer);
+    instrumented_allocator_new(&a_tx_alloc_payload);
+    instrumented_allocator_new(&a_rx_alloc_frag);
+    instrumented_allocator_new(&a_rx_alloc_session);
+
+    instrumented_allocator_t b_tx_alloc_transfer{};
+    instrumented_allocator_t b_tx_alloc_payload{};
+    instrumented_allocator_t b_rx_alloc_frag{};
+    instrumented_allocator_t b_rx_alloc_session{};
+    instrumented_allocator_new(&b_tx_alloc_transfer);
+    instrumented_allocator_new(&b_tx_alloc_payload);
+    instrumented_allocator_new(&b_rx_alloc_frag);
+    instrumented_allocator_new(&b_rx_alloc_session);
+
+    // ================================================================================================================
+    // MEMORY RESOURCES
+    // ================================================================================================================
+    udpard_tx_mem_resources_t a_tx_mem{};
+    a_tx_mem.transfer = instrumented_allocator_make_resource(&a_tx_alloc_transfer);
+    for (auto& res : a_tx_mem.payload) {
+        res = instrumented_allocator_make_resource(&a_tx_alloc_payload);
+    }
+    const udpard_rx_mem_resources_t a_rx_mem{ .session  = instrumented_allocator_make_resource(&a_rx_alloc_session),
+                                              .fragment = instrumented_allocator_make_resource(&a_rx_alloc_frag) };
+
+    udpard_tx_mem_resources_t b_tx_mem{};
+    b_tx_mem.transfer = instrumented_allocator_make_resource(&b_tx_alloc_transfer);
+    for (auto& res : b_tx_mem.payload) {
+        res = instrumented_allocator_make_resource(&b_tx_alloc_payload);
+    }
+    const udpard_rx_mem_resources_t b_rx_mem{ .session  = instrumented_allocator_make_resource(&b_rx_alloc_session),
+                                              .fragment = instrumented_allocator_make_resource(&b_rx_alloc_frag) };
+
+    // ================================================================================================================
+    // NODE UIDs AND ENDPOINTS
+    // ================================================================================================================
+    constexpr uint64_t node_a_uid = 0xAAAA1111BBBB2222ULL;
+    constexpr uint64_t node_b_uid = 0xCCCC3333DDDD4444ULL;
+
+    const std::array<udpard_udpip_ep_t, UDPARD_IFACE_COUNT_MAX> node_a_sources{
+        udpard_udpip_ep_t{ .ip = 0x0A000001U, .port = 7400U },
+        udpard_udpip_ep_t{ .ip = 0x0A000002U, .port = 7401U },
+        udpard_udpip_ep_t{ .ip = 0x0A000003U, .port = 7402U },
+    };
+    const std::array<udpard_udpip_ep_t, UDPARD_IFACE_COUNT_MAX> node_b_sources{
+        udpard_udpip_ep_t{ .ip = 0x0A000011U, .port = 7500U },
+        udpard_udpip_ep_t{ .ip = 0x0A000012U, .port = 7501U },
+        udpard_udpip_ep_t{ .ip = 0x0A000013U, .port = 7502U },
+    };
+
+    constexpr uint64_t topic_hash  = 0x0123456789ABCDEFULL;
+    constexpr uint64_t transfer_id = 42;
+
+    // ================================================================================================================
+    // TX/RX PIPELINES - One TX and one RX per node
+    // ================================================================================================================
+    // Node A: single TX, single RX (linked to TX for ACK processing)
+    udpard_tx_t                a_tx{};
+    std::vector<CapturedFrame> a_frames;
+    TEST_ASSERT_TRUE(udpard_tx_new(&a_tx, node_a_uid, 100, 64, a_tx_mem, &tx_vtable));
+    a_tx.user                 = &a_frames;
+    a_tx.ack_baseline_timeout = 10000;
+
+    udpard_rx_t a_rx{};
+    udpard_rx_new(&a_rx, &a_tx);
+    NodeAResponseContext a_response_ctx{};
+    NodeContext          a_node_ctx{ .topic_ctx = nullptr, .response_ctx = &a_response_ctx };
+    a_rx.user = &a_node_ctx;
+
+    // A's P2P port for receiving responses and ACKs
+    udpard_rx_port_t a_p2p_port{};
+    TEST_ASSERT_TRUE(
+        udpard_rx_port_new(&a_p2p_port, node_a_uid, 4096, udpard_rx_unordered, 0, a_rx_mem, &p2p_response_callbacks));
+
+    // Node B: single TX, single RX (linked to TX for ACK processing)
+    udpard_tx_t                b_tx{};
+    std::vector<CapturedFrame> b_frames;
+    TEST_ASSERT_TRUE(udpard_tx_new(&b_tx, node_b_uid, 200, 64, b_tx_mem, &tx_vtable));
+    b_tx.user                 = &b_frames;
+    b_tx.ack_baseline_timeout = 10000;
+
+    udpard_rx_t b_rx{};
+    udpard_rx_new(&b_rx, &b_tx);
+    NodeBTopicContext b_topic_ctx{};
+    NodeContext       b_node_ctx{ .topic_ctx = &b_topic_ctx, .response_ctx = nullptr };
+    b_rx.user = &b_node_ctx;
+
+    // B's topic subscription port
+    udpard_rx_port_t b_topic_port{};
+    TEST_ASSERT_TRUE(
+        udpard_rx_port_new(&b_topic_port, topic_hash, 4096, udpard_rx_unordered, 0, b_rx_mem, &topic_callbacks));
+
+    // B's P2P port for receiving response ACKs
+    udpard_rx_port_t b_p2p_port{};
+    TEST_ASSERT_TRUE(
+        udpard_rx_port_new(&b_p2p_port, node_b_uid, 16, udpard_rx_unordered, 0, b_rx_mem, &ack_only_callbacks));
+
+    // ================================================================================================================
+    // PAYLOADS AND FEEDBACK STATES
+    // ================================================================================================================
+    const std::vector<uint8_t>     topic_payload      = { 0x01, 0x02, 0x03, 0x04, 0x05 };
+    const std::vector<uint8_t>     response_payload   = { 0xAA, 0xBB, 0xCC, 0xDD };
+    const udpard_bytes_scattered_t topic_payload_scat = make_scattered(topic_payload.data(), topic_payload.size());
+
+    FeedbackState a_topic_fb{};
+    FeedbackState b_response_fb{};
+
+    // ================================================================================================================
+    // STEP 1: Node A publishes a reliable topic message
+    // ================================================================================================================
+    udpard_us_t        now            = 0;
+    constexpr uint16_t iface_bitmap_1 = (1U << 0U);
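+    // The push below enqueues one reliable transfer. Argument order, as exercised
+    // here: TX instance, current time, transmission deadline, interface bitmap,
+    // priority, topic hash, transfer-ID, scattered payload view, feedback callback,
+    // and an opaque user context that is handed back through the feedback.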
+    TEST_ASSERT_TRUE(udpard_tx_push(&a_tx,
+                                    now,
+                                    now + 1000000,
+                                    iface_bitmap_1,
+                                    udpard_prio_nominal,
+                                    topic_hash,
+                                    transfer_id,
+                                    topic_payload_scat,
+                                    &record_feedback,
+                                    make_user_context(&a_topic_fb)));
+    a_frames.clear();
+    udpard_tx_poll(&a_tx, now, UDPARD_IFACE_BITMAP_ALL);
+    TEST_ASSERT_FALSE(a_frames.empty());
+
+    // ================================================================================================================
+    // STEP 2: Deliver topic message to Node B
+    // ================================================================================================================
+    for (const auto& frame : a_frames) {
+        TEST_ASSERT_TRUE(udpard_rx_port_push(&b_rx,
+                                             &b_topic_port,
+                                             now,
+                                             node_a_sources[frame.iface_index],
+                                             frame.datagram,
+                                             tx_payload_deleter,
+                                             frame.iface_index));
+    }
+    udpard_rx_poll(&b_rx, now);
+    a_frames.clear();
+
+    // Verify B received the message
+    TEST_ASSERT_EQUAL_size_t(1, b_topic_ctx.message_count);
+    TEST_ASSERT_EQUAL_UINT64(node_a_uid, b_topic_ctx.sender_uid);
+    TEST_ASSERT_EQUAL_size_t(topic_payload.size(), b_topic_ctx.received_payload.size());
+    TEST_ASSERT_EQUAL_MEMORY(topic_payload.data(), b_topic_ctx.received_payload.data(), topic_payload.size());
+
+    // ================================================================================================================
+    // STEP 3: Node B sends ACK back to A (for the topic message) - via b_tx since b_rx is linked to it
+    // ================================================================================================================
+    b_frames.clear();
+    udpard_tx_poll(&b_tx, now, UDPARD_IFACE_BITMAP_ALL);
+
+    // Deliver ACK frames to A
+    for (const auto& frame : b_frames) {
+        TEST_ASSERT_TRUE(udpard_rx_port_push(&a_rx,
+                                             &a_p2p_port,
+                                             now,
+                                             node_b_sources[frame.iface_index],
+                                             frame.datagram,
+                                             tx_payload_deleter,
+                                             frame.iface_index));
+    }
+    udpard_rx_poll(&a_rx, now);
+    b_frames.clear();
+
+    // Now A should have received the ACK - poll to process feedback
+    now += 100;
+    udpard_tx_poll(&a_tx, now, UDPARD_IFACE_BITMAP_ALL);
+    TEST_ASSERT_EQUAL_size_t(1, a_topic_fb.count);
+    TEST_ASSERT_EQUAL_UINT32(1, a_topic_fb.acknowledgements);
+
+    // ================================================================================================================
+    // STEP 4: Node B sends a reliable P2P response to A
+    // ================================================================================================================
+    udpard_remote_t remote_a{};
+    remote_a.uid = b_topic_ctx.sender_uid;
+    for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+        remote_a.endpoints[i] = node_a_sources[i];
+    }
+
+    const udpard_bytes_scattered_t response_scat  = make_scattered(response_payload.data(), response_payload.size());
+    uint64_t                       b_response_tid = 0;
+    TEST_ASSERT_TRUE(udpard_tx_push_p2p(&b_tx,
+                                        now,
+                                        now + 1000000,
+                                        udpard_prio_nominal,
+                                        remote_a,
+                                        response_scat,
+                                        &record_feedback,
+                                        make_user_context(&b_response_fb),
+                                        &b_response_tid));
+
+    b_frames.clear();
+    udpard_tx_poll(&b_tx, now, UDPARD_IFACE_BITMAP_ALL);
+    TEST_ASSERT_FALSE(b_frames.empty());
+
+    // Deliver response frames to A
+    for (const auto& frame : b_frames) {
+        TEST_ASSERT_TRUE(udpard_rx_port_push(&a_rx,
+                                             &a_p2p_port,
+                                             now,
+                                             node_b_sources[frame.iface_index],
+                                             frame.datagram,
+                                             tx_payload_deleter,
+                                             frame.iface_index));
+    }
+    udpard_rx_poll(&a_rx, now);
+    b_frames.clear();
+
+    // Verify A received the response
+    TEST_ASSERT_EQUAL_size_t(1, a_response_ctx.response_count);
+    TEST_ASSERT_EQUAL_UINT64(b_response_tid, a_response_ctx.transfer_id);
+    TEST_ASSERT_EQUAL_size_t(response_payload.size(), a_response_ctx.received_response.size());
+    TEST_ASSERT_EQUAL_MEMORY(response_payload.data(), a_response_ctx.received_response.data(), response_payload.size());
+
+    // ================================================================================================================
+    // STEP 5: A sends ACK for the response back to B - via a_tx since a_rx is linked to it
+    // ================================================================================================================
+    a_frames.clear();
+    udpard_tx_poll(&a_tx, now, UDPARD_IFACE_BITMAP_ALL);
+
+    // Deliver ACK frames to B
+    for (const auto& frame : a_frames) {
+        TEST_ASSERT_TRUE(udpard_rx_port_push(&b_rx,
+                                             &b_p2p_port,
+                                             now,
+                                             node_a_sources[frame.iface_index],
+                                             frame.datagram,
+                                             tx_payload_deleter,
+                                             frame.iface_index));
+    }
+    udpard_rx_poll(&b_rx, now);
+    a_frames.clear();
+
+    // Now B should have received the ACK for the response
+    now += 100;
+    udpard_tx_poll(&b_tx, now, UDPARD_IFACE_BITMAP_ALL);
+    TEST_ASSERT_EQUAL_size_t(1, b_response_fb.count);
+    TEST_ASSERT_EQUAL_UINT32(1, b_response_fb.acknowledgements);
+
+    // ================================================================================================================
+    // CLEANUP
+    // ================================================================================================================
+    udpard_rx_port_free(&b_rx, &b_topic_port);
+    udpard_rx_port_free(&b_rx, &b_p2p_port);
+    udpard_rx_port_free(&a_rx, &a_p2p_port);
+    udpard_tx_free(&a_tx);
+    udpard_tx_free(&b_tx);
+
+    TEST_ASSERT_EQUAL_size_t(0, a_tx_alloc_transfer.allocated_fragments);
+    TEST_ASSERT_EQUAL_size_t(0, a_tx_alloc_payload.allocated_fragments);
+    TEST_ASSERT_EQUAL_size_t(0, a_rx_alloc_frag.allocated_fragments);
+    TEST_ASSERT_EQUAL_size_t(0, a_rx_alloc_session.allocated_fragments);
+    TEST_ASSERT_EQUAL_size_t(0, b_tx_alloc_transfer.allocated_fragments);
+    TEST_ASSERT_EQUAL_size_t(0, b_tx_alloc_payload.allocated_fragments);
+    TEST_ASSERT_EQUAL_size_t(0, b_rx_alloc_frag.allocated_fragments);
+    TEST_ASSERT_EQUAL_size_t(0, b_rx_alloc_session.allocated_fragments);
+
+    instrumented_allocator_reset(&a_tx_alloc_transfer);
+    instrumented_allocator_reset(&a_tx_alloc_payload);
+    instrumented_allocator_reset(&a_rx_alloc_frag);
+    instrumented_allocator_reset(&a_rx_alloc_session);
+    instrumented_allocator_reset(&b_tx_alloc_transfer);
+    instrumented_allocator_reset(&b_tx_alloc_payload);
+    instrumented_allocator_reset(&b_rx_alloc_frag);
+    instrumented_allocator_reset(&b_rx_alloc_session);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// TEST: Topic message and response with simulated losses
+// --------------------------------------------------------------------------------------------------------------------
+
+/// Same as above, but with simulated packet loss on both the response and the response ACK.
+/// Tests that reliable delivery works correctly with retransmissions.
+/// Each node uses exactly one TX and one RX instance.
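+/// Loss model, in brief: the first copy of B's response and the first ACK for that
+/// response are each dropped exactly once; recovery then depends on the sender
+/// retransmitting after ack_baseline_timeout, which the simulation loop drives by
+/// advancing the simulated clock between iterations.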
+void test_topic_with_p2p_response_under_loss()
+{
+    seed_prng();
+
+    // ================================================================================================================
+    // ALLOCATORS - One TX and one RX per node
+    // ================================================================================================================
+    instrumented_allocator_t a_tx_alloc_transfer{};
+    instrumented_allocator_t a_tx_alloc_payload{};
+    instrumented_allocator_t a_rx_alloc_frag{};
+    instrumented_allocator_t a_rx_alloc_session{};
+    instrumented_allocator_new(&a_tx_alloc_transfer);
+    instrumented_allocator_new(&a_tx_alloc_payload);
+    instrumented_allocator_new(&a_rx_alloc_frag);
+    instrumented_allocator_new(&a_rx_alloc_session);
+
+    instrumented_allocator_t b_tx_alloc_transfer{};
+    instrumented_allocator_t b_tx_alloc_payload{};
+    instrumented_allocator_t b_rx_alloc_frag{};
+    instrumented_allocator_t b_rx_alloc_session{};
+    instrumented_allocator_new(&b_tx_alloc_transfer);
+    instrumented_allocator_new(&b_tx_alloc_payload);
+    instrumented_allocator_new(&b_rx_alloc_frag);
+    instrumented_allocator_new(&b_rx_alloc_session);
+
+    // ================================================================================================================
+    // MEMORY RESOURCES
+    // ================================================================================================================
+    udpard_tx_mem_resources_t a_tx_mem{};
+    a_tx_mem.transfer = instrumented_allocator_make_resource(&a_tx_alloc_transfer);
+    for (auto& res : a_tx_mem.payload) {
+        res = instrumented_allocator_make_resource(&a_tx_alloc_payload);
+    }
+    const udpard_rx_mem_resources_t a_rx_mem{ .session  = instrumented_allocator_make_resource(&a_rx_alloc_session),
+                                              .fragment = instrumented_allocator_make_resource(&a_rx_alloc_frag) };
+
+    udpard_tx_mem_resources_t b_tx_mem{};
+    b_tx_mem.transfer = instrumented_allocator_make_resource(&b_tx_alloc_transfer);
+    for (auto& res : b_tx_mem.payload) {
+        res = instrumented_allocator_make_resource(&b_tx_alloc_payload);
+    }
+    const udpard_rx_mem_resources_t b_rx_mem{ .session  = instrumented_allocator_make_resource(&b_rx_alloc_session),
+                                              .fragment = instrumented_allocator_make_resource(&b_rx_alloc_frag) };
+
+    // ================================================================================================================
+    // NODE UIDs AND ENDPOINTS
+    // ================================================================================================================
+    constexpr uint64_t node_a_uid = 0x1111AAAA2222BBBBULL;
+    constexpr uint64_t node_b_uid = 0x3333CCCC4444DDDDULL;
+
+    const std::array<udpard_udpip_ep_t, UDPARD_IFACE_COUNT_MAX> node_a_sources{
+        udpard_udpip_ep_t{ .ip = 0x0A000021U, .port = 8400U },
+        udpard_udpip_ep_t{},
+        udpard_udpip_ep_t{},
+    };
+    const std::array<udpard_udpip_ep_t, UDPARD_IFACE_COUNT_MAX> node_b_sources{
+        udpard_udpip_ep_t{ .ip = 0x0A000031U, .port = 8500U },
+        udpard_udpip_ep_t{},
+        udpard_udpip_ep_t{},
+    };
+
+    constexpr uint64_t topic_hash  = 0xFEDCBA9876543210ULL;
+    constexpr uint64_t transfer_id = 99;
+
+    // ================================================================================================================
+    // TX/RX PIPELINES - One TX and one RX per node
+    // ================================================================================================================
+    udpard_tx_t                a_tx{};
+    std::vector<CapturedFrame> a_frames;
+    TEST_ASSERT_TRUE(udpard_tx_new(&a_tx, node_a_uid, 100, 64, a_tx_mem, &tx_vtable));
+    a_tx.user                 = &a_frames;
+    a_tx.ack_baseline_timeout = 8000;
+
+    udpard_rx_t a_rx{};
+    udpard_rx_new(&a_rx, &a_tx);
+    NodeAResponseContext a_response_ctx{};
+    NodeContext          a_node_ctx{ .topic_ctx = nullptr, .response_ctx = &a_response_ctx };
+    a_rx.user = &a_node_ctx;
+
+    udpard_rx_port_t a_p2p_port{};
+    TEST_ASSERT_TRUE(
+        udpard_rx_port_new(&a_p2p_port, node_a_uid, 4096, udpard_rx_unordered, 0, a_rx_mem, &p2p_response_callbacks));
+
+    udpard_tx_t                b_tx{};
+    std::vector<CapturedFrame> b_frames;
+    TEST_ASSERT_TRUE(udpard_tx_new(&b_tx, node_b_uid, 200, 64, b_tx_mem, &tx_vtable));
+    b_tx.user                 = &b_frames;
+    b_tx.ack_baseline_timeout = 8000;
+
+    udpard_rx_t b_rx{};
+    udpard_rx_new(&b_rx, &b_tx);
+    NodeBTopicContext b_topic_ctx{};
+    NodeContext       b_node_ctx{ .topic_ctx = &b_topic_ctx, .response_ctx = nullptr };
+    b_rx.user = &b_node_ctx;
+
+    udpard_rx_port_t b_topic_port{};
+    TEST_ASSERT_TRUE(
+        udpard_rx_port_new(&b_topic_port, topic_hash, 4096, udpard_rx_unordered, 0, b_rx_mem, &topic_callbacks));
+
+    udpard_rx_port_t b_p2p_port{};
+    TEST_ASSERT_TRUE(
+        udpard_rx_port_new(&b_p2p_port, node_b_uid, 16, udpard_rx_unordered, 0, b_rx_mem, &ack_only_callbacks));
+
+    // ================================================================================================================
+    // PAYLOADS AND FEEDBACK STATES
+    // ================================================================================================================
+    const std::vector<uint8_t>     topic_payload      = { 0x10, 0x20, 0x30 };
+    const std::vector<uint8_t>     response_payload   = { 0xDE, 0xAD, 0xBE, 0xEF };
+    const udpard_bytes_scattered_t topic_payload_scat = make_scattered(topic_payload.data(), topic_payload.size());
+
+    FeedbackState a_topic_fb{};
+    FeedbackState b_response_fb{};
+
+    // ================================================================================================================
+    // STEP 1: Node A publishes a reliable topic message
+    // ================================================================================================================
+    udpard_us_t        now            = 0;
+    constexpr uint16_t iface_bitmap_1 = (1U << 0U);
+    TEST_ASSERT_TRUE(udpard_tx_push(&a_tx,
+                                    now,
+                                    now + 500000,
+                                    iface_bitmap_1,
+                                    udpard_prio_fast,
+                                    topic_hash,
+                                    transfer_id,
+                                    topic_payload_scat,
+                                    &record_feedback,
+                                    make_user_context(&a_topic_fb)));
+
+    // ================================================================================================================
+    // SIMULATION LOOP WITH LOSSES
+    // ================================================================================================================
+    size_t           iterations     = 0;
+    constexpr size_t max_iterations = 30;
+    bool     first_response_dropped = false;
+    bool     first_resp_ack_dropped = false;
+    bool     response_sent          = false;
+    uint64_t b_response_tid         = 0;
+
+    while (iterations < max_iterations) {
+        iterations++;
+
+        // --- Node A transmits (topic message, topic ACKs, or response ACKs) ---
+        a_frames.clear();
+        udpard_tx_poll(&a_tx, now, UDPARD_IFACE_BITMAP_ALL);
+
+        for (const auto& frame : a_frames) {
+            if (b_topic_ctx.message_count == 0) {
+                // Topic message frames go to B's topic port
+                (void)udpard_rx_port_push(&b_rx,
+                                          &b_topic_port,
+                                          now,
+                                          node_a_sources[frame.iface_index],
+                                          frame.datagram,
+                                          tx_payload_deleter,
+                                          frame.iface_index);
+            } else {
+                // Response ACK frames go to B's P2P port
+                if (!first_resp_ack_dropped && (a_response_ctx.response_count > 0) && (b_response_fb.count == 0)) {
+                    first_resp_ack_dropped = true;
+                    drop_frame(frame);
+                    continue;
+                }
+
+                (void)udpard_rx_port_push(&b_rx,
+                                          &b_p2p_port,
+                                          now,
+                                          node_a_sources[frame.iface_index],
+                                          frame.datagram,
+                                          tx_payload_deleter,
+                                          frame.iface_index);
+            }
+        }
+        a_frames.clear();
+        udpard_rx_poll(&b_rx, now);
+
+        // --- Node B transmits (topic ACKs first, before pushing response) ---
+        b_frames.clear();
+        udpard_tx_poll(&b_tx, now, UDPARD_IFACE_BITMAP_ALL);
+
+        // Deliver B's frames (topic ACKs) to A before pushing response
+        for (const auto& frame : b_frames) {
+            (void)udpard_rx_port_push(&a_rx,
+                                      &a_p2p_port,
+                                      now,
+                                      node_b_sources[frame.iface_index],
+                                      frame.datagram,
+                                      tx_payload_deleter,
+                                      frame.iface_index);
+        }
+        b_frames.clear();
+        udpard_rx_poll(&a_rx, now);
+
+        // --- If B received topic, send response ---
+        if ((b_topic_ctx.message_count > 0) && !response_sent) {
+            response_sent = true;
+
+            udpard_remote_t remote_a{};
+            remote_a.uid          = b_topic_ctx.sender_uid;
+            remote_a.endpoints[0] = node_a_sources[0];
+
+            const udpard_bytes_scattered_t response_scat =
+                make_scattered(response_payload.data(), response_payload.size());
+            TEST_ASSERT_TRUE(udpard_tx_push_p2p(&b_tx,
+                                                now,
+                                                now + 500000,
+                                                udpard_prio_fast,
+                                                remote_a,
+                                                response_scat,
+                                                &record_feedback,
+                                                make_user_context(&b_response_fb),
+                                                &b_response_tid));
+        }
+
+        // --- Node B transmits (responses) ---
+        b_frames.clear();
+        udpard_tx_poll(&b_tx, now, UDPARD_IFACE_BITMAP_ALL);
+
+        for (const auto& frame : b_frames) {
+            // Check if this frame is an ACK vs response.
+            const bool is_ack = is_ack_frame(frame.datagram);
+
+            // Drop first response (non-ACK) to test retransmission.
+            if (!first_response_dropped && response_sent && !is_ack) {
+                first_response_dropped = true;
+                drop_frame(frame);
+                continue;
+            }
+
+            (void)udpard_rx_port_push(&a_rx,
+                                      &a_p2p_port,
+                                      now,
+                                      node_b_sources[frame.iface_index],
+                                      frame.datagram,
+                                      tx_payload_deleter,
+                                      frame.iface_index);
+        }
+        b_frames.clear();
+        udpard_rx_poll(&a_rx, now);
+
+        // Check if both feedbacks have fired
+        if ((a_topic_fb.count > 0) && (b_response_fb.count > 0)) {
+            break;
+        }
+
+        now += a_tx.ack_baseline_timeout + 5000;
+    }
+
+    // ================================================================================================================
+    // VERIFY
+    // ================================================================================================================
+    TEST_ASSERT_LESS_THAN_size_t(max_iterations, iterations);
+    TEST_ASSERT_TRUE(first_response_dropped);
+    TEST_ASSERT_TRUE(first_resp_ack_dropped);
+
+    TEST_ASSERT_EQUAL_size_t(1, a_topic_fb.count);
+    TEST_ASSERT_EQUAL_UINT32(1, a_topic_fb.acknowledgements);
+
+    TEST_ASSERT_EQUAL_size_t(1, b_response_fb.count);
+    TEST_ASSERT_EQUAL_UINT32(1, b_response_fb.acknowledgements);
+
+    TEST_ASSERT_GREATER_OR_EQUAL_size_t(1, b_topic_ctx.message_count);
+    TEST_ASSERT_EQUAL_size_t(1, a_response_ctx.response_count);
+    TEST_ASSERT_EQUAL_UINT64(b_response_tid, a_response_ctx.transfer_id);
+    TEST_ASSERT_EQUAL_size_t(response_payload.size(), a_response_ctx.received_response.size());
+    TEST_ASSERT_EQUAL_MEMORY(response_payload.data(), a_response_ctx.received_response.data(), response_payload.size());
+
+    // ================================================================================================================
+    // CLEANUP
+    // ================================================================================================================
+    udpard_rx_port_free(&b_rx, &b_topic_port);
+    udpard_rx_port_free(&b_rx, &b_p2p_port);
+    udpard_rx_port_free(&a_rx, &a_p2p_port);
+    udpard_tx_free(&a_tx);
+    udpard_tx_free(&b_tx);
+
+    TEST_ASSERT_EQUAL_size_t(0, a_tx_alloc_transfer.allocated_fragments);
+    TEST_ASSERT_EQUAL_size_t(0, a_tx_alloc_payload.allocated_fragments);
+    TEST_ASSERT_EQUAL_size_t(0, a_rx_alloc_frag.allocated_fragments);
+    TEST_ASSERT_EQUAL_size_t(0, a_rx_alloc_session.allocated_fragments);
+    TEST_ASSERT_EQUAL_size_t(0, b_tx_alloc_transfer.allocated_fragments);
+    TEST_ASSERT_EQUAL_size_t(0, b_tx_alloc_payload.allocated_fragments);
+    TEST_ASSERT_EQUAL_size_t(0, b_rx_alloc_frag.allocated_fragments);
+    TEST_ASSERT_EQUAL_size_t(0, b_rx_alloc_session.allocated_fragments);
+
+    instrumented_allocator_reset(&a_tx_alloc_transfer);
+    instrumented_allocator_reset(&a_tx_alloc_payload);
+    instrumented_allocator_reset(&a_rx_alloc_frag);
+    instrumented_allocator_reset(&a_rx_alloc_session);
+    instrumented_allocator_reset(&b_tx_alloc_transfer);
+    instrumented_allocator_reset(&b_tx_alloc_payload);
+    instrumented_allocator_reset(&b_rx_alloc_frag);
+    instrumented_allocator_reset(&b_rx_alloc_session);
+}
+
+} // namespace
+
+extern "C" void setUp() {}
+
+extern "C" void tearDown() {}
+
+int main()
+{
+    UNITY_BEGIN();
+    RUN_TEST(test_topic_with_p2p_response);
+    RUN_TEST(test_topic_with_p2p_response_under_loss);
+    return UNITY_END();
+}
diff --git a/tests/src/test_fragment.cpp b/tests/src/test_fragment.cpp
new file mode 100644
index 0000000..f416f04
--- /dev/null
+++ b/tests/src/test_fragment.cpp
@@ -0,0 +1,325 @@
+/// This software is distributed under the terms of the MIT License.
+/// Copyright (C) OpenCyphal Development Team
+/// Copyright Amazon.com Inc. or its affiliates.
+/// SPDX-License-Identifier: MIT
+
+#include <udpard.h>
+#include "helpers.h"
+#include <unity.h>
+#include <cstdint>
+#include <cstring>
+
+namespace {
+
+/// The data is copied.
+udpard_fragment_t* make_test_fragment(const udpard_mem_t&    fragment_memory,
+                                      const udpard_mem_t&    payload_memory,
+                                      const udpard_deleter_t payload_deleter,
+                                      const size_t           offset,
+                                      const size_t           size,
+                                      const void*            data)
+{
+    auto* frag = static_cast<udpard_fragment_t*>(mem_res_alloc(fragment_memory, sizeof(udpard_fragment_t)));
+    if (frag == nullptr) {
+        return nullptr;
+    }
+    void* payload_data = mem_res_alloc(payload_memory, size);
+    if (payload_data == nullptr) {
+        mem_res_free(fragment_memory, sizeof(udpard_fragment_t), frag);
+        return nullptr;
+    }
+    if (size > 0 && data != nullptr) {
+        std::memcpy(payload_data, data, size);
+    }
+    std::memset(frag, 0, sizeof(*frag));
+    frag->view.data       = payload_data;
+    frag->view.size       = size;
+    frag->origin.data     = payload_data;
+    frag->origin.size     = size;
+    frag->offset          = offset;
+    frag->payload_deleter = payload_deleter;
+    return frag;
+}
+
+void test_udpard_fragment_seek()
+{
+    instrumented_allocator_t alloc_frag{};
+    instrumented_allocator_new(&alloc_frag);
+    const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag);
+
+    instrumented_allocator_t alloc_payload{};
+    instrumented_allocator_new(&alloc_payload);
+    const udpard_mem_t     mem_payload = instrumented_allocator_make_resource(&alloc_payload);
+    const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload);
+
+    // Test 1: Single fragment at offset 0 (root node).
+    // Note: udpard_fragment_seek() uses the index_offset tree structure internally,
+    // which can only be properly built by the RX pipeline. For public API testing,
+    // we can only test simple cases with manually constructed tree structures.
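+    // (The index_offset member appears to be an intrusive AVL tree node: `up` is the
+    // parent link, `lr[0]`/`lr[1]` are the left/right children, and `bf` is the
+    // balance factor. The hand-built trees in this test initialize those fields
+    // directly, which is why every node must be wired explicitly before seeking.)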
+    udpard_fragment_t* single = make_test_fragment(mem_frag, mem_payload, del_payload, 0, 5, "hello");
+    TEST_ASSERT_NOT_NULL(single);
+    // Initialize the tree node to null (no parent, no children) - this makes it a standalone root
+    single->index_offset.up    = nullptr;
+    single->index_offset.lr[0] = nullptr;
+    single->index_offset.lr[1] = nullptr;
+    single->index_offset.bf    = 0;
+
+    // Seek to offset 0 should return the fragment itself.
+    TEST_ASSERT_EQUAL_PTR(single, udpard_fragment_seek(single, 0));
+
+    // Seek within single fragment range [0-5).
+    TEST_ASSERT_EQUAL_PTR(single, udpard_fragment_seek(single, 0));
+    TEST_ASSERT_EQUAL_PTR(single, udpard_fragment_seek(single, 1));
+    TEST_ASSERT_EQUAL_PTR(single, udpard_fragment_seek(single, 2));
+    TEST_ASSERT_EQUAL_PTR(single, udpard_fragment_seek(single, 4));
+
+    // Seek beyond single fragment should return NULL.
+    TEST_ASSERT_NULL(udpard_fragment_seek(single, 5));
+    TEST_ASSERT_NULL(udpard_fragment_seek(single, 100));
+
+    // Cleanup.
+    mem_res_free(mem_payload, single->origin.size, single->origin.data);
+    mem_res_free(mem_frag, sizeof(udpard_fragment_t), single);
+    TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments);
+    TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments);
+
+    instrumented_allocator_reset(&alloc_frag);
+    instrumented_allocator_reset(&alloc_payload);
+
+    // Test 2: Tree with root and child - to test the root-finding loop.
+    // Create a simple tree: root at offset 5, left child at offset 0, right child at offset 10
+    udpard_fragment_t* root  = make_test_fragment(mem_frag, mem_payload, del_payload, 5, 3, "mid");
+    udpard_fragment_t* left  = make_test_fragment(mem_frag, mem_payload, del_payload, 0, 3, "abc");
+    udpard_fragment_t* right = make_test_fragment(mem_frag, mem_payload, del_payload, 10, 4, "wxyz");
+    TEST_ASSERT_NOT_NULL(root);
+    TEST_ASSERT_NOT_NULL(left);
+    TEST_ASSERT_NOT_NULL(right);
+
+    // Build tree structure: root has left and right children
+    root->index_offset.up    = nullptr;              // root has no parent
+    root->index_offset.lr[0] = &left->index_offset;  // left child
+    root->index_offset.lr[1] = &right->index_offset; // right child
+    root->index_offset.bf    = 0;
+
+    left->index_offset.up    = &root->index_offset; // parent is root
+    left->index_offset.lr[0] = nullptr;
+    left->index_offset.lr[1] = nullptr;
+    left->index_offset.bf    = 0;
+
+    right->index_offset.up    = &root->index_offset; // parent is root
+    right->index_offset.lr[0] = nullptr;
+    right->index_offset.lr[1] = nullptr;
+    right->index_offset.bf    = 0;
+
+    // Test seeking from the left child (non-root) - should traverse up to root first.
+    // Seeking to offset 0 should find the left fragment.
+    TEST_ASSERT_EQUAL_PTR(left, udpard_fragment_seek(left, 0));
+    TEST_ASSERT_EQUAL_PTR(left, udpard_fragment_seek(left, 1));
+    TEST_ASSERT_EQUAL_PTR(left, udpard_fragment_seek(left, 2));
+
+    // Seeking from left child to middle fragment's range [5-8).
+    TEST_ASSERT_EQUAL_PTR(root, udpard_fragment_seek(left, 5));
+    TEST_ASSERT_EQUAL_PTR(root, udpard_fragment_seek(left, 6));
+    TEST_ASSERT_EQUAL_PTR(root, udpard_fragment_seek(left, 7));
+
+    // Seeking from right child (non-root) to its own range [10-14).
+    TEST_ASSERT_EQUAL_PTR(right, udpard_fragment_seek(right, 10));
+    TEST_ASSERT_EQUAL_PTR(right, udpard_fragment_seek(right, 11));
+    TEST_ASSERT_EQUAL_PTR(right, udpard_fragment_seek(right, 13));
+
+    // Seeking from right child back to left child - should traverse up to root first.
+    TEST_ASSERT_EQUAL_PTR(left, udpard_fragment_seek(right, 0));
+    TEST_ASSERT_EQUAL_PTR(left, udpard_fragment_seek(right, 2));
+
+    // Seeking from any node to gaps should return NULL.
+    TEST_ASSERT_NULL(udpard_fragment_seek(left, 3));   // gap [3-5)
+    TEST_ASSERT_NULL(udpard_fragment_seek(root, 8));   // gap [8-10)
+    TEST_ASSERT_NULL(udpard_fragment_seek(right, 14)); // beyond all fragments
+
+    // Cleanup.
+    mem_res_free(mem_payload, left->origin.size, left->origin.data);
+    mem_res_free(mem_frag, sizeof(udpard_fragment_t), left);
+    mem_res_free(mem_payload, root->origin.size, root->origin.data);
+    mem_res_free(mem_frag, sizeof(udpard_fragment_t), root);
+    mem_res_free(mem_payload, right->origin.size, right->origin.data);
+    mem_res_free(mem_frag, sizeof(udpard_fragment_t), right);
+    TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments);
+    TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments);
+}
+
+void test_udpard_fragment_gather()
+{
+    instrumented_allocator_t alloc_frag{};
+    instrumented_allocator_new(&alloc_frag);
+    const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag);
+
+    instrumented_allocator_t alloc_payload{};
+    instrumented_allocator_new(&alloc_payload);
+    const udpard_mem_t     mem_payload = instrumented_allocator_make_resource(&alloc_payload);
+    const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload);
+
+    // Test 1: NULL fragment returns 0.
+    char buf[100]; // NOLINT(*-avoid-c-arrays)
+    const udpard_fragment_t* null_frag = nullptr;
+    TEST_ASSERT_EQUAL_size_t(0, udpard_fragment_gather(&null_frag, 0, sizeof(buf), static_cast<void*>(buf)));
+
+    // Test 2: NULL destination returns 0.
+    udpard_fragment_t* const single = make_test_fragment(mem_frag, mem_payload, del_payload, 0, 5, "hello");
+    TEST_ASSERT_NOT_NULL(single);
+    single->index_offset.up    = nullptr;
+    single->index_offset.lr[0] = nullptr;
+    single->index_offset.lr[1] = nullptr;
+    single->index_offset.bf    = 0;
+    const udpard_fragment_t* cursor = single;
+    TEST_ASSERT_EQUAL_size_t(0, udpard_fragment_gather(&cursor, 0, sizeof(buf), nullptr));
+    TEST_ASSERT_EQUAL_PTR(single, cursor);
+
+    // Test 3: Single fragment - gather all.
+    (void)std::memset(static_cast<void*>(buf), 0, sizeof(buf));
+    cursor = single;
+    TEST_ASSERT_EQUAL_size_t(5, udpard_fragment_gather(&cursor, 0, sizeof(buf), static_cast<void*>(buf)));
+    TEST_ASSERT_EQUAL_MEMORY("hello", buf, 5);
+    TEST_ASSERT_EQUAL_PTR(single, cursor);
+
+    // Test 4: Single fragment - truncation (destination smaller than fragment).
+    (void)std::memset(static_cast<void*>(buf), 0, sizeof(buf));
+    cursor = single;
+    TEST_ASSERT_EQUAL_size_t(3, udpard_fragment_gather(&cursor, 0, 3, static_cast<void*>(buf)));
+    TEST_ASSERT_EQUAL_MEMORY("hel", buf, 3);
+    TEST_ASSERT_EQUAL_PTR(single, cursor);
+
+    // Test 5: Single fragment - offset into the payload.
+    (void)std::memset(static_cast<void*>(buf), 0, sizeof(buf));
+    cursor = single;
+    TEST_ASSERT_EQUAL_size_t(2, udpard_fragment_gather(&cursor, 2, 2, static_cast<void*>(buf)));
+    TEST_ASSERT_EQUAL_MEMORY("ll", buf, 2);
+    TEST_ASSERT_EQUAL_PTR(single, cursor);
+
+    // Cleanup single fragment.
+    mem_res_free(mem_payload, single->origin.size, single->origin.data);
+    mem_res_free(mem_frag, sizeof(udpard_fragment_t), single);
+
+    // Test 6: Multiple fragments forming a tree.
+    // Create tree: root at offset 5 ("MID"), left at offset 0 ("ABCDE"), right at offset 8 ("WXYZ")
+    // Total payload when gathered: "ABCDE" + "MID" + "WXYZ" = "ABCDEMIDWXYZ" (12 bytes)
+    udpard_fragment_t* const root  = make_test_fragment(mem_frag, mem_payload, del_payload, 5, 3, "MID");
+    udpard_fragment_t* const left  = make_test_fragment(mem_frag, mem_payload, del_payload, 0, 5, "ABCDE");
+    udpard_fragment_t* const right = make_test_fragment(mem_frag, mem_payload, del_payload, 8, 4, "WXYZ");
+    TEST_ASSERT_NOT_NULL(root);
+    TEST_ASSERT_NOT_NULL(left);
+    TEST_ASSERT_NOT_NULL(right);
+
+    // Build tree structure.
+    root->index_offset.up    = nullptr;
+    root->index_offset.lr[0] = &left->index_offset;
+    root->index_offset.lr[1] = &right->index_offset;
+    root->index_offset.bf    = 0;
+
+    left->index_offset.up    = &root->index_offset;
+    left->index_offset.lr[0] = nullptr;
+    left->index_offset.lr[1] = nullptr;
+    left->index_offset.bf    = 0;
+
+    right->index_offset.up    = &root->index_offset;
+    right->index_offset.lr[0] = nullptr;
+    right->index_offset.lr[1] = nullptr;
+    right->index_offset.bf    = 0;
+
+    // Gather from root - should collect all fragments in order.
+    (void)std::memset(static_cast<void*>(buf), 0, sizeof(buf));
+    cursor = root;
+    TEST_ASSERT_EQUAL_size_t(12, udpard_fragment_gather(&cursor, 0, sizeof(buf), static_cast<void*>(buf)));
+    TEST_ASSERT_EQUAL_MEMORY("ABCDEMIDWXYZ", buf, 12);
+    TEST_ASSERT_EQUAL_PTR(right, cursor);
+
+    // Gather from left child - should still collect all fragments (traverses to root first).
+    (void)std::memset(static_cast<void*>(buf), 0, sizeof(buf));
+    cursor = left;
+    TEST_ASSERT_EQUAL_size_t(12, udpard_fragment_gather(&cursor, 0, sizeof(buf), static_cast<void*>(buf)));
+    TEST_ASSERT_EQUAL_MEMORY("ABCDEMIDWXYZ", buf, 12);
+    TEST_ASSERT_EQUAL_PTR(right, cursor);
+
+    // Gather from right child - should still collect all fragments.
+    (void)std::memset(static_cast<void*>(buf), 0, sizeof(buf));
+    cursor = right;
+    TEST_ASSERT_EQUAL_size_t(12, udpard_fragment_gather(&cursor, 0, sizeof(buf), static_cast<void*>(buf)));
+    TEST_ASSERT_EQUAL_MEMORY("ABCDEMIDWXYZ", buf, 12);
+    TEST_ASSERT_EQUAL_PTR(right, cursor);
+
+    // Gather starting exactly at the end of the current cursor fragment.
+    (void)std::memset(static_cast<void*>(buf), 0, sizeof(buf));
+    cursor = left;
+    TEST_ASSERT_EQUAL_size_t(7, udpard_fragment_gather(&cursor, 5, 7, static_cast<void*>(buf)));
+    TEST_ASSERT_EQUAL_MEMORY("MIDWXYZ", buf, 7);
+    TEST_ASSERT_EQUAL_PTR(right, cursor);
+
+    // Test 7: Truncation with multiple fragments - buffer smaller than total.
+    (void)std::memset(static_cast<void*>(buf), 0, sizeof(buf));
+    cursor = root;
+    TEST_ASSERT_EQUAL_size_t(7, udpard_fragment_gather(&cursor, 0, 7, static_cast<void*>(buf)));
+    TEST_ASSERT_EQUAL_MEMORY("ABCDEMI", buf, 7);
+    TEST_ASSERT_EQUAL_PTR(root, cursor);
+
+    // Test 8: Truncation mid-fragment.
+    (void)std::memset(static_cast<void*>(buf), 0, sizeof(buf));
+    cursor = root;
+    TEST_ASSERT_EQUAL_size_t(3, udpard_fragment_gather(&cursor, 0, 3, static_cast<void*>(buf)));
+    TEST_ASSERT_EQUAL_MEMORY("ABC", buf, 3);
+    TEST_ASSERT_EQUAL_PTR(left, cursor);
+
+    // Test 9: Offset across fragments.
+    (void)std::memset(static_cast<void*>(buf), 0, sizeof(buf));
+    cursor = root;
+    TEST_ASSERT_EQUAL_size_t(6, udpard_fragment_gather(&cursor, 2, 6, static_cast<void*>(buf)));
+    TEST_ASSERT_EQUAL_MEMORY("CDEMID", buf, 6);
+    TEST_ASSERT_EQUAL_PTR(root, cursor);
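+
+    // Cursor semantics, as the assertions above demonstrate: udpard_fragment_gather()
+    // leaves *cursor on the fragment that supplied the last byte copied, and leaves it
+    // unchanged when nothing is copied. This allows a sequence of gathers to resume
+    // from the current position instead of re-seeking from the root of the tree.
+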
+    // Test 10: Start on fragment boundary, span into next.
+    (void)std::memset(static_cast<void*>(buf), 0, sizeof(buf));
+    cursor = root;
+    TEST_ASSERT_EQUAL_size_t(5, udpard_fragment_gather(&cursor, 5, 5, static_cast<void*>(buf)));
+    TEST_ASSERT_EQUAL_MEMORY("MIDWX", buf, 5);
+    TEST_ASSERT_EQUAL_PTR(right, cursor);
+
+    // Test 11: Start inside last fragment with request beyond stored data.
+    (void)std::memset(static_cast<void*>(buf), 0, sizeof(buf));
+    cursor = root;
+    TEST_ASSERT_EQUAL_size_t(3, udpard_fragment_gather(&cursor, 9, 10, static_cast<void*>(buf)));
+    TEST_ASSERT_EQUAL_MEMORY("XYZ", buf, 3);
+    TEST_ASSERT_EQUAL_PTR(right, cursor);
+
+    // Test 12: Offset beyond available payload.
+    (void)std::memset(static_cast<void*>(buf), 0, sizeof(buf));
+    cursor = root;
+    TEST_ASSERT_EQUAL_size_t(0, udpard_fragment_gather(&cursor, 100, sizeof(buf), static_cast<void*>(buf)));
+    TEST_ASSERT_EQUAL_PTR(root, cursor);
+
+    // Test 13: Zero-size destination.
+    cursor = root;
+    TEST_ASSERT_EQUAL_size_t(0, udpard_fragment_gather(&cursor, 0, 0, static_cast<void*>(buf)));
+    TEST_ASSERT_EQUAL_PTR(root, cursor);
+
+    // Cleanup.
+    mem_res_free(mem_payload, left->origin.size, left->origin.data);
+    mem_res_free(mem_frag, sizeof(udpard_fragment_t), left);
+    mem_res_free(mem_payload, root->origin.size, root->origin.data);
+    mem_res_free(mem_frag, sizeof(udpard_fragment_t), root);
+    mem_res_free(mem_payload, right->origin.size, right->origin.data);
+    mem_res_free(mem_frag, sizeof(udpard_fragment_t), right);
+    TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments);
+    TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments);
+}
+
+} // namespace
+
+extern "C" void setUp() {}
+
+extern "C" void tearDown() {}
+
+int main()
+{
+    UNITY_BEGIN();
+    RUN_TEST(test_udpard_fragment_seek);
+    RUN_TEST(test_udpard_fragment_gather);
+    return UNITY_END();
+}
diff --git a/tests/src/test_helpers.c b/tests/src/test_helpers.c
index 0fe09ec..bfc6598 100644
--- a/tests/src/test_helpers.c
+++ b/tests/src/test_helpers.c
@@ -4,58 +4,80 @@
 #include "helpers.h"
 #include <unity.h>
 
-static void testInstrumentedAllocator(void)
+static void test_instrumented_allocator(void)
 {
-    InstrumentedAllocator al;
-    instrumentedAllocatorNew(&al);
+    instrumented_allocator_t al;
+    instrumented_allocator_new(&al);
     TEST_ASSERT_EQUAL_size_t(0, al.allocated_fragments);
     TEST_ASSERT_EQUAL_size_t(SIZE_MAX, al.limit_bytes);
+    TEST_ASSERT_EQUAL_UINT64(0, al.count_alloc);
+    TEST_ASSERT_EQUAL_UINT64(0, al.count_free);
 
-    const struct UdpardMemoryResource resource = instrumentedAllocatorMakeMemoryResource(&al);
+    const udpard_mem_t resource = instrumented_allocator_make_resource(&al);
 
-    void* a = resource.allocate(resource.user_reference, 123);
+    void* a = mem_res_alloc(resource, 123);
     TEST_ASSERT_EQUAL_size_t(1, al.allocated_fragments);
     TEST_ASSERT_EQUAL_size_t(123, al.allocated_bytes);
+    TEST_ASSERT_EQUAL_UINT64(1, al.count_alloc);
+    TEST_ASSERT_EQUAL_UINT64(0, al.count_free);
 
-    void* b = resource.allocate(resource.user_reference, 456);
+    void* b = mem_res_alloc(resource, 456);
     TEST_ASSERT_EQUAL_size_t(2, al.allocated_fragments);
     TEST_ASSERT_EQUAL_size_t(579, al.allocated_bytes);
+    TEST_ASSERT_EQUAL_UINT64(2, al.count_alloc);
+    TEST_ASSERT_EQUAL_UINT64(0, al.count_free);
 
     al.limit_bytes     = 600;
     al.limit_fragments = 2;
-    TEST_ASSERT_EQUAL_PTR(NULL, resource.allocate(resource.user_reference, 100));
+    TEST_ASSERT_EQUAL_PTR(NULL, mem_res_alloc(resource, 100));
     TEST_ASSERT_EQUAL_size_t(2, al.allocated_fragments);
     TEST_ASSERT_EQUAL_size_t(579, al.allocated_bytes);
+    TEST_ASSERT_EQUAL_UINT64(3, al.count_alloc);
+    TEST_ASSERT_EQUAL_UINT64(0, al.count_free);
 
-    TEST_ASSERT_EQUAL_PTR(NULL, resource.allocate(resource.user_reference, 21));
+    TEST_ASSERT_EQUAL_PTR(NULL, mem_res_alloc(resource, 21));
     TEST_ASSERT_EQUAL_size_t(2, al.allocated_fragments);
     TEST_ASSERT_EQUAL_size_t(579, al.allocated_bytes);
+    TEST_ASSERT_EQUAL_UINT64(4, al.count_alloc);
+    TEST_ASSERT_EQUAL_UINT64(0, al.count_free);
 
     al.limit_fragments = 4;
-    void* c = resource.allocate(resource.user_reference, 21);
+    void* c = mem_res_alloc(resource, 21);
     TEST_ASSERT_EQUAL_size_t(3, al.allocated_fragments);
     TEST_ASSERT_EQUAL_size_t(600, al.allocated_bytes);
+    TEST_ASSERT_EQUAL_UINT64(5, al.count_alloc);
+    TEST_ASSERT_EQUAL_UINT64(0, al.count_free);
 
-    resource.deallocate(resource.user_reference, 123, a);
+    mem_res_free(resource, 123, a);
     TEST_ASSERT_EQUAL_size_t(2, al.allocated_fragments);
     TEST_ASSERT_EQUAL_size_t(477, al.allocated_bytes);
+    TEST_ASSERT_EQUAL_UINT64(5, al.count_alloc);
+    TEST_ASSERT_EQUAL_UINT64(1, al.count_free);
 
-    void* d = resource.allocate(resource.user_reference, 100);
+    void* d = mem_res_alloc(resource, 100);
     TEST_ASSERT_EQUAL_size_t(3, al.allocated_fragments);
     TEST_ASSERT_EQUAL_size_t(577, al.allocated_bytes);
+    TEST_ASSERT_EQUAL_UINT64(6, al.count_alloc);
+    TEST_ASSERT_EQUAL_UINT64(1, al.count_free);
 
-    resource.deallocate(resource.user_reference, 21, c);
+    mem_res_free(resource, 21, c);
     TEST_ASSERT_EQUAL_size_t(2, al.allocated_fragments);
     TEST_ASSERT_EQUAL_size_t(556, al.allocated_bytes);
+    TEST_ASSERT_EQUAL_UINT64(6, al.count_alloc);
+    TEST_ASSERT_EQUAL_UINT64(2, al.count_free);
 
-    resource.deallocate(resource.user_reference, 100, d);
+    mem_res_free(resource, 100, d);
     TEST_ASSERT_EQUAL_size_t(1, al.allocated_fragments);
     TEST_ASSERT_EQUAL_size_t(456, al.allocated_bytes);
+    TEST_ASSERT_EQUAL_UINT64(6, al.count_alloc);
+    TEST_ASSERT_EQUAL_UINT64(3, al.count_free);
 
-    resource.deallocate(resource.user_reference, 456, b);
+    mem_res_free(resource, 456, b);
     TEST_ASSERT_EQUAL_size_t(0, al.allocated_fragments);
     TEST_ASSERT_EQUAL_size_t(0, al.allocated_bytes);
+    TEST_ASSERT_EQUAL_UINT64(6, al.count_alloc);
+    TEST_ASSERT_EQUAL_UINT64(4, al.count_free);
 }
 
 void setUp(void) {}
@@ -65,6 +87,6 @@ void tearDown(void) {}
 int main(void)
 {
     UNITY_BEGIN();
-    RUN_TEST(testInstrumentedAllocator);
+    RUN_TEST(test_instrumented_allocator);
     return UNITY_END();
 }
diff --git a/tests/src/test_integration_sockets.cpp b/tests/src/test_integration_sockets.cpp
new file mode 100644
index 0000000..d9255bb
--- /dev/null
+++ b/tests/src/test_integration_sockets.cpp
@@ -0,0 +1,703 @@
+/// This software is distributed under the terms of the MIT License.
+/// Copyright (C) OpenCyphal Development Team
+/// Copyright Amazon.com Inc. or its affiliates.
+/// SPDX-License-Identifier: MIT
+///
+/// Integration test that verifies end-to-end behavior with frame capture/injection,
+/// random packet loss, and reordering simulation.
+
+#include <udpard.h>
+#include "helpers.h"
+#include <unity.h>
+
+#include <algorithm>
+#include <array>
+#include <cstdint>
+#include <cstring>
+#include <random>
+#include <vector>
+
+namespace {
+
+// Brief network simulator with loss/reorder support.
+class NetworkSimulator
+{
+  public:
+    NetworkSimulator(const double loss_rate, const bool enable_reorder, const uint32_t seed = 1U)
+        : loss_rate_(std::clamp(loss_rate, 0.0, 1.0))
+        , enable_reorder_(enable_reorder)
+        , rng_(seed)
+        , drop_(loss_rate_)
+    {
+    }
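+
+    // The simulator is fully deterministic for a given seed (std::mt19937 driving a
+    // Bernoulli drop distribution), so a failing loss/reorder run can be replayed
+    // exactly by reusing the same seed; the tests below pass rand() as the seed after
+    // seeding the PRNG once per test.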
+
+    // Shuffle frames to simulate reordering.
+    template <typename T>
+    void shuffle(std::vector<T>& items)
+    {
+        if (enable_reorder_ && (items.size() > 1U)) {
+            std::shuffle(items.begin(), items.end(), rng_);
+            reordered_ = true;
+        }
+    }
+
+    // Decide whether to drop; guarantee at least one drop if loss is enabled.
+    bool drop_next(const size_t frames_left)
+    {
+        bool drop = (loss_rate_ > 0.0) && drop_(rng_);
+        if ((!drop) && (loss_rate_ > 0.0) && (frames_left == 1U) && (dropped_ == 0U)) {
+            drop = true;
+        }
+        if (drop) {
+            dropped_++;
+        }
+        return drop;
+    }
+
+    [[nodiscard]] size_t dropped() const { return dropped_; }
+    [[nodiscard]] bool   reordered() const { return reordered_; }
+
+  private:
+    double                      loss_rate_;
+    bool                        enable_reorder_;
+    std::mt19937                rng_;
+    std::bernoulli_distribution drop_;
+    size_t                      dropped_   = 0;
+    bool                        reordered_ = false;
+};
+
+// =====================================================================================================================
+// Test context for tracking received transfers
+// =====================================================================================================================
+
+struct ReceivedTransfer
+{
+    std::vector<uint8_t> payload;
+    uint64_t             transfer_id;
+    uint64_t             topic_hash;
+    uint64_t             remote_uid;
+    size_t               payload_size_wire;
+};
+
+struct TestContext
+{
+    std::vector<ReceivedTransfer> received_transfers;
+    size_t                        collisions = 0;
+};
+
+// =====================================================================================================================
+// Captured frame for TX ejection
+// =====================================================================================================================
+
+struct CapturedFrame
+{
+    std::vector<uint8_t> data;
+    uint_fast8_t         iface_index;
+};
+
+// =====================================================================================================================
+// Callbacks
+// =====================================================================================================================
+
+bool capture_frame_impl(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection)
+{
+    auto* frames = static_cast<std::vector<CapturedFrame>*>(tx->user);
+    if (frames == nullptr) {
+        return false;
+    }
+
+    CapturedFrame frame{};
+    frame.data.assign(static_cast<const uint8_t*>(ejection->datagram.data),
+                      static_cast<const uint8_t*>(ejection->datagram.data) + ejection->datagram.size);
+    frame.iface_index = ejection->iface_index;
+    frames->push_back(frame);
+
+    return true;
+}
+bool capture_frame_subject(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection)
+{
+    return capture_frame_impl(tx, ejection);
+}
+bool capture_frame_p2p(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t /*dest*/)
+{
+    return capture_frame_impl(tx, ejection);
+}
+
+constexpr udpard_tx_vtable_t tx_vtable{ .eject_subject = &capture_frame_subject, .eject_p2p = &capture_frame_p2p };
+
+void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer)
+{
+    auto* ctx = static_cast<TestContext*>(rx->user);
+    if (ctx != nullptr) {
+        ReceivedTransfer rt{};
+        rt.transfer_id       = transfer.transfer_id;
+        rt.topic_hash        = port->topic_hash;
+        rt.remote_uid        = transfer.remote.uid;
+        rt.payload_size_wire = transfer.payload_size_wire;
+
+        rt.payload.resize(transfer.payload_size_stored);
+        const udpard_fragment_t* cursor = transfer.payload;
+        (void)udpard_fragment_gather(&cursor, 0, transfer.payload_size_stored, rt.payload.data());
+
+        ctx->received_transfers.push_back(std::move(rt));
+    }
+
+    udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment));
+}
+
+void on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const, const udpard_remote_t)
+{
+    auto* ctx = static_cast<TestContext*>(rx->user);
+    if (ctx != nullptr) {
+        ctx->collisions++;
+    }
+}
+
+constexpr udpard_rx_port_vtable_t rx_port_vtable{ .on_message = &on_message, .on_collision = &on_collision };
+
+// =====================================================================================================================
+// Fixtures and helpers
+// =====================================================================================================================
+
+// Build a random payload of requested size.
+std::vector<uint8_t> make_payload(const size_t size)
+{
+    std::vector<uint8_t> payload(size);
+    for (auto& byte : payload) {
+        byte = static_cast<uint8_t>(rand() % 256);
+    }
+    return payload;
+}
+
+// Simple TX owner that captures frames.
+struct TxFixture
+{
+    instrumented_allocator_t   transfer{};
+    instrumented_allocator_t   payload{};
+    udpard_tx_mem_resources_t  mem{};
+    udpard_tx_t                tx{};
+    std::vector<CapturedFrame> frames;
+
+    void init(const uint64_t uid, const uint64_t timeout, const uint16_t mtu)
+    {
+        instrumented_allocator_new(&transfer);
+        instrumented_allocator_new(&payload);
+
+        mem.transfer = instrumented_allocator_make_resource(&transfer);
+        for (auto& res : mem.payload) {
+            res = instrumented_allocator_make_resource(&payload);
+        }
+
+        TEST_ASSERT_TRUE(udpard_tx_new(&tx, uid, timeout, mtu, mem, &tx_vtable));
+        tx.user = &frames;
+    }
+
+    void fini()
+    {
+        udpard_tx_free(&tx);
+        TEST_ASSERT_EQUAL_size_t(0, transfer.allocated_fragments);
+        TEST_ASSERT_EQUAL_size_t(0, payload.allocated_fragments);
+    }
+};
+
+// Simple RX owner with context.
+struct RxFixture
+{
+    instrumented_allocator_t  session{};
+    instrumented_allocator_t  fragment{};
+    udpard_rx_mem_resources_t mem{};
+    udpard_rx_t               rx{};
+    TestContext               ctx{};
+
+    void init()
+    {
+        instrumented_allocator_new(&session);
+        instrumented_allocator_new(&fragment);
+        mem.session  = instrumented_allocator_make_resource(&session);
+        mem.fragment = instrumented_allocator_make_resource(&fragment);
+        udpard_rx_new(&rx, nullptr);
+        rx.user = &ctx;
+    }
+
+    void fini() const
+    {
+        TEST_ASSERT_EQUAL_size_t(0, session.allocated_fragments);
+        TEST_ASSERT_EQUAL_size_t(0, fragment.allocated_fragments);
+    }
+};
+
+// Create a subject port.
+udpard_rx_port_t make_subject_port(const uint64_t topic_hash, const size_t extent, RxFixture& rx)
+{
+    udpard_rx_port_t port{};
+    TEST_ASSERT_TRUE(udpard_rx_port_new(&port, topic_hash, extent, udpard_rx_unordered, 0, rx.mem, &rx_port_vtable));
+    return port;
+}
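+
+// Typical fixture lifecycle, for orientation (mirrors the tests below; no new API):
+//   TxFixture pub{};  pub.init(uid, tx_timeout, mtu);   // allocators + TX pipeline
+//   RxFixture sub{};  sub.init();                       // allocators + RX pipeline
+//   ... udpard_tx_push(...), udpard_tx_poll(...), then deliver pub.frames ...
+//   pub.fini();       sub.fini();                       // both assert zero leaks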
+
+// =====================================================================================================================
+// Helper to deliver frames with optional loss/reorder.
+void deliver_frames(std::vector<CapturedFrame>       frames,
+                    udpard_rx_t*                     rx,
+                    udpard_rx_port_t*                port,
+                    const udpard_rx_mem_resources_t& rx_mem,
+                    const udpard_udpip_ep_t&         src_ep,
+                    udpard_us_t                      now,
+                    NetworkSimulator*                sim = nullptr)
+{
+    if (sim != nullptr) {
+        sim->shuffle(frames);
+    }
+    const size_t total = frames.size();
+    for (size_t i = 0; i < total; i++) {
+        if ((sim != nullptr) && sim->drop_next(total - i)) {
+            now++;
+            continue;
+        }
+
+        const auto& frame = frames[i];
+        const udpard_deleter_t deleter{ .vtable = &rx_mem.fragment.vtable->base, .context = rx_mem.fragment.context };
+        void* dgram = mem_res_alloc(rx_mem.fragment, frame.data.size());
+        TEST_ASSERT_NOT_NULL(dgram);
+        std::memcpy(dgram, frame.data.data(), frame.data.size());
+
+        const udpard_bytes_mut_t dgram_view{ frame.data.size(), dgram };
+
+        TEST_ASSERT_TRUE(udpard_rx_port_push(rx, port, now, src_ep, dgram_view, deleter, frame.iface_index));
+        now++;
+    }
+    udpard_rx_poll(rx, now);
+}
+
+// =====================================================================================================================
+// Tests
+// =====================================================================================================================
+
+/// Basic single-frame transfer end-to-end
+void test_single_frame_transfer()
+{
+    seed_prng();
+
+    constexpr uint64_t publisher_uid = 0x1111222233334444ULL;
+    constexpr uint64_t topic_hash    = 0x0123456789ABCDEFULL;
+    constexpr uint64_t transfer_id   = 42U;
+
+    // Set up publisher.
+    TxFixture pub{};
+    pub.init(publisher_uid, 100U, 256);
+
+    // Set up subscriber.
+    RxFixture sub{};
+    sub.init();
+    udpard_rx_port_t sub_port = make_subject_port(topic_hash, 4096, sub);
+
+    // Send a small payload.
+    const std::vector<uint8_t>     payload      = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };
+    const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size());
+
+    const udpard_us_t now      = 1000000;
+    const udpard_us_t deadline = now + 1000000;
+
+    TEST_ASSERT_TRUE(udpard_tx_push(&pub.tx,
+                                    now,
+                                    deadline,
+                                    1U, // iface_bitmap: interface 0 only
+                                    udpard_prio_nominal,
+                                    topic_hash,
+                                    transfer_id,
+                                    payload_view,
+                                    nullptr,
+                                    UDPARD_USER_CONTEXT_NULL));
+
+    udpard_tx_poll(&pub.tx, now, UDPARD_IFACE_BITMAP_ALL);
+    TEST_ASSERT_EQUAL_size_t(1, pub.frames.size());
+
+    // Deliver frames to subscriber.
+    const udpard_udpip_ep_t src_ep{ .ip = 0x7F000001, .port = 12345 };
+    deliver_frames(pub.frames, &sub.rx, &sub_port, sub.mem, src_ep, now);
+
+    // Verify transfer.
+    TEST_ASSERT_EQUAL_size_t(1, sub.ctx.received_transfers.size());
+    TEST_ASSERT_EQUAL_UINT64(transfer_id, sub.ctx.received_transfers[0].transfer_id);
+    TEST_ASSERT_EQUAL_UINT64(topic_hash, sub.ctx.received_transfers[0].topic_hash);
+    TEST_ASSERT_EQUAL_UINT64(publisher_uid, sub.ctx.received_transfers[0].remote_uid);
+    TEST_ASSERT_EQUAL_size_t(payload.size(), sub.ctx.received_transfers[0].payload.size());
+    TEST_ASSERT_EQUAL_MEMORY(payload.data(), sub.ctx.received_transfers[0].payload.data(), payload.size());
+    TEST_ASSERT_EQUAL_size_t(0, sub.ctx.collisions);
+
+    // Cleanup.
+    udpard_rx_port_free(&sub.rx, &sub_port);
+    pub.fini();
+    sub.fini();
+}
+
+/// Large multi-frame transfer end-to-end
+void test_multi_frame_transfer()
+{
+    seed_prng();
+
+    constexpr uint64_t publisher_uid = 0x5555666677778888ULL;
+    constexpr uint64_t topic_hash    = 0xFEDCBA9876543210ULL;
+    constexpr size_t   payload_size  = 50000; // Large enough to require many frames
+
+    // Set up publisher.
+    TxFixture pub{};
+    pub.init(publisher_uid, 200U, 512);
+
+    // Set up subscriber.
+    RxFixture sub{};
+    sub.init();
+    udpard_rx_port_t sub_port = make_subject_port(topic_hash, payload_size + 1024, sub);
+
+    // Generate random payload.
+    const std::vector<uint8_t> payload = make_payload(payload_size);
+    const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size());
+
+    const udpard_us_t now      = 1000000;
+    const udpard_us_t deadline = now + 5000000;
+
+    TEST_ASSERT_TRUE(udpard_tx_push(&pub.tx,
+                                    now,
+                                    deadline,
+                                    1U, // iface_bitmap
+                                    udpard_prio_nominal,
+                                    topic_hash,
+                                    100,
+                                    payload_view,
+                                    nullptr,
+                                    UDPARD_USER_CONTEXT_NULL));
+
+    udpard_tx_poll(&pub.tx, now, UDPARD_IFACE_BITMAP_ALL);
+    TEST_ASSERT_TRUE(pub.frames.size() > 1U);
+
+    // Deliver frames to subscriber.
+    const udpard_udpip_ep_t src_ep{ .ip = 0x7F000001, .port = 12345 };
+    deliver_frames(pub.frames, &sub.rx, &sub_port, sub.mem, src_ep, now);
+
+    // Verify full transfer.
+    TEST_ASSERT_EQUAL_size_t(1, sub.ctx.received_transfers.size());
+    TEST_ASSERT_EQUAL_size_t(payload_size, sub.ctx.received_transfers[0].payload.size());
+    TEST_ASSERT_EQUAL_MEMORY(payload.data(), sub.ctx.received_transfers[0].payload.data(), payload_size);
+    TEST_ASSERT_EQUAL_size_t(0, sub.ctx.collisions);
+
+    // Cleanup.
+    udpard_rx_port_free(&sub.rx, &sub_port);
+    pub.fini();
+    sub.fini();
+}
+
+/// Multi-frame transfer with random reordering
+void test_multi_frame_with_reordering()
+{
+    seed_prng();
+
+    constexpr uint64_t publisher_uid = 0xABCDEF0123456789ULL;
+    constexpr uint64_t topic_hash    = 0x1234ABCD5678EF00ULL;
+    constexpr size_t   payload_size  = 20000;
+
+    NetworkSimulator sim(0.0, true, static_cast<uint64_t>(rand())); // No loss, deterministic shuffle
+
+    // Set up publisher.
+    TxFixture pub{};
+    pub.init(publisher_uid, 300U, 256);
+
+    // Set up subscriber.
+    RxFixture sub{};
+    sub.init();
+    udpard_rx_port_t sub_port = make_subject_port(topic_hash, payload_size + 1024, sub);
+
+    // Generate random payload and send.
+    const std::vector<uint8_t> payload = make_payload(payload_size);
+    const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size());
+
+    const udpard_us_t now = 1000000;
+    TEST_ASSERT_TRUE(udpard_tx_push(&pub.tx,
+                                    now,
+                                    now + 5000000,
+                                    1U, // iface_bitmap
+                                    udpard_prio_nominal,
+                                    topic_hash,
+                                    50,
+                                    payload_view,
+                                    nullptr,
+                                    UDPARD_USER_CONTEXT_NULL));
+
+    udpard_tx_poll(&pub.tx, now, UDPARD_IFACE_BITMAP_ALL);
+
+    // Deliver reordered frames.
+    const udpard_udpip_ep_t src_ep{ .ip = 0x7F000001, .port = 12345 };
+    deliver_frames(pub.frames, &sub.rx, &sub_port, sub.mem, src_ep, now, &sim);
+
+    // Verify reordering recovery.
+    TEST_ASSERT_EQUAL_size_t(1, sub.ctx.received_transfers.size());
+    TEST_ASSERT_EQUAL_size_t(payload_size, sub.ctx.received_transfers[0].payload.size());
+    TEST_ASSERT_EQUAL_MEMORY(payload.data(), sub.ctx.received_transfers[0].payload.data(), payload_size);
+    TEST_ASSERT_TRUE((pub.frames.size() < 2U) || sim.reordered());
+    TEST_ASSERT_EQUAL_size_t(0, sub.ctx.collisions);
+
+    // Cleanup.
+    udpard_rx_port_free(&sub.rx, &sub_port);
+    pub.fini();
+    sub.fini();
+}
+
+/// Multiple publishers sending to a single subscriber
+void test_multiple_publishers()
+{
+    seed_prng();
+
+    constexpr uint64_t topic_hash            = 0x1234567890ABCDEFULL;
+    constexpr size_t   num_publishers        = 3;
+    constexpr size_t   num_transfers_per_pub = 5;
+    constexpr size_t   payload_size          = 100;
+
+    // Set up subscriber.
+    RxFixture sub{};
+    sub.init();
+    udpard_rx_port_t sub_port = make_subject_port(topic_hash, 1024, sub);
+
+    // Set up publishers and send.
+    std::array<TxFixture, num_publishers> publishers{};
+    std::array<std::vector<std::vector<uint8_t>>, num_publishers> expected_payloads{};
+
+    for (size_t i = 0; i < num_publishers; i++) {
+        const uint64_t uid = 0x1000000000000000ULL + i;
+        publishers[i].init(uid, static_cast<uint64_t>(rand()), 256);
+
+        for (size_t tid = 0; tid < num_transfers_per_pub; tid++) {
+            std::vector<uint8_t> payload = make_payload(payload_size);
+            payload[0] = static_cast<uint8_t>(i);
+            payload[1] = static_cast<uint8_t>(tid);
+            expected_payloads[i].push_back(payload);
+
+            const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size());
+            const udpard_us_t now =
+                1000000LL + (static_cast<int64_t>(i) * 10000LL) + (static_cast<int64_t>(tid) * 100LL);
+            const uint64_t transfer_id = (static_cast<uint64_t>(i) * 1000ULL) + static_cast<uint64_t>(tid);
+
+            TEST_ASSERT_TRUE(udpard_tx_push(&publishers[i].tx,
+                                            now,
+                                            now + 1000000,
+                                            1U, // iface_bitmap
+                                            udpard_prio_nominal,
+                                            topic_hash,
+                                            transfer_id,
+                                            payload_view,
+                                            nullptr,
+                                            UDPARD_USER_CONTEXT_NULL));
+
+            udpard_tx_poll(&publishers[i].tx, now, UDPARD_IFACE_BITMAP_ALL);
+        }
+    }
+
+    // Deliver all frames in publisher order.
+    udpard_us_t now = 2000000;
+    for (size_t pub = 0; pub < num_publishers; pub++) {
+        const udpard_udpip_ep_t src_ep{ static_cast<uint32_t>(0x7F000001U + pub),
+                                        static_cast<uint16_t>(12345U + pub) };
+        deliver_frames(publishers[pub].frames, &sub.rx, &sub_port, sub.mem, src_ep, now);
+        now += publishers[pub].frames.size();
+    }
+
+    // Verify every transfer and payload.
+    const size_t expected_transfers = num_publishers * num_transfers_per_pub;
+    TEST_ASSERT_EQUAL_size_t(expected_transfers, sub.ctx.received_transfers.size());
+    for (size_t i = 0; i < num_publishers; i++) {
+        const uint64_t uid = 0x1000000000000000ULL + i;
+        for (size_t tid = 0; tid < num_transfers_per_pub; tid++) {
+            const uint64_t transfer_id = (static_cast<uint64_t>(i) * 1000ULL) + static_cast<uint64_t>(tid);
+            const auto it = std::find_if(
+                sub.ctx.received_transfers.begin(), sub.ctx.received_transfers.end(), [=](const ReceivedTransfer& rt) {
+                    return (rt.remote_uid == uid) && (rt.transfer_id == transfer_id);
+                });
+            TEST_ASSERT_TRUE(it != sub.ctx.received_transfers.end());
+            TEST_ASSERT_EQUAL_size_t(payload_size, it->payload.size());
+            TEST_ASSERT_EQUAL_MEMORY(expected_payloads[i][tid].data(), it->payload.data(), payload_size);
+        }
+    }
+    TEST_ASSERT_EQUAL_size_t(0, sub.ctx.collisions);
+
+    // Cleanup.
+    udpard_rx_port_free(&sub.rx, &sub_port);
+    for (auto& pub : publishers) {
+        pub.fini();
+    }
+    sub.fini();
+}
+
+/// Multi-frame transfer with simulated packet loss (losing any frame makes the transfer incomplete, so it is dropped)
+void test_partial_frame_loss()
+{
+    seed_prng();
+
+    constexpr uint64_t publisher_uid = 0xDEADBEEFCAFEBABEULL;
+    constexpr uint64_t topic_hash    = 0xABCDEF0123456789ULL;
+    constexpr size_t   payload_size  = 5000; // Multi-frame transfer
+
+    NetworkSimulator sim(0.35, false, static_cast<uint64_t>(rand())); // Ensure some loss
+
+    // Set up publisher.
+    TxFixture pub{};
+    pub.init(publisher_uid, 300U, 256);
+
+    // Set up subscriber.
+    RxFixture sub{};
+    sub.init();
+    udpard_rx_port_t sub_port = make_subject_port(topic_hash, payload_size + 1024, sub);
+
+    // Generate payload and send.
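+    // With a 0.35 per-frame drop probability over a transfer of N frames, the chance of a lossless run is
+    // (1 - 0.35)^N, which is negligible for the ~20+ frames produced here (5000 B over a 256 B MTU); the
+    // sim.dropped() > 0 assertion below makes the expectation explicit. (This note assumes drop_next() makes
+    // a per-frame decision, which is a property of the NetworkSimulator test helper, not of the library.)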
+    const std::vector<uint8_t> payload = make_payload(payload_size);
+    const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size());
+
+    const udpard_us_t now = 1000000;
+    TEST_ASSERT_TRUE(udpard_tx_push(&pub.tx,
+                                    now,
+                                    now + 5000000,
+                                    1U, // iface_bitmap
+                                    udpard_prio_nominal,
+                                    topic_hash,
+                                    50,
+                                    payload_view,
+                                    nullptr,
+                                    UDPARD_USER_CONTEXT_NULL));
+
+    udpard_tx_poll(&pub.tx, now, UDPARD_IFACE_BITMAP_ALL);
+    TEST_ASSERT_TRUE(pub.frames.size() > 1U);
+
+    // Deliver with packet loss.
+    const udpard_udpip_ep_t src_ep{ .ip = 0x7F000001, .port = 12345 };
+    deliver_frames(pub.frames, &sub.rx, &sub_port, sub.mem, src_ep, now, &sim);
+
+    // Verify that the incomplete transfer is dropped.
+    TEST_ASSERT_TRUE(sim.dropped() > 0U);
+    TEST_ASSERT_EQUAL_size_t(0, sub.ctx.received_transfers.size());
+    TEST_ASSERT_EQUAL_size_t(0, sub.ctx.collisions);
+
+    // Cleanup.
+    udpard_rx_port_free(&sub.rx, &sub_port);
+    pub.fini();
+    sub.fini();
+}
+
+/// Test with all frames delivered and no loss (baseline for the loss tests)
+void test_no_loss_baseline()
+{
+    seed_prng();
+
+    constexpr uint64_t publisher_uid = 0xAAAABBBBCCCCDDDDULL;
+    constexpr uint64_t topic_hash    = 0x9999888877776666ULL;
+    constexpr size_t   payload_size  = 10000;
+
+    // Set up publisher.
+    TxFixture pub{};
+    pub.init(publisher_uid, 400U, 256);
+
+    // Set up subscriber.
+    RxFixture sub{};
+    sub.init();
+    udpard_rx_port_t sub_port = make_subject_port(topic_hash, payload_size + 1024, sub);
+
+    // Generate payload and send.
+    const std::vector<uint8_t> payload = make_payload(payload_size);
+    const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size());
+
+    const udpard_us_t now = 1000000;
+    TEST_ASSERT_TRUE(udpard_tx_push(&pub.tx,
+                                    now,
+                                    now + 5000000,
+                                    1U, // iface_bitmap
+                                    udpard_prio_nominal,
+                                    topic_hash,
+                                    75,
+                                    payload_view,
+                                    nullptr,
+                                    UDPARD_USER_CONTEXT_NULL));
+
+    udpard_tx_poll(&pub.tx, now, UDPARD_IFACE_BITMAP_ALL);
+
+    // Deliver all frames.
+    const udpard_udpip_ep_t src_ep{ .ip = 0x7F000001, .port = 12345 };
+    deliver_frames(pub.frames, &sub.rx, &sub_port, sub.mem, src_ep, now);
+
+    // Verify success path.
+    TEST_ASSERT_EQUAL_size_t(1, sub.ctx.received_transfers.size());
+    TEST_ASSERT_EQUAL_size_t(payload_size, sub.ctx.received_transfers[0].payload.size());
+    TEST_ASSERT_EQUAL_MEMORY(payload.data(), sub.ctx.received_transfers[0].payload.data(), payload_size);
+    TEST_ASSERT_EQUAL_size_t(0, sub.ctx.collisions);
+
+    // Cleanup.
+    udpard_rx_port_free(&sub.rx, &sub_port);
+    pub.fini();
+    sub.fini();
+}
+
+/// Test extent-based truncation
+void test_extent_truncation()
+{
+    seed_prng();
+
+    constexpr uint64_t publisher_uid = 0x1234567890ABCDEFULL;
+    constexpr uint64_t topic_hash    = 0xFEDCBA0987654321ULL;
+    constexpr size_t   payload_size  = 5000;
+    constexpr size_t   extent        = 1000; // Less than payload_size
+
+    // Set up publisher.
+    TxFixture pub{};
+    pub.init(publisher_uid, 500U, 256);
+
+    // Set up subscriber with limited extent.
+    RxFixture sub{};
+    sub.init();
+    udpard_rx_port_t sub_port = make_subject_port(topic_hash, extent, sub);
+
+    // Generate payload and send.
+    const std::vector<uint8_t> payload = make_payload(payload_size);
+    const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size());
+
+    const udpard_us_t now = 1000000;
+    TEST_ASSERT_TRUE(udpard_tx_push(&pub.tx,
+                                    now,
+                                    now + 5000000,
+                                    1U, // iface_bitmap
+                                    udpard_prio_nominal,
+                                    topic_hash,
+                                    100,
+                                    payload_view,
+                                    nullptr,
+                                    UDPARD_USER_CONTEXT_NULL));
+
+    udpard_tx_poll(&pub.tx, now, UDPARD_IFACE_BITMAP_ALL);
+
+    // Deliver all frames.
+    const udpard_udpip_ep_t src_ep{ .ip = 0x7F000001, .port = 12345 };
+    deliver_frames(pub.frames, &sub.rx, &sub_port, sub.mem, src_ep, now);
+
+    // Verify truncation.
+    TEST_ASSERT_EQUAL_size_t(1, sub.ctx.received_transfers.size());
+    TEST_ASSERT_TRUE(sub.ctx.received_transfers[0].payload.size() <= (extent + UDPARD_MTU_DEFAULT));
+    TEST_ASSERT_EQUAL_size_t(payload_size, sub.ctx.received_transfers[0].payload_size_wire);
+    TEST_ASSERT_EQUAL_MEMORY(
+        payload.data(), sub.ctx.received_transfers[0].payload.data(), sub.ctx.received_transfers[0].payload.size());
+    TEST_ASSERT_EQUAL_size_t(0, sub.ctx.collisions);
+
+    // Cleanup.
+    udpard_rx_port_free(&sub.rx, &sub_port);
+    pub.fini();
+    sub.fini();
+}
+
+} // namespace
+
+extern "C" void setUp() {}
+extern "C" void tearDown() {}
+
+int main()
+{
+    UNITY_BEGIN();
+    RUN_TEST(test_single_frame_transfer);
+    RUN_TEST(test_multi_frame_transfer);
+    RUN_TEST(test_multi_frame_with_reordering);
+    RUN_TEST(test_multiple_publishers);
+    RUN_TEST(test_partial_frame_loss);
+    RUN_TEST(test_no_loss_baseline);
+    RUN_TEST(test_extent_truncation);
+    return UNITY_END();
+}
diff --git a/tests/src/test_intrusive_crc.c b/tests/src/test_intrusive_crc.c
deleted file mode 100644
index d871865..0000000
--- a/tests/src/test_intrusive_crc.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/// This software is distributed under the terms of the MIT License.
-/// Copyright (C) OpenCyphal Development Team
-/// Copyright Amazon.com Inc. or its affiliates.
-/// SPDX-License-Identifier: MIT
-
-#include <udpard.c> // NOLINT(bugprone-suspicious-include)
-#include <unity.h>
-
-static void testHeaderCRC(void)
-{
-    TEST_ASSERT_EQUAL_UINT16(0x29B1U, headerCRCCompute(9, "123456789"));
-}
-
-static void testTransferCRC(void)
-{
-    uint32_t crc = transferCRCAdd(TRANSFER_CRC_INITIAL, 3, "123");
-    crc          = transferCRCAdd(crc, 6, "456789");
-    TEST_ASSERT_EQUAL_UINT32(0x1CF96D7CUL, crc);
-    TEST_ASSERT_EQUAL_UINT32(0xE3069283UL, crc ^ TRANSFER_CRC_OUTPUT_XOR);
-    crc = transferCRCAdd(crc,
-                         4,
-                         "\x83" // Least significant byte first.
-                         "\x92"
-                         "\x06"
-                         "\xE3");
-    TEST_ASSERT_EQUAL_UINT32(0xB798B438UL, crc);
-    TEST_ASSERT_EQUAL_UINT32(0x48674BC7UL, crc ^ TRANSFER_CRC_OUTPUT_XOR);
-}
-
-void setUp(void) {}
-
-void tearDown(void) {}
-
-int main(void)
-{
-    UNITY_BEGIN();
-    RUN_TEST(testHeaderCRC);
-    RUN_TEST(testTransferCRC);
-    return UNITY_END();
-}
diff --git a/tests/src/test_intrusive_guards.c b/tests/src/test_intrusive_guards.c
new file mode 100644
index 0000000..3a37e93
--- /dev/null
+++ b/tests/src/test_intrusive_guards.c
@@ -0,0 +1,296 @@
+/// This software is distributed under the terms of the MIT License.
+/// Copyright (C) OpenCyphal Development Team
+/// Copyright Amazon.com Inc. or its affiliates.
+/// SPDX-License-Identifier: MIT
+
+#include <udpard.c> // NOLINT(bugprone-suspicious-include)
+#include <unity.h>
+
+// Minimal helpers to avoid heap use in guard paths.
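+// The stubs below never touch the heap: alloc_stub() returns the user pointer itself as the "allocation" and
+// free_noop() discards everything, which is sufficient for guard-path tests that only check input rejection
+// before any allocation result is dereferenced. A typical use looks like this (sketch; assumes the tag object
+// outlives the resource):
+//
+//     static char tag;
+//     const udpard_mem_t mem = make_mem(&tag); // every "allocation" aliases &tag; nothing to free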
+static void free_noop(void* const user, const size_t size, void* const pointer) +{ + (void)user; + (void)size; + (void)pointer; +} + +static void* alloc_stub(void* const user, const size_t size) +{ + (void)size; + return (size > 0U) ? user : NULL; +} + +static void* alloc_alt(void* const user, const size_t size) +{ + (void)size; + return (byte_t*)user + 1; +} + +// Minimal vtables for guard-path allocators. +static const udpard_mem_vtable_t mem_vtable_stub = { .base = { .free = free_noop }, .alloc = alloc_stub }; +static const udpard_mem_vtable_t mem_vtable_alt = { .base = { .free = free_noop }, .alloc = alloc_alt }; +static const udpard_deleter_vtable_t deleter_vtable = { .free = free_noop }; + +static udpard_mem_t make_mem(void* const tag) +{ + const udpard_mem_t out = { .vtable = &mem_vtable_stub, .context = tag }; + return out; +} + +static bool eject_subject_stub(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) +{ + (void)tx; + (void)ejection; + return true; +} + +static bool eject_p2p_stub(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t dest) +{ + (void)tx; + (void)ejection; + (void)dest; + return true; +} + +static void on_message_stub(udpard_rx_t* const rx, udpard_rx_port_t* const port, udpard_rx_transfer_t transfer) +{ + (void)rx; + (void)port; + (void)transfer; +} + +static void on_collision_stub(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_remote_t remote) +{ + (void)rx; + (void)port; + (void)remote; +} + +static void test_mem_endpoint_list_guards(void) +{ + // mem_same covers identical and divergent resources. + static char tag_a; + static char tag_b; + const udpard_mem_t mem_a = make_mem(&tag_a); + const udpard_mem_t mem_b = make_mem(&tag_b); + const udpard_mem_t mem_c = { .vtable = &mem_vtable_alt, .context = &tag_a }; + TEST_ASSERT_TRUE(mem_same(mem_a, mem_a)); + TEST_ASSERT_FALSE(mem_same(mem_a, mem_b)); + TEST_ASSERT_FALSE(mem_same(mem_a, mem_c)); + + // Endpoint validation handles invalid inputs. + TEST_ASSERT_TRUE(udpard_is_valid_endpoint((udpard_udpip_ep_t){ .ip = 1U, .port = UDP_PORT })); + TEST_ASSERT_FALSE(udpard_is_valid_endpoint((udpard_udpip_ep_t){ .ip = 0U, .port = UDP_PORT })); + TEST_ASSERT_FALSE(udpard_is_valid_endpoint((udpard_udpip_ep_t){ .ip = UINT32_MAX, .port = UDP_PORT })); + TEST_ASSERT_FALSE(udpard_is_valid_endpoint((udpard_udpip_ep_t){ .ip = 1U, .port = 0U })); + + // is_listed covers empty and populated state. + udpard_list_t list = { 0 }; + udpard_listed_t member = { 0 }; + TEST_ASSERT_FALSE(is_listed(&list, &member)); + enlist_head(&list, &member); + TEST_ASSERT_TRUE(is_listed(&list, &member)); + // is_listed returns true for non-head members too. + udpard_listed_t tail = { 0 }; + enlist_head(&list, &tail); + TEST_ASSERT_TRUE(is_listed(&list, &member)); + + // NULL endpoint list yields empty bitmap. + TEST_ASSERT_EQUAL_UINT16(0U, valid_ep_bitmap(NULL)); +} + +static void test_fragment_guards(void) +{ + // Null fragments return NULL paths cleanly. + TEST_ASSERT_NULL(udpard_fragment_seek(NULL, 0)); + TEST_ASSERT_NULL(udpard_fragment_next(NULL)); + + // Offsets past the end yield no data. 
+ static const byte_t payload[] = { 1U, 2U }; + udpard_fragment_t frag = { .index_offset = { NULL, { NULL, NULL }, 0 }, + .offset = 4U, + .view = { .size = sizeof(payload), .data = payload }, + .origin = { .size = 0U, .data = NULL }, + .payload_deleter = { 0 } }; + const udpard_fragment_t* cursor = &frag; + byte_t out[2] = { 0 }; + TEST_ASSERT_NULL(udpard_fragment_seek(&frag, frag.offset + frag.view.size)); + TEST_ASSERT_EQUAL_UINT(0, udpard_fragment_gather(NULL, 0, 1, out)); + TEST_ASSERT_EQUAL_UINT(0, udpard_fragment_gather(&cursor, frag.offset + frag.view.size, 1, out)); + // Offsets inside yield the fragment. + TEST_ASSERT_EQUAL_PTR(&frag, udpard_fragment_seek(&frag, frag.offset)); +} + +static void test_header_guard(void) +{ + // Deserializer rejects missing payload pointers. + meta_t meta = { 0 }; + udpard_bytes_t payload; + uint32_t frame_index = 0; + uint32_t frame_offset = 0; + uint32_t prefix_crc = 0; + TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = HEADER_SIZE_BYTES, .data = NULL }, + &meta, + &frame_index, + &frame_offset, + &prefix_crc, + &payload)); +} + +static void test_tx_guards(void) +{ + // Prepare reusable TX resources. + static char tx_tag; + static char payload_tags[UDPARD_IFACE_COUNT_MAX]; + udpard_tx_mem_resources_t mem = { .transfer = make_mem(&tx_tag) }; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + mem.payload[i] = make_mem(&payload_tags[i]); + } + const udpard_tx_vtable_t vt_ok = { .eject_subject = eject_subject_stub, .eject_p2p = eject_p2p_stub }; + + // Reject bad initialization inputs. + udpard_tx_t tx = { 0 }; + TEST_ASSERT_FALSE(udpard_tx_new(NULL, 1U, 0U, 1U, mem, &vt_ok)); + TEST_ASSERT_FALSE(udpard_tx_new(&tx, 0U, 0U, 1U, mem, &vt_ok)); + TEST_ASSERT_FALSE(udpard_tx_new(&tx, 1U, 0U, 1U, mem, NULL)); + udpard_tx_mem_resources_t mem_bad = mem; + mem_bad.payload[0].vtable = NULL; + TEST_ASSERT_FALSE(udpard_tx_new(&tx, 1U, 0U, 1U, mem_bad, &vt_ok)); + const udpard_tx_vtable_t vt_bad_subject = { .eject_subject = NULL, .eject_p2p = eject_p2p_stub }; + TEST_ASSERT_FALSE(udpard_tx_new(&tx, 1U, 0U, 1U, mem, &vt_bad_subject)); + const udpard_tx_vtable_t vt_bad_p2p = { .eject_subject = eject_subject_stub, .eject_p2p = NULL }; + TEST_ASSERT_FALSE(udpard_tx_new(&tx, 1U, 0U, 1U, mem, &vt_bad_p2p)); + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 1U, 0U, 2U, mem, &vt_ok)); + + // Push helpers reject invalid timing and null handles. + const uint16_t iface_bitmap_1 = (1U << 0U); + const udpard_bytes_scattered_t empty_payload = { .bytes = { .size = 0U, .data = NULL }, .next = NULL }; + TEST_ASSERT_FALSE(udpard_tx_push( + &tx, 10, 5, iface_bitmap_1, udpard_prio_fast, 1U, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); + TEST_ASSERT_FALSE(udpard_tx_push( + NULL, 0, 0, iface_bitmap_1, udpard_prio_fast, 1U, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); + TEST_ASSERT_FALSE(udpard_tx_push_p2p( + NULL, 0, 0, udpard_prio_fast, (udpard_remote_t){ 0 }, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL, NULL)); + // Reject invalid payload pointer and empty interface bitmap. 
+ const udpard_bytes_scattered_t bad_payload = { .bytes = { .size = 1U, .data = NULL }, .next = NULL }; + TEST_ASSERT_FALSE( + udpard_tx_push(&tx, 0, 1, iface_bitmap_1, udpard_prio_fast, 1U, 1U, bad_payload, NULL, UDPARD_USER_CONTEXT_NULL)); + TEST_ASSERT_FALSE( + udpard_tx_push(&tx, 0, 1, 0U, udpard_prio_fast, 1U, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); + const udpard_remote_t remote_bad = { .uid = 1, .endpoints = { { 0 } } }; + TEST_ASSERT_FALSE( + udpard_tx_push_p2p(&tx, 0, 1, udpard_prio_fast, remote_bad, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL, NULL)); + + // Poll and refcount no-ops on null data. + udpard_tx_poll(NULL, 0, 0); + udpard_tx_poll(&tx, (udpard_us_t)-1, 0); + // Pending ifaces are zero for NULL. + TEST_ASSERT_EQUAL_UINT16(0U, udpard_tx_pending_ifaces(NULL)); + udpard_tx_refcount_inc((udpard_bytes_t){ .size = 0U, .data = NULL }); + udpard_tx_refcount_dec((udpard_bytes_t){ .size = 0U, .data = NULL }); + udpard_tx_free(NULL); + udpard_tx_free(&tx); +} + +static void test_tx_predictor_sharing(void) +{ + // Shared spool suppresses duplicate frame counts. + static char shared_tag[2]; + const udpard_mem_t mem_shared = make_mem(&shared_tag[0]); + const udpard_mem_t mem_arr[UDPARD_IFACE_COUNT_MAX] = { mem_shared, mem_shared, make_mem(&shared_tag[1]) }; + const size_t mtu[UDPARD_IFACE_COUNT_MAX] = { 64U, 64U, 128U }; + const uint16_t iface_bitmap_12 = (1U << 0U) | (1U << 1U); + TEST_ASSERT_EQUAL_size_t(1U, tx_predict_frame_count(mtu, mem_arr, iface_bitmap_12, 16U)); + // Non-shared spool counts each interface. + const udpard_mem_t mem_arr_split[UDPARD_IFACE_COUNT_MAX] = { make_mem(&shared_tag[0]), + make_mem(&shared_tag[1]), + make_mem(&shared_tag[1]) }; + TEST_ASSERT_EQUAL_size_t(2U, tx_predict_frame_count(mtu, mem_arr_split, iface_bitmap_12, 16U)); +} + +static void test_rx_guards(void) +{ + // RX port creation guards reject invalid parameters. + static char rx_tag_a; + static char rx_tag_b; + const udpard_rx_mem_resources_t rx_mem = { .session = make_mem(&rx_tag_a), .fragment = make_mem(&rx_tag_b) }; + const udpard_rx_port_vtable_t rx_vtb = { .on_message = on_message_stub, .on_collision = on_collision_stub }; + udpard_rx_port_t port; + TEST_ASSERT_FALSE(udpard_rx_port_new(NULL, 0, 0, udpard_rx_ordered, 0, rx_mem, &rx_vtb)); + TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 0, 0, udpard_rx_ordered, 0, rx_mem, NULL)); + const udpard_rx_port_vtable_t rx_vtb_no_msg = { .on_message = NULL, .on_collision = on_collision_stub }; + TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 0, 0, udpard_rx_ordered, 0, rx_mem, &rx_vtb_no_msg)); + udpard_rx_mem_resources_t bad_rx_mem = rx_mem; + bad_rx_mem.session.vtable = NULL; + TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 0, 0, udpard_rx_unordered, 0, bad_rx_mem, &rx_vtb)); + // rx_validate_mem_resources rejects missing hooks. 
+ const udpard_mem_vtable_t vtable_no_free = { .base = { .free = NULL }, .alloc = alloc_stub }; + const udpard_mem_vtable_t vtable_no_alloc = { .base = { .free = free_noop }, .alloc = NULL }; + udpard_rx_mem_resources_t bad_session = rx_mem; + bad_session.session.vtable = &vtable_no_free; + TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_session)); + bad_session.session.vtable = &vtable_no_alloc; + TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_session)); + udpard_rx_mem_resources_t bad_fragment = rx_mem; + bad_fragment.fragment.vtable = &vtable_no_free; + TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_fragment)); + bad_fragment.fragment.vtable = &vtable_no_alloc; + TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_fragment)); + // NOLINTNEXTLINE(clang-analyzer-optin.core.EnumCastOutOfRange) + TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 0, 0, (udpard_rx_mode_t)99, 0, rx_mem, &rx_vtb)); + TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 0, 0, udpard_rx_ordered, (udpard_us_t)-1, rx_mem, &rx_vtb)); + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 0xAA, 8U, udpard_rx_stateless, 0, rx_mem, &rx_vtb)); + + // Invalid datagram inputs are rejected without processing. + udpard_rx_t rx; + udpard_rx_new(&rx, NULL); + TEST_ASSERT_FALSE(udpard_rx_port_push(&rx, + &port, + 0, + (udpard_udpip_ep_t){ 0U, 0U }, + (udpard_bytes_mut_t){ .size = 0U, .data = NULL }, + (udpard_deleter_t){ .vtable = NULL, .context = NULL }, + UDPARD_IFACE_COUNT_MAX)); + const udpard_bytes_mut_t small_payload = { .size = 1U, .data = (void*)1 }; + TEST_ASSERT_FALSE( + udpard_rx_port_push(&rx, + &port, + 0, + (udpard_udpip_ep_t){ .ip = 1U, .port = 1U }, + small_payload, + (udpard_deleter_t){ .vtable = &(udpard_deleter_vtable_t){ .free = NULL }, .context = NULL }, + 0)); + + // Port freeing should tolerate null rx. + udpard_rx_port_free(NULL, &port); + udpard_rx_port_free(&rx, NULL); + + // Fragments past extent are discarded early. + udpard_tree_t* root = NULL; + byte_t buf[1] = { 0 }; + size_t covered = 0; + const rx_frame_base_t frame = { .offset = 1U, + .payload = { .size = sizeof(buf), .data = buf }, + .origin = { .size = sizeof(buf), .data = buf } }; + static char frag_tag; + const udpard_mem_t frag_mem = make_mem(&frag_tag); + const udpard_deleter_t deleter = { .vtable = &deleter_vtable, .context = NULL }; + TEST_ASSERT_EQUAL(rx_fragment_tree_rejected, + rx_fragment_tree_update(&root, frag_mem, deleter, frame, 0U, 0U, &covered)); +} + +void setUp(void) {} + +void tearDown(void) {} + +int main(void) +{ + UNITY_BEGIN(); + RUN_TEST(test_mem_endpoint_list_guards); + RUN_TEST(test_fragment_guards); + RUN_TEST(test_header_guard); + RUN_TEST(test_tx_guards); + RUN_TEST(test_tx_predictor_sharing); + RUN_TEST(test_rx_guards); + return UNITY_END(); +} diff --git a/tests/src/test_intrusive_header.c b/tests/src/test_intrusive_header.c new file mode 100644 index 0000000..0aa1eb7 --- /dev/null +++ b/tests/src/test_intrusive_header.c @@ -0,0 +1,198 @@ +/// This software is distributed under the terms of the MIT License. +/// Copyright (C) OpenCyphal Development Team +/// Copyright Amazon.com Inc. or its affiliates. 
+/// SPDX-License-Identifier: MIT
+
+#include <udpard.c> // NOLINT(bugprone-suspicious-include)
+#include <unity.h>
+
+static void test_header_v2(void)
+{
+    byte_t buffer[64];
+    meta_t meta_in = {
+        .priority              = udpard_prio_high,
+        .flag_reliable         = false,
+        .transfer_payload_size = 0xDEADBEEF,
+        .transfer_id           = 0xAABBCCDDEEFF0011ULL,
+        .sender_uid            = 0x1122334455667788ULL,
+        .topic_hash            = 0x99AABBCCDDEEFF00ULL,
+    };
+    // For the first frame (frame_payload_offset=0), frame_index must also be 0.
+    // Compute the correct prefix_crc from the payload.
+    memset(&buffer[HEADER_SIZE_BYTES], 0, sizeof(buffer) - HEADER_SIZE_BYTES); // Initialize payload
+    const uint32_t payload_crc = crc_full(sizeof(buffer) - HEADER_SIZE_BYTES, &buffer[HEADER_SIZE_BYTES]);
+    header_serialize(buffer, meta_in, 0, 0, payload_crc); // frame_index=0, frame_payload_offset=0 for the first frame
+    memset(&buffer[HEADER_SIZE_BYTES], 0, sizeof(buffer) - HEADER_SIZE_BYTES); // Re-initialize payload to match
+
+    // The exact byte layout is not validated here because prefix_crc is computed dynamically;
+    // we only verify that deserialization round-trips correctly.
+
+    meta_t meta_out;
+    udpard_bytes_t payload_out;
+    uint32_t frame_index          = 0;
+    uint32_t frame_payload_offset = 0;
+    uint32_t prefix_crc           = 0;
+    TEST_ASSERT(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer },
+                                   &meta_out,
+                                   &frame_index,
+                                   &frame_payload_offset,
+                                   &prefix_crc,
+                                   &payload_out));
+    TEST_ASSERT_EQUAL(sizeof(buffer) - HEADER_SIZE_BYTES, payload_out.size);
+    TEST_ASSERT_EQUAL(&buffer[HEADER_SIZE_BYTES], payload_out.data);
+
+    TEST_ASSERT_EQUAL_UINT8(meta_in.priority, meta_out.priority);
+    TEST_ASSERT_FALSE(meta_out.flag_reliable);
+    TEST_ASSERT_EQUAL_UINT32(0, frame_index);          // The first frame has index 0
+    TEST_ASSERT_EQUAL_UINT32(0, frame_payload_offset); // The first frame has offset 0
+    TEST_ASSERT_EQUAL_UINT32(payload_crc, prefix_crc); // For the first frame, prefix_crc equals the payload CRC
+    TEST_ASSERT_EQUAL_UINT32(meta_in.transfer_payload_size, meta_out.transfer_payload_size);
+    TEST_ASSERT_EQUAL_UINT64(meta_in.transfer_id, meta_out.transfer_id);
+    TEST_ASSERT_EQUAL_UINT64(meta_in.sender_uid, meta_out.sender_uid);
+    TEST_ASSERT_EQUAL_UINT64(meta_in.topic_hash, meta_out.topic_hash);
+
+    TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = 23, .data = buffer },
+                                         &meta_out,
+                                         &frame_index,
+                                         &frame_payload_offset,
+                                         &prefix_crc,
+                                         &payload_out));
+
+    TEST_ASSERT(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer },
+                                   &meta_out,
+                                   &frame_index,
+                                   &frame_payload_offset,
+                                   &prefix_crc,
+                                   &payload_out));
+    buffer[HEADER_SIZE_BYTES - 1] ^= 0xFFU; // Corrupt the CRC.
+    TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer },
+                                         &meta_out,
+                                         &frame_index,
+                                         &frame_payload_offset,
+                                         &prefix_crc,
+                                         &payload_out));
+}
+
+static void test_header_deserialize_edge_cases(void)
+{
+    byte_t buffer[64];
+    meta_t meta_in = {
+        .priority              = udpard_prio_nominal,
+        .flag_reliable         = true,
+        .transfer_payload_size = 1000,
+        .transfer_id           = 0x1234567890ABCDEFULL,
+        .sender_uid            = 0xFEDCBA9876543210ULL,
+        .topic_hash            = 0xAAAAAAAAAAAAAAAAULL,
+    };
+
+    meta_t meta_out;
+    udpard_bytes_t payload_out;
+    uint32_t frame_index          = 0;
+    uint32_t frame_payload_offset = 0;
+    uint32_t prefix_crc           = 0;
+
+    // Test an invalid version (version != 2).
+    memset(&buffer[HEADER_SIZE_BYTES], 0, sizeof(buffer) - HEADER_SIZE_BYTES); // Initialize payload
+    const uint32_t payload_crc_v1 = crc_full(sizeof(buffer) - HEADER_SIZE_BYTES, &buffer[HEADER_SIZE_BYTES]);
+    header_serialize(buffer, meta_in, 0, 0, payload_crc_v1);
+    buffer[0] = (buffer[0] & 0xE0U) | 3U; // Set version to 3 instead of 2
+    // Recalculate the CRC for the corrupted header.
+    const uint32_t new_crc = crc_full(HEADER_SIZE_BYTES - CRC_SIZE_BYTES, buffer);
+    buffer[HEADER_SIZE_BYTES - 4] = (byte_t)(new_crc & 0xFFU);
+    buffer[HEADER_SIZE_BYTES - 3] = (byte_t)((new_crc >> 8U) & 0xFFU);
+    buffer[HEADER_SIZE_BYTES - 2] = (byte_t)((new_crc >> 16U) & 0xFFU);
+    buffer[HEADER_SIZE_BYTES - 1] = (byte_t)((new_crc >> 24U) & 0xFFU);
+    TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer },
+                                         &meta_out,
+                                         &frame_index,
+                                         &frame_payload_offset,
+                                         &prefix_crc,
+                                         &payload_out));
+
+    // Test frame_payload_offset validation: offset + payload > transfer_payload_size must be rejected.
+    // For non-first frames, prefix_crc can be any value (not validated).
+    header_serialize(buffer, meta_in, 5, 900, 0x12345678); // frame_index=5, offset=900
+    // The payload size in the buffer after the header is 64-48=16 bytes.
+    // So offset(900) + payload(16) = 916 <= transfer_payload_size(1000), which is OK,
+    // but offset(995) + payload(16) = 1011 > transfer_payload_size(1000) must fail.
+    buffer[8]  = 0xE3; // Change the offset to 995 (0x03E3), little-endian
+    buffer[9]  = 0x03;
+    buffer[10] = 0x00;
+    buffer[11] = 0x00;
+    const uint32_t new_crc2 = crc_full(HEADER_SIZE_BYTES - CRC_SIZE_BYTES, buffer);
+    buffer[HEADER_SIZE_BYTES - 4] = (byte_t)(new_crc2 & 0xFFU);
+    buffer[HEADER_SIZE_BYTES - 3] = (byte_t)((new_crc2 >> 8U) & 0xFFU);
+    buffer[HEADER_SIZE_BYTES - 2] = (byte_t)((new_crc2 >> 16U) & 0xFFU);
+    buffer[HEADER_SIZE_BYTES - 1] = (byte_t)((new_crc2 >> 24U) & 0xFFU);
+    TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer },
+                                         &meta_out,
+                                         &frame_index,
+                                         &frame_payload_offset,
+                                         &prefix_crc,
+                                         &payload_out));
+
+    // Test frame_index != 0 with frame_payload_offset == 0 (invalid).
+    const uint32_t payload_crc_v3 = crc_full(sizeof(buffer) - HEADER_SIZE_BYTES, &buffer[HEADER_SIZE_BYTES]);
+    header_serialize(buffer, meta_in, 1, 0, payload_crc_v3); // frame_index=1, offset=0 is invalid
+    TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer },
+                                         &meta_out,
+                                         &frame_index,
+                                         &frame_payload_offset,
+                                         &prefix_crc,
+                                         &payload_out));
+
+    // Test an invalid prefix_crc on the first frame (offset=0, so prefix_crc must match the payload CRC).
+    header_serialize(buffer, meta_in, 0, 0, 0xDEADBEEF); // Wrong CRC for the first frame
+    TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer },
+                                         &meta_out,
+                                         &frame_index,
+                                         &frame_payload_offset,
+                                         &prefix_crc,
+                                         &payload_out));
+
+    // Test the valid case with the reliable flag (first frame, so prefix_crc must match the payload CRC).
+    const uint32_t payload_crc_v4 = crc_full(sizeof(buffer) - HEADER_SIZE_BYTES, &buffer[HEADER_SIZE_BYTES]);
+    header_serialize(buffer, meta_in, 0, 0, payload_crc_v4);
+    TEST_ASSERT(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer },
+                                   &meta_out,
+                                   &frame_index,
+                                   &frame_payload_offset,
+                                   &prefix_crc,
+                                   &payload_out));
+    TEST_ASSERT_TRUE(meta_out.flag_reliable);
+    TEST_ASSERT_EQUAL_UINT32(payload_crc_v4, prefix_crc);
+
+    // Reject ACK frames with a nonzero offset.
+    meta_in.flag_reliable        = false;
+    meta_in.flag_acknowledgement = true;
+    header_serialize(buffer, meta_in, 1, 1, 0U);
+    TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer },
+                                         &meta_out,
+                                         &frame_index,
+                                         &frame_payload_offset,
+                                         &prefix_crc,
+                                         &payload_out));
+
+    // Reject the ACK + reliable flag combination.
+    meta_in.flag_reliable        = true;
+    meta_in.flag_acknowledgement = true;
+    header_serialize(buffer, meta_in, 0, 0, payload_crc_v4);
+    TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer },
+                                         &meta_out,
+                                         &frame_index,
+                                         &frame_payload_offset,
+                                         &prefix_crc,
+                                         &payload_out));
+}
+
+void setUp(void) {}
+
+void tearDown(void) {}
+
+int main(void)
+{
+    UNITY_BEGIN();
+    RUN_TEST(test_header_v2);
+    RUN_TEST(test_header_deserialize_edge_cases);
+    return UNITY_END();
+}
diff --git a/tests/src/test_intrusive_misc.c b/tests/src/test_intrusive_misc.c
new file mode 100644
index 0000000..e67c1f0
--- /dev/null
+++ b/tests/src/test_intrusive_misc.c
@@ -0,0 +1,164 @@
+/// This software is distributed under the terms of the MIT License.
+/// Copyright (C) OpenCyphal Development Team
+/// Copyright Amazon.com Inc. or its affiliates.
+/// SPDX-License-Identifier: MIT
+
+#include <udpard.c> // NOLINT(bugprone-suspicious-include)
+#include <unity.h>
+
+static void test_crc_streamed(void)
+{
+    uint32_t crc = crc_add(CRC_INITIAL, 3, "123");
+    crc          = crc_add(crc, 6, "456789");
+    TEST_ASSERT_EQUAL_UINT32(0x1CF96D7CUL, crc);
+    TEST_ASSERT_EQUAL_UINT32(0xE3069283UL, crc ^ CRC_OUTPUT_XOR);
+    crc = crc_add(crc, 4, "\x83\x92\x06\xE3"); // Least significant byte first.
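+    // CRC-32C residue property: appending the current CRC value (least significant byte first) to the stream
+    // drives the register to a fixed residue regardless of the preceding message, which the two assertions
+    // below verify both before and after the output XOR.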
+ TEST_ASSERT_EQUAL_UINT32(CRC_RESIDUE_BEFORE_OUTPUT_XOR, crc); + TEST_ASSERT_EQUAL_UINT32(CRC_RESIDUE_AFTER_OUTPUT_XOR, crc ^ CRC_OUTPUT_XOR); +} + +static void test_list(void) +{ + typedef struct test_node_t + { + int value; + udpard_listed_t link; + } test_node_t; + + udpard_list_t list = { .head = NULL, .tail = NULL }; + + // Test 1: Empty list state + TEST_ASSERT_NULL(list.head); + TEST_ASSERT_NULL(list.tail); + + // Test 2: Enlist single item + test_node_t node1 = { .value = 1, .link = { .next = NULL, .prev = NULL } }; + enlist_head(&list, &node1.link); + TEST_ASSERT_EQUAL(&node1.link, list.head); + TEST_ASSERT_EQUAL(&node1.link, list.tail); + TEST_ASSERT_NULL(node1.link.next); + TEST_ASSERT_NULL(node1.link.prev); + + // Test 3: Enlist second item (should become head) + test_node_t node2 = { .value = 2, .link = { .next = NULL, .prev = NULL } }; + enlist_head(&list, &node2.link); + TEST_ASSERT_EQUAL(&node2.link, list.head); + TEST_ASSERT_EQUAL(&node1.link, list.tail); + TEST_ASSERT_EQUAL(&node1.link, node2.link.next); + TEST_ASSERT_NULL(node2.link.prev); + TEST_ASSERT_NULL(node1.link.next); + TEST_ASSERT_EQUAL(&node2.link, node1.link.prev); + + // Test 4: Enlist third item (should become new head) + test_node_t node3 = { .value = 3, .link = { .next = NULL, .prev = NULL } }; + enlist_head(&list, &node3.link); + TEST_ASSERT_EQUAL(&node3.link, list.head); + TEST_ASSERT_EQUAL(&node1.link, list.tail); + TEST_ASSERT_EQUAL(&node2.link, node3.link.next); + TEST_ASSERT_NULL(node3.link.prev); + TEST_ASSERT_EQUAL(&node1.link, node2.link.next); + TEST_ASSERT_EQUAL(&node3.link, node2.link.prev); + + // Test 5: Delist middle item + delist(&list, &node2.link); + TEST_ASSERT_EQUAL(&node3.link, list.head); + TEST_ASSERT_EQUAL(&node1.link, list.tail); + TEST_ASSERT_EQUAL(&node1.link, node3.link.next); + TEST_ASSERT_NULL(node3.link.prev); + TEST_ASSERT_NULL(node1.link.next); + TEST_ASSERT_EQUAL(&node3.link, node1.link.prev); + TEST_ASSERT_NULL(node2.link.next); + TEST_ASSERT_NULL(node2.link.prev); + + // Test 6: Re-enlist previously delisted item (should become head) + enlist_head(&list, &node2.link); + TEST_ASSERT_EQUAL(&node2.link, list.head); + TEST_ASSERT_EQUAL(&node1.link, list.tail); + TEST_ASSERT_EQUAL(&node3.link, node2.link.next); + TEST_ASSERT_NULL(node2.link.prev); + + // Test 7: Move existing item to head (enlist_head can be used for moving) + enlist_head(&list, &node1.link); + TEST_ASSERT_EQUAL(&node1.link, list.head); + TEST_ASSERT_EQUAL(&node3.link, list.tail); + TEST_ASSERT_EQUAL(&node2.link, node1.link.next); + TEST_ASSERT_NULL(node1.link.prev); + TEST_ASSERT_EQUAL(&node3.link, node2.link.next); + TEST_ASSERT_EQUAL(&node1.link, node2.link.prev); + TEST_ASSERT_NULL(node3.link.next); + TEST_ASSERT_EQUAL(&node2.link, node3.link.prev); + + // Test 8: Delist head + delist(&list, &node1.link); + TEST_ASSERT_EQUAL(&node2.link, list.head); + TEST_ASSERT_EQUAL(&node3.link, list.tail); + TEST_ASSERT_NULL(node1.link.next); + TEST_ASSERT_NULL(node1.link.prev); + + // Test 9: Delist tail + delist(&list, &node3.link); + TEST_ASSERT_EQUAL(&node2.link, list.head); + TEST_ASSERT_EQUAL(&node2.link, list.tail); + TEST_ASSERT_NULL(node2.link.next); + TEST_ASSERT_NULL(node2.link.prev); + TEST_ASSERT_NULL(node3.link.next); + TEST_ASSERT_NULL(node3.link.prev); + + // Test 10: Delist last item + delist(&list, &node2.link); + TEST_ASSERT_NULL(list.head); + TEST_ASSERT_NULL(list.tail); + TEST_ASSERT_NULL(node2.link.next); + TEST_ASSERT_NULL(node2.link.prev); + + // Test 11: Delist from empty list (should be 
safe)
+    delist(&list, &node1.link);
+    TEST_ASSERT_NULL(list.head);
+    TEST_ASSERT_NULL(list.tail);
+
+    // Test 12: LIST_MEMBER macro
+    enlist_head(&list, &node1.link);
+    enlist_head(&list, &node2.link);
+    enlist_head(&list, &node3.link);
+    test_node_t* owner = LIST_MEMBER(list.head, test_node_t, link);
+    TEST_ASSERT_EQUAL(&node3, owner);
+    TEST_ASSERT_EQUAL(3, owner->value);
+
+    // Test 13: LIST_TAIL macro
+    test_node_t* tail_owner = LIST_TAIL(list, test_node_t, link);
+    TEST_ASSERT_EQUAL(&node1, tail_owner);
+    TEST_ASSERT_EQUAL(1, tail_owner->value);
+
+    // Test 14: LIST_MEMBER with NULL
+    test_node_t* null_owner = LIST_MEMBER(NULL, test_node_t, link);
+    TEST_ASSERT_NULL(null_owner);
+
+    // Test 15: Traverse the list from head to tail
+    test_node_t* current = LIST_MEMBER(list.head, test_node_t, link);
+    TEST_ASSERT_EQUAL(3, current->value);
+    current = LIST_MEMBER(current->link.next, test_node_t, link);
+    TEST_ASSERT_EQUAL(2, current->value);
+    current = LIST_MEMBER(current->link.next, test_node_t, link);
+    TEST_ASSERT_EQUAL(1, current->value);
+    current = LIST_MEMBER(current->link.next, test_node_t, link);
+    TEST_ASSERT_NULL(current);
+
+    // Clean up.
+    delist(&list, &node1.link);
+    delist(&list, &node2.link);
+    delist(&list, &node3.link);
+    TEST_ASSERT_NULL(list.head);
+    TEST_ASSERT_NULL(list.tail);
+}
+
+void setUp(void) {}
+
+void tearDown(void) {}
+
+int main(void)
+{
+    UNITY_BEGIN();
+    RUN_TEST(test_crc_streamed);
+    RUN_TEST(test_list);
+    return UNITY_END();
+}
diff --git a/tests/src/test_intrusive_rx.c b/tests/src/test_intrusive_rx.c
index cb4f66f..5f9bbe3 100644
--- a/tests/src/test_intrusive_rx.c
+++ b/tests/src/test_intrusive_rx.c
@@ -3,2388 +3,3021 @@
 /// Copyright Amazon.com Inc. or its affiliates.
 /// SPDX-License-Identifier: MIT
-#include <udpard.c> // NOLINT(bugprone-suspicious-include)
+// ReSharper disable CppDFATimeOver
+
+#include <udpard.c> // NOLINT(bugprone-suspicious-include)
 #include "helpers.h"
 #include <unity.h>
-// NOLINTBEGIN(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling)
-
-/// Moves the payload from the origin into a new buffer and attaches is to the newly allocated fragment.
-/// This function performs two allocations. This function is infallible.
-static RxFragment* makeRxFragment(const RxMemory memory,
-                                  const uint32_t frame_index,
-                                  const struct UdpardPayload view,
-                                  const struct UdpardMutablePayload origin,
-                                  RxFragmentTreeNode* const parent)
+static size_t tree_count(udpard_tree_t* const root) // how many make a forest?
{ - TEST_PANIC_UNLESS((view.data >= origin.data) && (view.size <= origin.size)); - TEST_PANIC_UNLESS((((const byte_t*) view.data) + view.size) <= (((const byte_t*) origin.data) + origin.size)); - byte_t* const new_origin = (byte_t*) instrumentedAllocatorAllocate(memory.payload.user_reference, origin.size); - RxFragment* const frag = (RxFragment*) memAlloc(memory.fragment, sizeof(RxFragment)); - if ((new_origin != NULL) && (frag != NULL)) - { - (void) memmove(new_origin, origin.data, origin.size); - (void) memset(frag, 0, sizeof(RxFragment)); - frag->tree.base.lr[0] = NULL; - frag->tree.base.lr[1] = NULL; - frag->tree.base.up = &parent->base; - frag->tree.this = frag; - frag->frame_index = frame_index; - frag->base.origin.data = new_origin; - frag->base.origin.size = origin.size; - frag->base.view.data = new_origin + (((const byte_t*) view.data) - ((byte_t*) origin.data)); - frag->base.view.size = view.size; + size_t count = 0; + for (udpard_tree_t* p = cavl2_min(root); p != NULL; p = cavl2_next_greater(p)) { + count++; } - else - { - TEST_PANIC("Failed to allocate RxFragment"); - } - return frag; + return count; } -/// This is a simple helper wrapper that constructs a new fragment using a null-terminated string as a payload. -static RxFragment* makeRxFragmentString(const RxMemory memory, - const uint32_t frame_index, - const char* const payload, - RxFragmentTreeNode* const parent) +/// Allocates the payload on the heap, emulating normal frame reception. +static rx_frame_base_t make_frame_base(const udpard_mem_t mem_payload, + const size_t offset, + const size_t size, + const void* const payload) { - const size_t sz = strlen(payload); - return makeRxFragment(memory, - frame_index, - (struct UdpardPayload) {.data = payload, .size = sz}, - (struct UdpardMutablePayload) {.data = (void*) payload, .size = sz}, - parent); + void* data = mem_res_alloc(mem_payload, size); + if (size > 0) { + memcpy(data, payload, size); + } + return (rx_frame_base_t){ .offset = offset, + .payload = { .data = data, .size = size }, + .origin = { .data = data, .size = size } }; +} +/// The payload string cannot contain NUL characters. +static rx_frame_base_t make_frame_base_str(const udpard_mem_t mem_payload, + const size_t offset, + const char* const payload) +{ + return make_frame_base(mem_payload, offset, (payload != NULL) ? (strlen(payload) + 1) : 0U, payload); } -static bool compareMemory(const size_t expected_size, - const void* const expected, - const size_t actual_size, - const void* const actual) +/// The created frame will copy the given full transfer payload at the specified offset, of the specified size. +/// The full transfer payload can be invalidated after this call. It is needed here so that we could compute the +/// CRC prefix correctly, which covers the transfer payload bytes in [0,(offset+size)]. 
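+/// For example, a frame carrying transfer bytes [4, 8) is given crc_full over bytes [0, 8) of the full payload
+/// as its prefix CRC; a receiver with contiguous coverage up to offset 8 can therefore validate everything
+/// received so far without waiting for the rest of the transfer. (Illustrative restatement of the rule above.)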
+static rx_frame_t make_frame(const meta_t meta, + const udpard_mem_t mem_payload, + const void* const full_transfer_payload, + const size_t frame_payload_offset, + const size_t frame_payload_size) { - return (expected_size == actual_size) && (memcmp(expected, actual, expected_size) == 0); + rx_frame_base_t base = make_frame_base(mem_payload, + frame_payload_offset, + frame_payload_size, + (const uint8_t*)full_transfer_payload + frame_payload_offset); + base.crc = crc_full(frame_payload_offset + frame_payload_size, (const uint8_t*)full_transfer_payload); + return (rx_frame_t){ .base = base, .meta = meta }; } -static bool compareStringWithPayload(const char* const expected, const struct UdpardPayload payload) +/// A helper that creates a frame in static storage and returns a reference to it. This is a testing aid. +static rx_frame_t* make_frame_ptr(const meta_t meta, + const udpard_mem_t mem_payload, + const void* const full_transfer_payload, + const size_t frame_payload_offset, + const size_t frame_payload_size) { - return compareMemory(strlen(expected), expected, payload.size, payload.data); + static rx_frame_t frame; + frame = make_frame(meta, mem_payload, full_transfer_payload, frame_payload_offset, frame_payload_size); + return &frame; } -static RxFrameBase makeRxFrameBase(InstrumentedAllocator* const memory_payload, - const uint32_t frame_index, - const bool end_of_transfer, - const struct UdpardPayload view, - const struct UdpardMutablePayload origin) +/// Scans the transfer payload ensuring that its payload exactly matches the reference. +/// The node can be any node in the tree. +static bool transfer_payload_verify(udpard_rx_transfer_t* const transfer, + const size_t payload_size_stored, + const void* const payload, + const size_t payload_size_wire) { - TEST_PANIC_UNLESS((view.data >= origin.data) && (view.size <= origin.size)); - TEST_PANIC_UNLESS((((const byte_t*) view.data) + view.size) <= (((const byte_t*) origin.data) + origin.size)); - RxFrameBase out = {0}; - byte_t* const new_origin = (byte_t*) instrumentedAllocatorAllocate(memory_payload, origin.size); - if (new_origin != NULL) - { - (void) memmove(new_origin, origin.data, origin.size); - out.index = frame_index; - out.end_of_transfer = end_of_transfer; - out.origin.data = new_origin; - out.origin.size = origin.size; - out.payload.data = new_origin + (((const byte_t*) view.data) - ((byte_t*) origin.data)); - out.payload.size = view.size; - } - else - { - TEST_PANIC("Failed to allocate payload buffer for RxFrameBase"); + const udpard_fragment_t* frag = udpard_fragment_seek(transfer->payload, 0); + size_t offset = 0; + while (frag != NULL) { + if (frag->offset != offset) { + return false; + } + if ((offset + frag->view.size) > payload_size_stored) { + return false; + } + if (memcmp(frag->view.data, (const uint8_t*)payload + offset, frag->view.size) != 0) { + return false; + } + offset += frag->view.size; + frag = udpard_fragment_next(frag); } - return out; + return (transfer->payload_size_wire == payload_size_wire) && (offset == payload_size_stored); } -static RxFrameBase makeRxFrameBaseString(InstrumentedAllocator* const memory, - const uint32_t frame_index, - const bool end_of_transfer, - const char* const payload) +// --------------------------------------------- RX FRAGMENT TREE --------------------------------------------- + +static udpard_fragment_t* fragment_at(udpard_tree_t* const root, uint32_t index) { - return makeRxFrameBase(memory, - frame_index, - end_of_transfer, - (struct UdpardPayload) {.data = payload, 
.size = strlen(payload)}, - (struct UdpardMutablePayload) {.data = (void*) payload, .size = strlen(payload)}); + for (udpard_fragment_t* it = (udpard_fragment_t*)cavl2_min(root); it != NULL; + it = (udpard_fragment_t*)cavl2_next_greater(&it->index_offset)) { + if (index-- == 0U) { + return it; + } + } + return NULL; } -static RxFrame makeRxFrameString(InstrumentedAllocator* const memory, - const TransferMetadata meta, - const uint32_t frame_index, - const bool end_of_transfer, - const char* const payload) +static bool fragment_equals(udpard_fragment_t* const frag, + const size_t offset, + const size_t size, + const void* const payload) { - return (RxFrame) {.base = makeRxFrameBaseString(memory, frame_index, end_of_transfer, payload), .meta = meta}; + if ((frag == NULL) || (frag->offset != offset) || (frag->view.size != size)) { + return false; + } + return (size == 0U) || (memcmp(frag->view.data, payload, size) == 0); } -static RxMemory makeRxMemory(InstrumentedAllocator* const fragment, InstrumentedAllocator* const payload) +/// Scans the fragment tree ensuring that its payload exactly matches the reference. +/// The node can be any node in the tree. +static bool fragment_tree_verify(udpard_tree_t* const root, + const size_t payload_size, + const void* const payload, + const uint32_t crc) { - return (RxMemory) {.fragment = instrumentedAllocatorMakeMemoryResource(fragment), - .payload = instrumentedAllocatorMakeMemoryDeleter(payload)}; + // Remove redundancies from the payload tree and check the CRC. + if (!rx_fragment_tree_finalize(root, crc)) { + return false; + } + // Scan the payload tree. + size_t offset = 0; + for (udpard_fragment_t* it = (udpard_fragment_t*)cavl2_min(root); it != NULL; + it = (udpard_fragment_t*)cavl2_next_greater(&it->index_offset)) { + if (it->offset != offset) { + return false; + } + if ((offset + it->view.size) > payload_size) { + return false; + } + if ((it->view.size > 0) && (memcmp(it->view.data, (const uint8_t*)payload + offset, it->view.size) != 0)) { + return false; + } + offset += it->view.size; + } + return offset == payload_size; } -static struct UdpardMutablePayload makeDatagramPayload(InstrumentedAllocator* const memory, - const TransferMetadata meta, - const uint32_t frame_index, - const bool end_of_transfer, - const struct UdpardPayload payload) +/// Reference CRC calculation: +/// >>> from pycyphal.transport.commons.crc import CRC32C +/// >>> hex(CRC32C.new(b"abc\0").value) + "UL" +static void test_rx_fragment_tree_update_a(void) { - struct UdpardMutablePayload pld = {.size = payload.size + HEADER_SIZE_BYTES}; - pld.data = instrumentedAllocatorAllocate(memory, pld.size); - if (pld.data != NULL) + instrumented_allocator_t alloc_frag = { 0 }; + instrumented_allocator_new(&alloc_frag); + const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_payload); + const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); + + // Empty payload test { - (void) memcpy(txSerializeHeader(pld.data, meta, frame_index, end_of_transfer), payload.data, payload.size); + udpard_tree_t* root = NULL; + size_t cov = 0; + rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; + // + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base_str(mem_payload, 0, NULL), + 0, + 0, + &cov); + 
TEST_ASSERT_EQUAL(rx_fragment_tree_done, res); + TEST_ASSERT_EQUAL_size_t(0, cov); + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT_EQUAL(1, tree_count(root)); + // Check the retained payload. + TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->offset); + TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->view.size); + TEST_ASSERT_NULL(fragment_at(root, 1)); + // Check the heap. + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); // bc payload empty + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_alloc); + TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_alloc); + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.count_free); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.count_free); + // Verify the payload and free the tree. + TEST_ASSERT(fragment_tree_verify(root, 0, "", 0)); + udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag)); + // Check the heap. + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_alloc); + TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_alloc); + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_free); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.count_free); // bc payload empty } - else + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_payload); + + // Non-empty payload test with zero extent. { - TEST_PANIC("Failed to allocate datagram payload"); + udpard_tree_t* root = NULL; + size_t cov = 0; + rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; + // Add fragment. + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base_str(mem_payload, 0, "abc"), + 4, + 0, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_done, res); + TEST_ASSERT_EQUAL_size_t(4, cov); + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT_EQUAL(1, tree_count(root)); + // Check the retained payload. + TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->offset); + TEST_ASSERT_EQUAL_size_t(4, fragment_at(root, 0)->view.size); + TEST_ASSERT_NULL(fragment_at(root, 1)); + // Check the heap. + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(1, alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_alloc); + TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_alloc); + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.count_free); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.count_free); + // Verify and free the tree (as in freedom). + TEST_ASSERT(fragment_tree_verify(root, 4, "abc", 0x34940e4cUL)); + udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag)); + // Check the heap. 
+ TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_alloc); + TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_alloc); + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_free); + TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_free); } - return pld; -} + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_payload); -static struct UdpardMutablePayload makeDatagramPayloadString(InstrumentedAllocator* const memory, - const TransferMetadata meta, - const uint32_t frame_index, - const bool end_of_transfer, - const char* const string) -{ - return makeDatagramPayload(memory, - meta, - frame_index, - end_of_transfer, - (struct UdpardPayload) {.data = string, .size = strlen(string)}); -} + // Non-empty payload with non-zero extent. + { + udpard_tree_t* root = NULL; + size_t cov = 0; + rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; + // Add fragment beyond the extent, dropped early. + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base_str(mem_payload, 3, "abcdef"), + 8, + 3, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_rejected, res); + TEST_ASSERT_EQUAL_size_t(0, cov); + TEST_ASSERT_NULL(root); + TEST_ASSERT_EQUAL(0, tree_count(root)); + // Add fragment. + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base_str(mem_payload, 0, "abcdef"), + 7, + 3, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_done, res); + TEST_ASSERT_EQUAL_size_t(7, cov); + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT_EQUAL(1, tree_count(root)); + // Check the retained payload. + TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->offset); + TEST_ASSERT_EQUAL_size_t(7, fragment_at(root, 0)->view.size); + TEST_ASSERT_EQUAL_STRING("abcdef", fragment_at(root, 0)->view.data); + TEST_ASSERT_NULL(fragment_at(root, 1)); + // Check the heap. + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(1, alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_alloc); + TEST_ASSERT_EQUAL_size_t(2, alloc_payload.count_alloc); + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.count_free); + TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_free); + // Free the tree (as in freedom). + TEST_ASSERT(fragment_tree_verify(root, 7, "abcdef", 0x532b03c8UL)); + udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag)); + // Check the heap. 
+ TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_alloc); + TEST_ASSERT_EQUAL_size_t(2, alloc_payload.count_alloc); + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_free); + TEST_ASSERT_EQUAL_size_t(2, alloc_payload.count_free); + } + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_payload); -static struct UdpardMutablePayload makeDatagramPayloadSingleFrame(InstrumentedAllocator* const memory, - const TransferMetadata meta, - const struct UdpardPayload payload) -{ - struct UdpardMutablePayload pld = - makeDatagramPayload(memory, - meta, - 0, - true, - (struct UdpardPayload) {.data = payload.data, - .size = payload.size + TRANSFER_CRC_SIZE_BYTES}); - TEST_PANIC_UNLESS(pld.size == (payload.size + HEADER_SIZE_BYTES + TRANSFER_CRC_SIZE_BYTES)); - txSerializeU32(((byte_t*) pld.data) + HEADER_SIZE_BYTES + payload.size, - transferCRCCompute(payload.size, payload.data)); - return pld; + // Multi-frame reassembly test: "abc def xyz "; the last nul is beyond the extent. + { + udpard_tree_t* root = NULL; + size_t cov = 0; + rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; + // Add fragment. + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base_str(mem_payload, 0, "abc"), + 100, + 10, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); + TEST_ASSERT_EQUAL_size_t(4, cov); + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT_EQUAL(1, tree_count(root)); + // Add fragment. + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base_str(mem_payload, 8, "xyz"), + 100, + 11, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); + TEST_ASSERT_EQUAL_size_t(4, cov); // not extended due to the gap in the middle. + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT_EQUAL(2, tree_count(root)); + // Add fragment. + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base_str(mem_payload, 4, "def"), + 100, + 11, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_done, res); + TEST_ASSERT_EQUAL_size_t(12, cov); // extended to cover the two remaining frames. + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT_EQUAL(3, tree_count(root)); + // Check the retained payload. + TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->offset); + TEST_ASSERT_EQUAL_size_t(4, fragment_at(root, 0)->view.size); + TEST_ASSERT_EQUAL_STRING("abc", fragment_at(root, 0)->view.data); + TEST_ASSERT_EQUAL_size_t(4, fragment_at(root, 1)->offset); + TEST_ASSERT_EQUAL_size_t(4, fragment_at(root, 1)->view.size); + TEST_ASSERT_EQUAL_STRING("def", fragment_at(root, 1)->view.data); + TEST_ASSERT_EQUAL_size_t(8, fragment_at(root, 2)->offset); + TEST_ASSERT_EQUAL_size_t(4, fragment_at(root, 2)->view.size); + TEST_ASSERT_EQUAL_STRING("xyz", fragment_at(root, 2)->view.data); + TEST_ASSERT_NULL(fragment_at(root, 3)); + // Check the heap. + TEST_ASSERT_EQUAL_size_t(3, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(3, alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(3, alloc_frag.count_alloc); + TEST_ASSERT_EQUAL_size_t(3, alloc_payload.count_alloc); + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.count_free); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.count_free); + // Free the tree (as in freedom). 
+ TEST_ASSERT(fragment_tree_verify(root, 12, "abc\0def\0xyz", 0x2758cbe6UL)); + udpard_fragment_free_all(udpard_fragment_seek((udpard_fragment_t*)root, 0), udpard_make_deleter(mem_frag)); + // Check the heap. + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(3, alloc_frag.count_alloc); + TEST_ASSERT_EQUAL_size_t(3, alloc_payload.count_alloc); + TEST_ASSERT_EQUAL_size_t(3, alloc_frag.count_free); + TEST_ASSERT_EQUAL_size_t(3, alloc_payload.count_free); + } + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_payload); + + // Multi-frame reassembly test with defragmentation: "0123456789". + { + udpard_tree_t* root = NULL; + size_t cov = 0; + rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; + // Add fragment. + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 0, 2, "01"), + 100, + 10, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); + TEST_ASSERT_EQUAL_size_t(2, cov); + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT_EQUAL(1, tree_count(root)); + // Add fragment. + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 4, 2, "45"), + 100, + 10, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); + TEST_ASSERT_EQUAL_size_t(2, cov); // not extended + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT_EQUAL(2, tree_count(root)); + // Add fragment. + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 3, 2, "34"), + 100, + 10, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); + TEST_ASSERT_EQUAL_size_t(2, cov); // not extended + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT_EQUAL(3, tree_count(root)); + // Intermediate check on the current state of the tree so far. + TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->offset); + TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 0)->view.size); + TEST_ASSERT_EQUAL_STRING_LEN("01", fragment_at(root, 0)->view.data, 2); + TEST_ASSERT_EQUAL_size_t(3, fragment_at(root, 1)->offset); + TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 1)->view.size); + TEST_ASSERT_EQUAL_STRING_LEN("34", fragment_at(root, 1)->view.data, 2); + TEST_ASSERT_EQUAL_size_t(4, fragment_at(root, 2)->offset); + TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 2)->view.size); + TEST_ASSERT_EQUAL_STRING_LEN("45", fragment_at(root, 2)->view.data, 2); + TEST_ASSERT_NULL(fragment_at(root, 3)); + TEST_ASSERT_EQUAL_size_t(3, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(3, alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(3, alloc_frag.count_alloc); + TEST_ASSERT_EQUAL_size_t(3, alloc_payload.count_alloc); + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.count_free); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.count_free); + // Add fragment. BRIDGE THE LEFT GAP, EVICT `34` FRAGMENT AS REDUNDANT. + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 2, 2, "23"), + 100, + 10, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); + TEST_ASSERT_EQUAL_size_t(6, cov); // extended! + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT_EQUAL(3, tree_count(root)); + // Check the updated tree state after the eviction. Fragment `34` should be gone. 
+ TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->offset); + TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 0)->view.size); + TEST_ASSERT_EQUAL_STRING_LEN("01", fragment_at(root, 0)->view.data, 2); + TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 1)->offset); + TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 1)->view.size); + TEST_ASSERT_EQUAL_STRING_LEN("23", fragment_at(root, 1)->view.data, 2); + TEST_ASSERT_EQUAL_size_t(4, fragment_at(root, 2)->offset); + TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 2)->view.size); + TEST_ASSERT_EQUAL_STRING_LEN("45", fragment_at(root, 2)->view.data, 2); + TEST_ASSERT_NULL(fragment_at(root, 3)); + TEST_ASSERT_EQUAL_size_t(3, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(3, alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(4, alloc_frag.count_alloc); + TEST_ASSERT_EQUAL_size_t(4, alloc_payload.count_alloc); + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_free); + TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_free); + // Add a fully-contained (redundant) fragment. Should be discarded. + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 1, 1, "z"), + 100, + 10, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_rejected, res); + TEST_ASSERT_EQUAL_size_t(6, cov); // no new information is added + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT_EQUAL(3, tree_count(root)); // no new frames added + TEST_ASSERT_EQUAL_size_t(3, alloc_frag.allocated_fragments); // no new allocations + TEST_ASSERT_EQUAL_size_t(3, alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(4, alloc_frag.count_alloc); + TEST_ASSERT_EQUAL_size_t(5, alloc_payload.count_alloc); // the payload was briefly allocated and discarded + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_free); + TEST_ASSERT_EQUAL_size_t(2, alloc_payload.count_free); // yeah, discarded + // Add fragment. Slight overlap on the right, candidate for eviction in the future. + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 5, 2, "56"), + 100, + 10, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); + TEST_ASSERT_EQUAL_size_t(7, cov); // extended by 1 byte + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT_EQUAL(4, tree_count(root)); + // Check the updated tree state. + TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->offset); + TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 0)->view.size); + TEST_ASSERT_EQUAL_STRING_LEN("01", fragment_at(root, 0)->view.data, 2); + TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 1)->offset); + TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 1)->view.size); + TEST_ASSERT_EQUAL_STRING_LEN("23", fragment_at(root, 1)->view.data, 2); + TEST_ASSERT_EQUAL_size_t(4, fragment_at(root, 2)->offset); + TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 2)->view.size); + TEST_ASSERT_EQUAL_STRING_LEN("45", fragment_at(root, 2)->view.data, 2); + TEST_ASSERT_EQUAL_size_t(5, fragment_at(root, 3)->offset); + TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 3)->view.size); + TEST_ASSERT_EQUAL_STRING_LEN("56", fragment_at(root, 3)->view.data, 2); + TEST_ASSERT_NULL(fragment_at(root, 4)); + TEST_ASSERT_EQUAL_size_t(4, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(4, alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(5, alloc_frag.count_alloc); + TEST_ASSERT_EQUAL_size_t(6, alloc_payload.count_alloc); + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_free); + TEST_ASSERT_EQUAL_size_t(2, alloc_payload.count_free); + // Add fragment. 
Completes the transfer and evicts redundant `45` and `56` fragments. + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 4, 8, "456789--"), + 100, + 10, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_done, res); + TEST_ASSERT_EQUAL_size_t(12, cov); // extended all the way, beyond the extent. + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT_EQUAL(3, tree_count(root)); // the tree shrunk due to evictions + // Check the updated tree state. + TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->offset); + TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 0)->view.size); + TEST_ASSERT_EQUAL_STRING_LEN("01", fragment_at(root, 0)->view.data, 2); + TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 1)->offset); + TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 1)->view.size); + TEST_ASSERT_EQUAL_STRING_LEN("23", fragment_at(root, 1)->view.data, 2); + TEST_ASSERT_EQUAL_size_t(4, fragment_at(root, 2)->offset); + TEST_ASSERT_EQUAL_size_t(8, fragment_at(root, 2)->view.size); + TEST_ASSERT_EQUAL_STRING_LEN("456789--", fragment_at(root, 2)->view.data, 8); + TEST_ASSERT_NULL(fragment_at(root, 3)); + TEST_ASSERT_EQUAL_size_t(3, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(3, alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(6, alloc_frag.count_alloc); + TEST_ASSERT_EQUAL_size_t(7, alloc_payload.count_alloc); + TEST_ASSERT_EQUAL_size_t(3, alloc_frag.count_free); + TEST_ASSERT_EQUAL_size_t(4, alloc_payload.count_free); + // Free the tree (as in freedom). The free tree is free to manifest its own destiny. + TEST_ASSERT(fragment_tree_verify(root, 12, "0123456789--", 0xc73f3ad8UL)); + udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag)); + // Check the heap. + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(6, alloc_frag.count_alloc); + TEST_ASSERT_EQUAL_size_t(7, alloc_payload.count_alloc); + TEST_ASSERT_EQUAL_size_t(6, alloc_frag.count_free); + TEST_ASSERT_EQUAL_size_t(7, alloc_payload.count_free); + } + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_payload); + + // Multi-frame reassembly test with defragmentation: "abcdefghijklmnopqrst". Split with various MTU: + // + // MTU 4: abcd efgh ijkl mnop qrst + // 0 4 8 12 16 + // + // MTU 5: abcde fghij klmno pqrst + // 0 5 10 15 + // + // MTU 11: abcdefghijk lmnopqrst + // 0 11 + // + // Offset helper: + // abcdefghijklmnopqrst + // 01234567890123456789 + // 00000000001111111111 + { + udpard_tree_t* root = NULL; + size_t cov = 0; + rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; + + // Add fragment. + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 0, 5, "abcde"), + 21, + 21, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); + TEST_ASSERT_EQUAL_size_t(5, cov); + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 5, "abcde")); + TEST_ASSERT_NULL(fragment_at(root, 1)); + + // Add fragment. Rejected because contained by existing. + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 0, 4, "abcd"), + 21, + 21, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_rejected, res); + TEST_ASSERT_EQUAL_size_t(5, cov); + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 5, "abcde")); + TEST_ASSERT_NULL(fragment_at(root, 1)); + + // Add 2 fragments. 
They cover new ground with a gap between them, but they are small and will be replaced later.
+ // Resulting state:
+ // 0 |abcde |
+ // 1 | ijkl |
+ // 2 | mnop |
+ res = rx_fragment_tree_update(&root, //
+ mem_frag,
+ del_payload,
+ make_frame_base(mem_payload, 8, 4, "ijkl"),
+ 21,
+ 21,
+ &cov);
+ TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res);
+ res = rx_fragment_tree_update(&root, //
+ mem_frag,
+ del_payload,
+ make_frame_base(mem_payload, 12, 4, "mnop"),
+ 21,
+ 21,
+ &cov);
+ TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res);
+ TEST_ASSERT_EQUAL_size_t(5, cov); // not extended due to a gap
+ TEST_ASSERT_NOT_NULL(root);
+ TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 5, "abcde"));
+ TEST_ASSERT(fragment_equals(fragment_at(root, 1), 8, 4, "ijkl"));
+ TEST_ASSERT(fragment_equals(fragment_at(root, 2), 12, 4, "mnop"));
+ TEST_ASSERT_NULL(fragment_at(root, 3));
+ TEST_ASSERT_EQUAL_size_t(3, alloc_frag.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(3, alloc_payload.allocated_fragments);
+
+ // Add another fragment that doesn't add any new information but is accepted anyway because it is larger.
+ // This may enable defragmentation in the future.
+ // Resulting state:
+ // 0 |abcde |
+ // 1 | ijkl |
+ // 2 | klmno |
+ // 3 | mnop |
+ res = rx_fragment_tree_update(&root, //
+ mem_frag,
+ del_payload,
+ make_frame_base(mem_payload, 10, 5, "klmno"),
+ 21,
+ 21,
+ &cov);
+ TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res);
+ TEST_ASSERT_EQUAL_size_t(5, cov); // not extended due to a gap
+ TEST_ASSERT_NOT_NULL(root);
+ TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 5, "abcde"));
+ TEST_ASSERT(fragment_equals(fragment_at(root, 1), 8, 4, "ijkl"));
+ TEST_ASSERT(fragment_equals(fragment_at(root, 2), 10, 5, "klmno"));
+ TEST_ASSERT(fragment_equals(fragment_at(root, 3), 12, 4, "mnop"));
+ TEST_ASSERT_NULL(fragment_at(root, 4));
+ TEST_ASSERT_EQUAL_size_t(4, alloc_frag.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(4, alloc_payload.allocated_fragments);
+
+ // Add another fragment that bridges the gap and allows removing ijkl.
+ // Resulting state:
+ // 0 |abcde |
+ // 1 | fghij | replaces the old 1
+ // 2 | klmno |
+ // 3 | mnop | kept because it has 'p'
+ res = rx_fragment_tree_update(&root, //
+ mem_frag,
+ del_payload,
+ make_frame_base(mem_payload, 5, 5, "fghij"),
+ 21,
+ 21,
+ &cov);
+ TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res);
+ TEST_ASSERT_EQUAL_size_t(16, cov); // jumps ahead to 16 because the gap is now covered
+ TEST_ASSERT_NOT_NULL(root);
+ TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 5, "abcde"));
+ TEST_ASSERT(fragment_equals(fragment_at(root, 1), 5, 5, "fghij"));
+ TEST_ASSERT(fragment_equals(fragment_at(root, 2), 10, 5, "klmno"));
+ TEST_ASSERT(fragment_equals(fragment_at(root, 3), 12, 4, "mnop"));
+ TEST_ASSERT_NULL(fragment_at(root, 4));
+ TEST_ASSERT_EQUAL_size_t(4, alloc_frag.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(4, alloc_payload.allocated_fragments);
+
+ // Add the last fragment of the smallest (MTU 4) split. The transfer is still not detected as complete
+ // because the reassembler expects 21 bytes in total while only 20 are covered so far.
+ // Resulting state: + // 0 |abcde | + // 1 | fghij | replaces the old 1 + // 2 | klmno | + // 3 | mnop | kept because it has 'p' + // 4 | qrst| + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 16, 4, "qrst"), + 21, + 21, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); + TEST_ASSERT_EQUAL_size_t(20, cov); // updated + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 5, "abcde")); + TEST_ASSERT(fragment_equals(fragment_at(root, 1), 5, 5, "fghij")); + TEST_ASSERT(fragment_equals(fragment_at(root, 2), 10, 5, "klmno")); + TEST_ASSERT(fragment_equals(fragment_at(root, 3), 12, 4, "mnop")); + TEST_ASSERT(fragment_equals(fragment_at(root, 4), 16, 4, "qrst")); + TEST_ASSERT_NULL(fragment_at(root, 5)); + TEST_ASSERT_EQUAL_size_t(5, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(5, alloc_payload.allocated_fragments); + + // Send redundant fragments. State unchanged. + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 4, 4, "efgh"), + 21, + 21, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_rejected, res); + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 5, 5, "fghij"), + 21, + 21, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_rejected, res); + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 0, 5, "abcde"), + 21, + 21, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_rejected, res); + TEST_ASSERT_EQUAL_size_t(20, cov); // no change + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 5, "abcde")); + TEST_ASSERT(fragment_equals(fragment_at(root, 1), 5, 5, "fghij")); + TEST_ASSERT(fragment_equals(fragment_at(root, 2), 10, 5, "klmno")); + TEST_ASSERT(fragment_equals(fragment_at(root, 3), 12, 4, "mnop")); + TEST_ASSERT(fragment_equals(fragment_at(root, 4), 16, 4, "qrst")); + TEST_ASSERT_NULL(fragment_at(root, 5)); + TEST_ASSERT_EQUAL_size_t(5, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(5, alloc_payload.allocated_fragments); + + // Add the first max-MTU fragment. Replaces the smaller initial fragments. + // Resulting state: + // 0 |abcdefghijk | replaces 0 and 1 + // 1 | klmno | kept because it has 'lmno' + // 2 | mnop | kept because it has 'p' + // 3 | qrst| + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 0, 11, "abcdefghijk"), + 21, + 21, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); + TEST_ASSERT_EQUAL_size_t(20, cov); + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 11, "abcdefghijk")); + TEST_ASSERT(fragment_equals(fragment_at(root, 1), 10, 5, "klmno")); + TEST_ASSERT(fragment_equals(fragment_at(root, 2), 12, 4, "mnop")); + TEST_ASSERT(fragment_equals(fragment_at(root, 3), 16, 4, "qrst")); + TEST_ASSERT_NULL(fragment_at(root, 4)); + TEST_ASSERT_EQUAL_size_t(4, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(4, alloc_payload.allocated_fragments); + + // Add the last MTU 5 fragment. Replaces the last two MTU 4 fragments. 
+ // Resulting state: + // 0 |abcdefghijk | + // 1 | klmno | kept because it has 'lmno' + // 2 | pqrst| + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 15, 5, "pqrst"), + 21, + 21, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); + TEST_ASSERT_EQUAL_size_t(20, cov); + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 11, "abcdefghijk")); + TEST_ASSERT(fragment_equals(fragment_at(root, 1), 10, 5, "klmno")); + TEST_ASSERT(fragment_equals(fragment_at(root, 2), 15, 5, "pqrst")); + TEST_ASSERT_NULL(fragment_at(root, 3)); + TEST_ASSERT_EQUAL_size_t(3, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(3, alloc_payload.allocated_fragments); + + // Add the last max-MTU fragment. Replaces the last two fragments. + // Resulting state: + // 0 |abcdefghijk | + // 1 | lmnopqrst| + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 11, 9, "lmnopqrst"), + 21, + 21, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); + TEST_ASSERT_EQUAL_size_t(20, cov); + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 11, "abcdefghijk")); + TEST_ASSERT(fragment_equals(fragment_at(root, 1), 11, 9, "lmnopqrst")); + TEST_ASSERT_NULL(fragment_at(root, 2)); + TEST_ASSERT_EQUAL_size_t(2, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(2, alloc_payload.allocated_fragments); + + // Replace everything with a single huge fragment. + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 0, 20, "abcdefghijklmnopqrst"), + 21, + 21, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); + TEST_ASSERT_EQUAL_size_t(20, cov); + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 20, "abcdefghijklmnopqrst")); + TEST_ASSERT_NULL(fragment_at(root, 1)); + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(1, alloc_payload.allocated_fragments); + + // One tiny boi will complete the transfer. + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 19, 2, "t-"), + 21, + 21, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_done, res); + TEST_ASSERT_EQUAL_size_t(21, cov); + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 20, "abcdefghijklmnopqrst")); + TEST_ASSERT(fragment_equals(fragment_at(root, 1), 19, 2, "t-")); + TEST_ASSERT_NULL(fragment_at(root, 2)); + TEST_ASSERT_EQUAL_size_t(2, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(2, alloc_payload.allocated_fragments); + + // Verify the final state. + TEST_ASSERT(fragment_tree_verify(root, 21, "abcdefghijklmnopqrst-", 0xe7a60f1eUL)); + + // Cleanup. + udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag)); + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + } + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_payload); } -static struct UdpardMutablePayload makeDatagramPayloadSingleFrameString(InstrumentedAllocator* const memory, - const TransferMetadata meta, - const char* const payload) +/// Exhaustive test for rx_fragment_tree_update with random fragmentation patterns. +/// Tests a fixed payload split into every possible non-empty substring, +/// fed in random order with possible duplicates, and verifies correct completion detection. 
+static void test_rx_fragment_tree_update_exhaustive(void) { - return makeDatagramPayloadSingleFrame(memory, - meta, - (struct UdpardPayload) {.data = payload, .size = strlen(payload)}); -} + instrumented_allocator_t alloc_frag = { 0 }; + instrumented_allocator_new(&alloc_frag); + const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); -// -------------------------------------------------- MISC -------------------------------------------------- + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_payload); + const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); -static void testCompare32(void) -{ - TEST_ASSERT_EQUAL(0, compare32(0, 0)); - TEST_ASSERT_EQUAL(0, compare32(1, 1)); - TEST_ASSERT_EQUAL(0, compare32(0xdeadbeef, 0xdeadbeef)); - TEST_ASSERT_EQUAL(0, compare32(0x0badc0de, 0x0badc0de)); - TEST_ASSERT_EQUAL(0, compare32(0xffffffff, 0xffffffff)); - TEST_ASSERT_EQUAL(+1, compare32(1, 0)); - TEST_ASSERT_EQUAL(+1, compare32(0xffffffff, 0xfffffffe)); - TEST_ASSERT_EQUAL(-1, compare32(0, 1)); - TEST_ASSERT_EQUAL(-1, compare32(0xfffffffe, 0xffffffff)); -} + const char payload[] = "0123456789"; + const size_t payload_length = strlen(payload); -// -------------------------------------------------- FRAME PARSING -------------------------------------------------- - -// Generate reference data using PyCyphal: -// -// >>> from pycyphal.transport.udp import UDPFrame -// >>> from pycyphal.transport import Priority, MessageDataSpecifier, ServiceDataSpecifier -// >>> frame = UDPFrame(priority=Priority.FAST, transfer_id=0xbadc0ffee0ddf00d, index=12345, end_of_transfer=False, -// payload=memoryview(b''), source_node_id=2345, destination_node_id=5432, -// data_specifier=MessageDataSpecifier(7654), user_data=0) -// >>> list(frame.compile_header_and_payload()[0]) -// [1, 2, 41, 9, 56, 21, 230, 29, 13, 240, 221, 224, 254, 15, 220, 186, 57, 48, 0, 0, 0, 0, 224, 60] -static void testParseFrameValidMessage(void) -{ - byte_t data[] = {1, 2, 41, 9, 255, 255, 230, 29, 13, 240, 221, 224, - 254, 15, 220, 186, 57, 48, 0, 0, 0, 0, 30, 179, // - 'a', 'b', 'c'}; - RxFrame rxf = {0}; - TEST_ASSERT(rxParseFrame((struct UdpardMutablePayload) {.data = data, .size = sizeof(data)}, &rxf)); - TEST_ASSERT_EQUAL_UINT64(UdpardPriorityFast, rxf.meta.priority); - TEST_ASSERT_EQUAL_UINT64(2345, rxf.meta.src_node_id); - TEST_ASSERT_EQUAL_UINT64(UDPARD_NODE_ID_UNSET, rxf.meta.dst_node_id); - TEST_ASSERT_EQUAL_UINT64(7654, rxf.meta.data_specifier); - TEST_ASSERT_EQUAL_UINT64(0xbadc0ffee0ddf00d, rxf.meta.transfer_id); - TEST_ASSERT_EQUAL_UINT64(12345, rxf.base.index); - TEST_ASSERT_FALSE(rxf.base.end_of_transfer); - TEST_ASSERT_EQUAL_UINT64(3, rxf.base.payload.size); - TEST_ASSERT_EQUAL_UINT8_ARRAY("abc", rxf.base.payload.data, 3); - TEST_ASSERT_EQUAL_UINT64(sizeof(data), rxf.base.origin.size); - TEST_ASSERT_EQUAL_UINT8_ARRAY(data, rxf.base.origin.data, sizeof(data)); + // Generate all possible non-empty substrings (offset, length pairs). + // For a string of length N, there are N*(N+1)/2 possible substrings. 
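+ // For N == 10 ("0123456789") that is 10 * 11 / 2 == 55 substrings.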
+ typedef struct
+ {
+ size_t offset;
+ size_t length;
+ } substring_t;
+
+ const size_t max_substrings = (payload_length * (payload_length + 1)) / 2;
+ substring_t substrings[max_substrings];
+ size_t substring_count = 0;
+
+ for (size_t offset = 0; offset < payload_length; offset++) {
+ for (size_t length = 1; length <= (payload_length - offset); length++) {
+ substrings[substring_count].offset = offset;
+ substrings[substring_count].length = length;
+ substring_count++;
+ }
+ }
+ TEST_ASSERT_EQUAL_size_t(max_substrings, substring_count);
+
+ // Run multiple randomized test iterations to explore different orderings.
+ // The iteration count is bounded to keep the test runtime reasonable.
+ const size_t num_iterations = 10000;
+
+ for (size_t iteration = 0; iteration < num_iterations; iteration++) {
+ udpard_tree_t* root = NULL;
+ size_t cov = 0;
+
+ // Create a randomized schedule of fragments to feed: a random permutation of all substrings.
+ // This pass has no duplicates (those are exercised separately below); substrings scheduled
+ // after full coverage is reached are never fed because the loop breaks out early.
+
+ // Track which bytes have been covered by submitted fragments.
+ bool byte_covered[10] = { false };
+ bool transfer_complete = false;
+
+ // Shuffle the substring indices to get a random order.
+ size_t schedule[substring_count];
+ for (size_t i = 0; i < substring_count; i++) {
+ schedule[i] = i;
+ }
+
+ // Fisher-Yates shuffle.
+ for (size_t i = substring_count - 1; i > 0; i--) {
+ const size_t j = (size_t)(rand() % (int)(i + 1));
+ const size_t tmp = schedule[i];
+ schedule[i] = schedule[j];
+ schedule[j] = tmp;
+ }
+
+ // Feed fragments in the shuffled order.
+ // We stop after we've seen every byte at least once.
+ for (size_t sched_idx = 0; sched_idx < substring_count; sched_idx++) {
+ const substring_t sub = substrings[schedule[sched_idx]];
+
+ // Allocate and copy the substring payload.
+ char* const frag_data = mem_res_alloc(mem_payload, sub.length);
+ memcpy(frag_data, payload + sub.offset, sub.length);
+
+ const rx_frame_base_t frame = { .offset = sub.offset,
+ .payload = { .data = frag_data, .size = sub.length },
+ .origin = { .data = frag_data, .size = sub.length } };
+
+ const rx_fragment_tree_update_result_t res =
+ rx_fragment_tree_update(&root, mem_frag, del_payload, frame, payload_length, payload_length, &cov);
+
+ // Update our tracking of covered bytes.
+ for (size_t i = 0; i < sub.length; i++) {
+ byte_covered[sub.offset + i] = true;
+ }
+
+ // Check if all bytes are covered.
+ bool all_covered = true;
+ for (size_t i = 0; i < payload_length; i++) {
+ if (!byte_covered[i]) {
+ all_covered = false;
+ break;
+ }
+ }
+ if (all_covered) {
+ TEST_ASSERT_EQUAL(rx_fragment_tree_done, res);
+ transfer_complete = true;
+ break;
+ }
+ TEST_ASSERT((res == rx_fragment_tree_accepted) || (res == rx_fragment_tree_rejected));
+ }
+ TEST_ASSERT_TRUE(transfer_complete);
+ TEST_ASSERT_EQUAL_size_t(payload_length, cov);
+
+ // Verify the final state.
+ TEST_ASSERT(fragment_tree_verify(root, 10, "0123456789", 0x280c069eUL));
+ udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag));
+ TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments);
+ }
+ TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments);
+
+ // Test with duplicates: feed the same fragments multiple times.
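+ // The schedule below is sampled with replacement, so entries repeat. Every payload byte is
+ // contained in at least 10 of the 55 substrings, so with the 3x oversampling the probability
+ // of an iteration ending without full coverage is negligible, keeping the completion
+ // assertion safe.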
+ for (size_t iteration = 0; iteration < num_iterations; iteration++) { + udpard_tree_t* root = NULL; + size_t cov = 0; + + bool byte_covered[10] = { false }; + bool transfer_complete = false; + + // Create a schedule with duplicates. + const size_t schedule_length = substring_count * 3; // 3x duplication factor + size_t schedule[schedule_length]; + for (size_t i = 0; i < schedule_length; i++) { + schedule[i] = (size_t)(rand() % (int)substring_count); + } + + // Feed fragments with duplicates. + for (size_t sched_idx = 0; sched_idx < schedule_length; sched_idx++) { + const substring_t sub = substrings[schedule[sched_idx]]; + + char* const frag_data = mem_res_alloc(mem_payload, sub.length); + memcpy(frag_data, payload + sub.offset, sub.length); + + const rx_frame_base_t frame = { .offset = sub.offset, + .payload = { .data = frag_data, .size = sub.length }, + .origin = { .data = frag_data, .size = sub.length } }; + + const rx_fragment_tree_update_result_t res = + rx_fragment_tree_update(&root, mem_frag, del_payload, frame, payload_length, payload_length, &cov); + + // Update tracking. + for (size_t i = 0; i < sub.length; i++) { + byte_covered[sub.offset + i] = true; + } + + // Check completion. + bool all_covered = true; + for (size_t i = 0; i < payload_length; i++) { + if (!byte_covered[i]) { + all_covered = false; + break; + } + } + if (all_covered) { + TEST_ASSERT_EQUAL(rx_fragment_tree_done, res); + transfer_complete = true; + break; + } + TEST_ASSERT((res == rx_fragment_tree_accepted) || (res == rx_fragment_tree_rejected)); + } + TEST_ASSERT_TRUE(transfer_complete); + TEST_ASSERT_EQUAL_size_t(payload_length, cov); + + // Verify the final state. + TEST_ASSERT(fragment_tree_verify(root, 10, "0123456789", 0x280c069eUL)); + udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag)); + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + } + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); } -static void testParseFrameValidRPCService(void) +static void test_rx_fragment_tree_oom(void) { - // frame = UDPFrame(priority=Priority.FAST, transfer_id=0xbadc0ffee0ddf00d, index=6654, end_of_transfer=False, - // payload=memoryview(b''), source_node_id=2345, destination_node_id=4567, - // data_specifier=ServiceDataSpecifier(role=ServiceDataSpecifier.Role.REQUEST, service_id=123), user_data=0) - byte_t data[] = {1, 2, 41, 9, 215, 17, 123, 192, 13, 240, 221, 224, - 254, 15, 220, 186, 254, 25, 0, 0, 0, 0, 173, 122, // - 'a', 'b', 'c'}; - RxFrame rxf = {0}; - TEST_ASSERT(rxParseFrame((struct UdpardMutablePayload) {.data = data, .size = sizeof(data)}, &rxf)); - TEST_ASSERT_EQUAL_UINT64(UdpardPriorityFast, rxf.meta.priority); - TEST_ASSERT_EQUAL_UINT64(2345, rxf.meta.src_node_id); - TEST_ASSERT_EQUAL_UINT64(4567, rxf.meta.dst_node_id); - TEST_ASSERT_EQUAL_UINT64(123U | DATA_SPECIFIER_SERVICE_NOT_MESSAGE_MASK | - DATA_SPECIFIER_SERVICE_REQUEST_NOT_RESPONSE_MASK, - rxf.meta.data_specifier); - TEST_ASSERT_EQUAL_UINT64(0xbadc0ffee0ddf00d, rxf.meta.transfer_id); - TEST_ASSERT_EQUAL_UINT64(6654, rxf.base.index); - TEST_ASSERT_FALSE(rxf.base.end_of_transfer); - TEST_ASSERT_EQUAL_UINT64(3, rxf.base.payload.size); - TEST_ASSERT_EQUAL_UINT8_ARRAY("abc", rxf.base.payload.data, 3); - TEST_ASSERT_EQUAL_UINT64(sizeof(data), rxf.base.origin.size); - TEST_ASSERT_EQUAL_UINT8_ARRAY(data, rxf.base.origin.data, sizeof(data)); -} + instrumented_allocator_t 
alloc_frag = { 0 }; + instrumented_allocator_new(&alloc_frag); + const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); -static void testParseFrameValidMessageAnonymous(void) -{ - byte_t data[] = {1, 2, 255, 255, 255, 255, 230, 29, 13, 240, 221, 224, - 254, 15, 220, 186, 0, 0, 0, 128, 0, 0, 168, 92, // - 'a', 'b', 'c'}; - RxFrame rxf = {0}; - TEST_ASSERT(rxParseFrame((struct UdpardMutablePayload) {.data = data, .size = sizeof(data)}, &rxf)); - TEST_ASSERT_EQUAL_UINT64(UdpardPriorityFast, rxf.meta.priority); - TEST_ASSERT_EQUAL_UINT64(UDPARD_NODE_ID_UNSET, rxf.meta.src_node_id); - TEST_ASSERT_EQUAL_UINT64(UDPARD_NODE_ID_UNSET, rxf.meta.dst_node_id); - TEST_ASSERT_EQUAL_UINT64(7654, rxf.meta.data_specifier); - TEST_ASSERT_EQUAL_UINT64(0xbadc0ffee0ddf00d, rxf.meta.transfer_id); - TEST_ASSERT_EQUAL_UINT64(0, rxf.base.index); - TEST_ASSERT_TRUE(rxf.base.end_of_transfer); - TEST_ASSERT_EQUAL_UINT64(3, rxf.base.payload.size); - TEST_ASSERT_EQUAL_UINT8_ARRAY("abc", rxf.base.payload.data, 3); - TEST_ASSERT_EQUAL_UINT64(sizeof(data), rxf.base.origin.size); - TEST_ASSERT_EQUAL_UINT8_ARRAY(data, rxf.base.origin.data, sizeof(data)); + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_payload); + const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); + + // Test OOM during fragment allocation + { + udpard_tree_t* root = NULL; + size_t cov = 0; + rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; + + // Set fragment allocation limit to zero - fragment allocation will fail + alloc_frag.limit_fragments = 0; + + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base_str(mem_payload, 0, "abc"), + 100, + 10, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_oom, res); + TEST_ASSERT_EQUAL_size_t(0, cov); + TEST_ASSERT_NULL(root); + // Payload should have been freed + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_alloc); // payload was allocated by make_frame_base_str + TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_free); // but freed due to OOM + } + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_payload); + + // Test OOM during multi-fragment reassembly + { + udpard_tree_t* root = NULL; + size_t cov = 0; + rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; + + // First fragment succeeds + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base_str(mem_payload, 0, "abc"), + 100, + 10, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); + TEST_ASSERT_EQUAL_size_t(4, cov); + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT_EQUAL(1, tree_count(root)); + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(1, alloc_payload.allocated_fragments); + + // Second fragment fails due to OOM + alloc_frag.limit_fragments = 1; // Already used the limit + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base_str(mem_payload, 4, "def"), + 100, + 10, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_oom, res); + TEST_ASSERT_EQUAL_size_t(4, cov); // Coverage unchanged + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT_EQUAL(1, tree_count(root)); // Still only one fragment + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.allocated_fragments); + 
TEST_ASSERT_EQUAL_size_t(1, alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(2, alloc_payload.count_alloc); // second payload was allocated + TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_free); // but freed due to OOM + + // Reset limit and add the second fragment successfully + alloc_frag.limit_fragments = SIZE_MAX; + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base_str(mem_payload, 4, "def"), + 100, + 10, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); + TEST_ASSERT_EQUAL_size_t(8, cov); + TEST_ASSERT_EQUAL(2, tree_count(root)); + TEST_ASSERT_EQUAL_size_t(2, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(2, alloc_payload.allocated_fragments); + + // Cleanup + udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag)); + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + } + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_payload); + + // Test OOM recovery: fragment allocation fails, then succeeds on retry + { + udpard_tree_t* root = NULL; + size_t cov = 0; + rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; + + // First attempt fails + alloc_frag.limit_fragments = 0; + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base_str(mem_payload, 0, "abcdef"), + 7, + 3, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_oom, res); + TEST_ASSERT_EQUAL_size_t(0, cov); + TEST_ASSERT_NULL(root); + + // Second attempt succeeds + alloc_frag.limit_fragments = SIZE_MAX; + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base_str(mem_payload, 0, "abcdef"), + 7, + 3, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_done, res); + TEST_ASSERT_EQUAL_size_t(7, cov); + TEST_ASSERT_NOT_NULL(root); + TEST_ASSERT_EQUAL(1, tree_count(root)); + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(1, alloc_payload.allocated_fragments); + + // Cleanup + udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag)); + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + } + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_payload); } -static void testParseFrameRPCServiceAnonymous(void) +// --------------------------------------------- RX SLOT --------------------------------------------- + +static void test_rx_slot_update(void) { - byte_t data[] = {1, 2, 255, 255, 215, 17, 123, 192, 13, 240, 221, 224, - 254, 15, 220, 186, 254, 25, 0, 0, 0, 0, 75, 79, // - 'a', 'b', 'c'}; - RxFrame rxf = {0}; - TEST_ASSERT_FALSE(rxParseFrame((struct UdpardMutablePayload) {.data = data, .size = sizeof(data)}, &rxf)); + instrumented_allocator_t alloc_frag = { 0 }; + instrumented_allocator_new(&alloc_frag); + const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_payload); + const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); + + uint64_t errors_oom = 0; + uint64_t errors_transfer_malformed = 0; + + // Test 1: Initialize slot from idle state (slot->state != rx_slot_busy branch) + { + rx_slot_t slot = { 0 }; + slot.state = rx_slot_idle; + + rx_frame_t frame = { 0 
}; + frame.base = make_frame_base(mem_payload, 0, 5, "hello"); + frame.base.crc = 0x9a71bb4cUL; // CRC32C for "hello" + frame.meta.transfer_id = 123; + frame.meta.transfer_payload_size = 5; + + const udpard_us_t ts = 1000; + + rx_slot_update(&slot, ts, mem_frag, del_payload, &frame, 5, &errors_oom, &errors_transfer_malformed); + + // Verify slot was initialized + TEST_ASSERT_EQUAL(rx_slot_done, slot.state); // Single-frame transfer completes immediately + TEST_ASSERT_EQUAL(123, slot.transfer_id); + TEST_ASSERT_EQUAL(ts, slot.ts_min); + TEST_ASSERT_EQUAL(ts, slot.ts_max); + TEST_ASSERT_EQUAL_size_t(5, slot.covered_prefix); + TEST_ASSERT_EQUAL(0, errors_oom); + + rx_slot_reset(&slot, mem_frag); + rx_slot_reset(&slot, mem_frag); // idempotent + } + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_payload); + + // Test 2: Multi-frame transfer with timestamp updates (later/earlier branches) + { + rx_slot_t slot = { 0 }; + slot.state = rx_slot_idle; + + // First frame at offset 0 + rx_frame_t frame1 = { 0 }; + frame1.base = make_frame_base(mem_payload, 0, 3, "abc"); + frame1.base.crc = 0x12345678; + frame1.meta.transfer_id = 456; + frame1.meta.transfer_payload_size = 10; + + const udpard_us_t ts1 = 2000; + rx_slot_update(&slot, ts1, mem_frag, del_payload, &frame1, 10, &errors_oom, &errors_transfer_malformed); + + TEST_ASSERT_EQUAL(rx_slot_busy, slot.state); + TEST_ASSERT_EQUAL(ts1, slot.ts_min); + TEST_ASSERT_EQUAL(ts1, slot.ts_max); + TEST_ASSERT_EQUAL_size_t(3, slot.covered_prefix); + TEST_ASSERT_EQUAL(3, slot.crc_end); + TEST_ASSERT_EQUAL(0x12345678, slot.crc); + + // Second frame at offset 5, with later timestamp + rx_frame_t frame2 = { 0 }; + frame2.base = make_frame_base(mem_payload, 5, 3, "def"); + frame2.base.crc = 0x87654321; + frame2.meta.transfer_id = 456; + frame2.meta.transfer_payload_size = 10; + + const udpard_us_t ts2 = 3000; // Later than ts1 + rx_slot_update(&slot, ts2, mem_frag, del_payload, &frame2, 10, &errors_oom, &errors_transfer_malformed); + + TEST_ASSERT_EQUAL(rx_slot_busy, slot.state); + TEST_ASSERT_EQUAL(ts1, slot.ts_min); // Unchanged (ts2 is later) + TEST_ASSERT_EQUAL(ts2, slot.ts_max); // Updated to later time + TEST_ASSERT_EQUAL_size_t(3, slot.covered_prefix); // Still 3 due to gap at [3-5) + TEST_ASSERT_EQUAL(8, slot.crc_end); // Updated to end of frame2 + TEST_ASSERT_EQUAL(0x87654321, slot.crc); // Updated to frame2's CRC + + // Third frame at offset 3 (fills gap), with earlier timestamp + rx_frame_t frame3 = { 0 }; + frame3.base = make_frame_base(mem_payload, 3, 2, "XX"); + frame3.base.crc = 0xAABBCCDD; + frame3.meta.transfer_id = 456; + frame3.meta.transfer_payload_size = 10; + + const udpard_us_t ts3 = 1500; // Earlier than ts1 + rx_slot_update(&slot, ts3, mem_frag, del_payload, &frame3, 10, &errors_oom, &errors_transfer_malformed); + + TEST_ASSERT_EQUAL(rx_slot_busy, slot.state); + TEST_ASSERT_EQUAL(ts3, slot.ts_min); // Updated to earlier time + TEST_ASSERT_EQUAL(ts2, slot.ts_max); // Unchanged (ts3 is earlier) + TEST_ASSERT_EQUAL_size_t(8, slot.covered_prefix); // Now contiguous 0-8 + TEST_ASSERT_EQUAL(8, slot.crc_end); // Unchanged (frame3 doesn't extend beyond frame2) + TEST_ASSERT_EQUAL(0x87654321, slot.crc); // Unchanged (crc_end didn't increase) + + rx_slot_reset(&slot, mem_frag); + } + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_payload); + + // Test 3: OOM handling (tree_res == rx_fragment_tree_oom branch) + { + rx_slot_t slot = { 0 }; + slot.state = rx_slot_idle; + errors_oom = 0; 
+ + // Limit allocations to trigger OOM + alloc_frag.limit_fragments = 0; + + rx_frame_t frame = { 0 }; + frame.base = make_frame_base(mem_payload, 0, 5, "hello"); + frame.base.crc = 0x9a71bb4cUL; // CRC32C for "hello" + frame.meta.transfer_id = 789; + frame.meta.transfer_payload_size = 5; + + rx_slot_update(&slot, 5000, mem_frag, del_payload, &frame, 5, &errors_oom, &errors_transfer_malformed); + + // Verify OOM error was counted + TEST_ASSERT_EQUAL(1, errors_oom); + TEST_ASSERT_EQUAL(rx_slot_busy, slot.state); // Slot initialized but fragment not added + TEST_ASSERT_EQUAL_size_t(0, slot.covered_prefix); // No fragments accepted + + // Restore allocation limit + alloc_frag.limit_fragments = SIZE_MAX; + + rx_slot_reset(&slot, mem_frag); + } + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_payload); + + // Test 4: Malformed transfer handling (CRC failure in rx_fragment_tree_finalize) + { + rx_slot_t slot = { 0 }; + slot.state = rx_slot_idle; + errors_transfer_malformed = 0; + + // Single-frame transfer with incorrect CRC + rx_frame_t frame = { 0 }; + frame.base = make_frame_base(mem_payload, 0, 4, "test"); + frame.base.crc = 0xDEADBEEF; // Incorrect CRC + frame.meta.transfer_id = 999; + frame.meta.transfer_payload_size = 4; + + rx_slot_update(&slot, 6000, mem_frag, del_payload, &frame, 4, &errors_oom, &errors_transfer_malformed); + + // Verify malformed error was counted and slot was reset + TEST_ASSERT_EQUAL(1, errors_transfer_malformed); + TEST_ASSERT_EQUAL(rx_slot_idle, slot.state); // Slot reset after CRC failure + TEST_ASSERT_EQUAL_size_t(0, slot.covered_prefix); + TEST_ASSERT_NULL(slot.fragments); + + rx_slot_reset(&slot, mem_frag); + } + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_payload); + + // Test 5: Successful completion with correct CRC (tree_res == rx_fragment_tree_done, CRC pass) + { + rx_slot_t slot = { 0 }; + slot.state = rx_slot_idle; + errors_transfer_malformed = 0; + errors_oom = 0; + + // Single-frame transfer with correct CRC + // CRC calculation for "test": using Python pycyphal.transport.commons.crc.CRC32C + // >>> from pycyphal.transport.commons.crc import CRC32C + // >>> hex(CRC32C.new(b"test").value) + const uint32_t correct_crc = 0x86a072c0UL; + + rx_frame_t frame = { 0 }; + frame.base = make_frame_base(mem_payload, 0, 4, "test"); + frame.base.crc = correct_crc; + frame.meta.transfer_id = 1111; + frame.meta.transfer_payload_size = 4; + + rx_slot_update(&slot, 7000, mem_frag, del_payload, &frame, 4, &errors_oom, &errors_transfer_malformed); + + // Verify successful completion + TEST_ASSERT_EQUAL(0, errors_transfer_malformed); + TEST_ASSERT_EQUAL(rx_slot_done, slot.state); // Successfully completed + TEST_ASSERT_EQUAL_size_t(4, slot.covered_prefix); + TEST_ASSERT_NOT_NULL(slot.fragments); + + rx_slot_reset(&slot, mem_frag); + } + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_payload); + + // Test 6: CRC end update only when crc_end >= slot->crc_end + { + rx_slot_t slot = { 0 }; + slot.state = rx_slot_idle; + errors_transfer_malformed = 0; + errors_oom = 0; + + // Frame 1 at offset 5 (will set crc_end to 10) + rx_frame_t frame1 = { 0 }; + frame1.base = make_frame_base(mem_payload, 5, 5, "world"); + frame1.base.crc = 0xAAAAAAAA; + frame1.meta.transfer_id = 2222; + frame1.meta.transfer_payload_size = 20; + + rx_slot_update(&slot, 8000, mem_frag, del_payload, &frame1, 20, &errors_oom, &errors_transfer_malformed); + + TEST_ASSERT_EQUAL(10, slot.crc_end); + 
TEST_ASSERT_EQUAL(0xAAAAAAAA, slot.crc); + + // Frame 2 at offset 0 (crc_end would be 3, less than current 10, so CRC shouldn't update) + rx_frame_t frame2 = { 0 }; + frame2.base = make_frame_base(mem_payload, 0, 3, "abc"); + frame2.base.crc = 0xBBBBBBBB; + frame2.meta.transfer_id = 2222; + frame2.meta.transfer_payload_size = 20; + + rx_slot_update(&slot, 8100, mem_frag, del_payload, &frame2, 20, &errors_oom, &errors_transfer_malformed); + + TEST_ASSERT_EQUAL(10, slot.crc_end); // Unchanged + TEST_ASSERT_EQUAL(0xAAAAAAAA, slot.crc); // Unchanged (frame2 didn't update it) + + // Frame 3 at offset 10 (crc_end would be 15, greater than current 10, so CRC should update) + rx_frame_t frame3 = { 0 }; + frame3.base = make_frame_base(mem_payload, 10, 5, "hello"); + frame3.base.crc = 0xCCCCCCCC; + frame3.meta.transfer_id = 2222; + frame3.meta.transfer_payload_size = 20; + + rx_slot_update(&slot, 8200, mem_frag, del_payload, &frame3, 20, &errors_oom, &errors_transfer_malformed); + + TEST_ASSERT_EQUAL(15, slot.crc_end); // Updated + TEST_ASSERT_EQUAL(0xCCCCCCCC, slot.crc); // Updated + + rx_slot_reset(&slot, mem_frag); + } + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_payload); + + // Test 7: Inconsistent frame fields; suspicious transfer rejected. + { + rx_slot_t slot = { 0 }; + slot.state = rx_slot_idle; + errors_transfer_malformed = 0; + errors_oom = 0; + + // First frame initializes the slot with transfer_payload_size=20 and priority=udpard_prio_high + rx_frame_t frame1 = { 0 }; + frame1.base = make_frame_base(mem_payload, 0, 5, "hello"); + frame1.base.crc = 0x12345678; + frame1.meta.transfer_id = 3333; + frame1.meta.transfer_payload_size = 20; + frame1.meta.priority = udpard_prio_high; + + rx_slot_update(&slot, 9000, mem_frag, del_payload, &frame1, 20, &errors_oom, &errors_transfer_malformed); + + TEST_ASSERT_EQUAL(rx_slot_busy, slot.state); + TEST_ASSERT_EQUAL(20, slot.total_size); + TEST_ASSERT_EQUAL(udpard_prio_high, slot.priority); + TEST_ASSERT_EQUAL_size_t(5, slot.covered_prefix); + TEST_ASSERT_EQUAL(0, errors_transfer_malformed); + + // Second frame with DIFFERENT transfer_payload_size (should trigger the branch and reset the slot) + rx_frame_t frame2 = { 0 }; + frame2.base = make_frame_base(mem_payload, 5, 5, "world"); + frame2.base.crc = 0xABCDEF00; + frame2.meta.transfer_id = 3333; + frame2.meta.transfer_payload_size = 25; // DIFFERENT from frame1's 20 + frame2.meta.priority = udpard_prio_high; + + rx_slot_update(&slot, 9100, mem_frag, del_payload, &frame2, 25, &errors_oom, &errors_transfer_malformed); + + // Verify that the malformed error was counted and slot was reset + TEST_ASSERT_EQUAL(1, errors_transfer_malformed); + TEST_ASSERT_EQUAL(rx_slot_idle, slot.state); // Slot reset due to inconsistent total_size + TEST_ASSERT_EQUAL_size_t(0, slot.covered_prefix); + TEST_ASSERT_NULL(slot.fragments); + + // Reset counters + errors_transfer_malformed = 0; + + // Third frame initializes the slot again with transfer_payload_size=30 and priority=udpard_prio_low + rx_frame_t frame3 = { 0 }; + frame3.base = make_frame_base(mem_payload, 0, 5, "test1"); + frame3.base.crc = 0x11111111; + frame3.meta.transfer_id = 4444; + frame3.meta.transfer_payload_size = 30; + frame3.meta.priority = udpard_prio_low; + + rx_slot_update(&slot, 9200, mem_frag, del_payload, &frame3, 30, &errors_oom, &errors_transfer_malformed); + + TEST_ASSERT_EQUAL(rx_slot_busy, slot.state); + TEST_ASSERT_EQUAL(30, slot.total_size); + TEST_ASSERT_EQUAL(udpard_prio_low, slot.priority); + 
TEST_ASSERT_EQUAL_size_t(5, slot.covered_prefix); + TEST_ASSERT_EQUAL(0, errors_transfer_malformed); + + // Fourth frame with DIFFERENT priority (should trigger the branch and reset the slot) + rx_frame_t frame4 = { 0 }; + frame4.base = make_frame_base(mem_payload, 5, 5, "test2"); + frame4.base.crc = 0x22222222; + frame4.meta.transfer_id = 4444; + frame4.meta.transfer_payload_size = 30; // Same as frame3 + frame4.meta.priority = udpard_prio_high; // DIFFERENT from frame3's udpard_prio_low + + rx_slot_update(&slot, 9300, mem_frag, del_payload, &frame4, 30, &errors_oom, &errors_transfer_malformed); + + // Verify that the malformed error was counted and slot was reset + TEST_ASSERT_EQUAL(1, errors_transfer_malformed); + TEST_ASSERT_EQUAL(rx_slot_idle, slot.state); // Slot reset due to inconsistent priority + TEST_ASSERT_EQUAL_size_t(0, slot.covered_prefix); + TEST_ASSERT_NULL(slot.fragments); + + // Reset counters + errors_transfer_malformed = 0; + + // Fifth frame initializes the slot again + rx_frame_t frame5 = { 0 }; + frame5.base = make_frame_base(mem_payload, 0, 5, "test3"); + frame5.base.crc = 0x33333333; + frame5.meta.transfer_id = 5555; + frame5.meta.transfer_payload_size = 40; + frame5.meta.priority = udpard_prio_nominal; + + rx_slot_update(&slot, 9400, mem_frag, del_payload, &frame5, 40, &errors_oom, &errors_transfer_malformed); + + TEST_ASSERT_EQUAL(rx_slot_busy, slot.state); + TEST_ASSERT_EQUAL(40, slot.total_size); + TEST_ASSERT_EQUAL(udpard_prio_nominal, slot.priority); + TEST_ASSERT_EQUAL_size_t(5, slot.covered_prefix); + TEST_ASSERT_EQUAL(0, errors_transfer_malformed); + + // Sixth frame with BOTH different transfer_payload_size AND priority (should still trigger the branch) + rx_frame_t frame6 = { 0 }; + frame6.base = make_frame_base(mem_payload, 5, 5, "test4"); + frame6.base.crc = 0x44444444; + frame6.meta.transfer_id = 5555; + frame6.meta.transfer_payload_size = 50; // DIFFERENT from frame5's 40 + frame6.meta.priority = udpard_prio_fast; // DIFFERENT from frame5's udpard_prio_nominal + + rx_slot_update(&slot, 9500, mem_frag, del_payload, &frame6, 50, &errors_oom, &errors_transfer_malformed); + + // Verify that the malformed error was counted and slot was reset + TEST_ASSERT_EQUAL(1, errors_transfer_malformed); + TEST_ASSERT_EQUAL(rx_slot_idle, slot.state); // Slot reset due to both inconsistencies + TEST_ASSERT_EQUAL_size_t(0, slot.covered_prefix); + TEST_ASSERT_NULL(slot.fragments); + + rx_slot_reset(&slot, mem_frag); + } + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_payload); + + // Verify no memory leaks + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); } -static void testParseFrameRPCServiceBroadcast(void) +// --------------------------------------------- RX SESSION --------------------------------------------- + +static void test_rx_transfer_id_forward_distance(void) { - byte_t data[] = {1, 2, 41, 9, 255, 255, 123, 192, 13, 240, 221, 224, - 254, 15, 220, 186, 254, 25, 0, 0, 0, 0, 248, 152, // - 'a', 'b', 'c'}; - RxFrame rxf = {0}; - TEST_ASSERT_FALSE(rxParseFrame((struct UdpardMutablePayload) {.data = data, .size = sizeof(data)}, &rxf)); + // Test 1: Same value (distance is 0) + TEST_ASSERT_EQUAL_UINT64(0, rx_transfer_id_forward_distance(0, 0)); + TEST_ASSERT_EQUAL_UINT64(0, rx_transfer_id_forward_distance(100, 100)); + TEST_ASSERT_EQUAL_UINT64(0, rx_transfer_id_forward_distance(UINT64_MAX, UINT64_MAX)); + + // Test 2: Simple forward distance (no 
wraparound) + TEST_ASSERT_EQUAL_UINT64(1, rx_transfer_id_forward_distance(0, 1)); + TEST_ASSERT_EQUAL_UINT64(10, rx_transfer_id_forward_distance(5, 15)); + TEST_ASSERT_EQUAL_UINT64(100, rx_transfer_id_forward_distance(200, 300)); + TEST_ASSERT_EQUAL_UINT64(1000, rx_transfer_id_forward_distance(1000, 2000)); + + // Test 3: Wraparound at UINT64_MAX + TEST_ASSERT_EQUAL_UINT64(1, rx_transfer_id_forward_distance(UINT64_MAX, 0)); + TEST_ASSERT_EQUAL_UINT64(2, rx_transfer_id_forward_distance(UINT64_MAX, 1)); + TEST_ASSERT_EQUAL_UINT64(10, rx_transfer_id_forward_distance(UINT64_MAX - 5, 4)); + TEST_ASSERT_EQUAL_UINT64(100, rx_transfer_id_forward_distance(UINT64_MAX - 49, 50)); + + // Test 4: Large forward distances + TEST_ASSERT_EQUAL_UINT64(UINT64_MAX, rx_transfer_id_forward_distance(0, UINT64_MAX)); + TEST_ASSERT_EQUAL_UINT64(UINT64_MAX, rx_transfer_id_forward_distance(1, 0)); + TEST_ASSERT_EQUAL_UINT64(UINT64_MAX - 1, rx_transfer_id_forward_distance(0, UINT64_MAX - 1)); + TEST_ASSERT_EQUAL_UINT64(UINT64_MAX, rx_transfer_id_forward_distance(2, 1)); + + // Test 5: Half-way point (2^63) + const uint64_t half = 1ULL << 63U; + TEST_ASSERT_EQUAL_UINT64(half, rx_transfer_id_forward_distance(0, half)); + TEST_ASSERT_EQUAL_UINT64(half, rx_transfer_id_forward_distance(100, 100 + half)); + TEST_ASSERT_EQUAL_UINT64(half, rx_transfer_id_forward_distance(UINT64_MAX, half - 1)); + + // Test 6: Backward is interpreted as large forward distance + // Going from 10 to 5 is actually going forward by UINT64_MAX - 4 + TEST_ASSERT_EQUAL_UINT64(UINT64_MAX - 4, rx_transfer_id_forward_distance(10, 5)); + TEST_ASSERT_EQUAL_UINT64(UINT64_MAX - 9, rx_transfer_id_forward_distance(100, 90)); + + // Test 7: Edge cases around 0 + TEST_ASSERT_EQUAL_UINT64(UINT64_MAX, rx_transfer_id_forward_distance(1, 0)); + TEST_ASSERT_EQUAL_UINT64(1, rx_transfer_id_forward_distance(0, 1)); + + // Test 8: Random large numbers + TEST_ASSERT_EQUAL_UINT64(0x123456789ABCDEF0ULL - 0x0FEDCBA987654321ULL, + rx_transfer_id_forward_distance(0x0FEDCBA987654321ULL, 0x123456789ABCDEF0ULL)); } -static void testParseFrameAnonymousNonSingleFrame(void) -{ // Invalid anonymous message frame because EOT not set (multi-frame anonymous transfers are not allowed). - byte_t data[] = {1, 2, 255, 255, 255, 255, 230, 29, 13, 240, 221, 224, - 254, 15, 220, 186, 0, 0, 0, 0, 0, 0, 147, 6, // - 'a', 'b', 'c'}; - RxFrame rxf = {0}; - TEST_ASSERT_FALSE(rxParseFrame((struct UdpardMutablePayload) {.data = data, .size = sizeof(data)}, &rxf)); +// Captures ack transfers emitted into the TX pipelines. +typedef struct +{ + udpard_prio_t priority; + uint64_t transfer_id; + uint64_t topic_hash; + udpard_udpip_ep_t destination; + uint64_t acked_topic_hash; + uint64_t acked_transfer_id; +} ack_tx_info_t; + +typedef struct +{ + instrumented_allocator_t alloc_transfer; + instrumented_allocator_t alloc_payload; + udpard_tx_t tx; + ack_tx_info_t captured[16]; + size_t captured_count; +} tx_fixture_t; + +static bool tx_capture_ack_subject(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) +{ + (void)tx; + (void)ejection; + return true; // ACKs are P2P, subject eject should not be called for them } -static void testParseFrameBadHeaderCRC(void) -{ // Bad header CRC. 
- byte_t data[] = {1, 2, 41, 9, 255, 255, 230, 29, 13, 240, 221, 224, - 254, 15, 220, 186, 57, 48, 0, 0, 0, 0, 30, 180, // - 'a', 'b', 'c'}; - RxFrame rxf = {0}; - TEST_ASSERT_FALSE(rxParseFrame((struct UdpardMutablePayload) {.data = data, .size = sizeof(data)}, &rxf)); +static bool tx_capture_ack_p2p(udpard_tx_t* const tx, + udpard_tx_ejection_t* const ejection, + const udpard_udpip_ep_t destination) +{ + tx_fixture_t* const self = (tx_fixture_t*)tx->user; + if ((self == NULL) || (self->captured_count >= (sizeof(self->captured) / sizeof(self->captured[0])))) { + return false; + } + udpard_tx_refcount_inc(ejection->datagram); + meta_t meta = { 0 }; + uint32_t frame_index = 0; + uint32_t frame_offset = 0; + uint32_t prefix_crc = 0; + udpard_bytes_t payload = { 0 }; + const bool ok = header_deserialize( + (udpard_bytes_mut_t){ .size = ejection->datagram.size, .data = (void*)ejection->datagram.data }, + &meta, + &frame_index, + &frame_offset, + &prefix_crc, + &payload); + if (ok && (frame_index == 0U) && (frame_offset == 0U) && meta.flag_acknowledgement && + (payload.size >= ACK_SIZE_BYTES)) { + const byte_t* const pl = (const byte_t*)payload.data; + ack_tx_info_t* const info = &self->captured[self->captured_count++]; + info->priority = meta.priority; + info->transfer_id = meta.transfer_id; + info->topic_hash = meta.topic_hash; + info->destination = destination; + (void)deserialize_u64(pl + 0U, &info->acked_topic_hash); + (void)deserialize_u64(pl + 8U, &info->acked_transfer_id); + } + udpard_tx_refcount_dec(ejection->datagram); + return true; } -static void testParseFrameUnknownHeaderVersion(void) +static void tx_fixture_init(tx_fixture_t* const self, const uint64_t uid, const size_t capacity) { - // >>> from pycyphal.transport.commons.crc import CRC16CCITT - // >>> list(CRC16CCITT.new(bytes( - // [0, 2, 41, 9, 56, 21, 230, 29, 13, 240, 221, 224, 254, 15, 220, 186, 57, 48, 0, 0, 0, 0])).value_as_bytes) - byte_t data[] = {0, 2, 41, 9, 56, 21, 230, 29, 13, 240, 221, 224, - 254, 15, 220, 186, 57, 48, 0, 0, 0, 0, 141, 228, // - 'a', 'b', 'c'}; - RxFrame rxf = {0}; - TEST_ASSERT_FALSE(rxParseFrame((struct UdpardMutablePayload) {.data = data, .size = sizeof(data)}, &rxf)); + instrumented_allocator_new(&self->alloc_transfer); + instrumented_allocator_new(&self->alloc_payload); + self->captured_count = 0; + udpard_tx_mem_resources_t mem = { 0 }; + mem.transfer = instrumented_allocator_make_resource(&self->alloc_transfer); + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + mem.payload[i] = instrumented_allocator_make_resource(&self->alloc_payload); + } + static const udpard_tx_vtable_t vtb = { .eject_subject = &tx_capture_ack_subject, + .eject_p2p = &tx_capture_ack_p2p }; + TEST_ASSERT(udpard_tx_new(&self->tx, uid, 1U, capacity, mem, &vtb)); + self->tx.user = self; } -static void testParseFrameHeaderWithoutPayload(void) +static void tx_fixture_free(tx_fixture_t* const self) { - byte_t data[] = {1, 2, 41, 9, 255, 255, 230, 29, 13, 240, 221, 224, 254, 15, 220, 186, 57, 48, 0, 0, 0, 0, 30, 179}; - RxFrame rxf = {0}; - TEST_ASSERT_FALSE(rxParseFrame((struct UdpardMutablePayload) {.data = data, .size = sizeof(data)}, &rxf)); + udpard_tx_free(&self->tx); + TEST_ASSERT_EQUAL(0, self->alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL(0, self->alloc_payload.allocated_fragments); + instrumented_allocator_reset(&self->alloc_transfer); + instrumented_allocator_reset(&self->alloc_payload); } -static void testParseFrameEmpty(void) +typedef struct { - RxFrame rxf = {0}; - 
TEST_ASSERT_FALSE(rxParseFrame((struct UdpardMutablePayload) {.data = "", .size = 0}, &rxf)); -} + udpard_rx_t* rx; + udpard_rx_port_t* port; + struct + { + /// The most recently received transfer is at index #0; older transfers follow. + /// The history is needed to allow batch ejection when multiple interned transfers are released. + /// There cannot be more than RX_SLOT_COUNT transfers in the history because that is the maximum + /// number of concurrent transfers that can be in-flight for a given session. + udpard_rx_transfer_t history[RX_SLOT_COUNT]; + uint64_t count; + } message; + struct + { + udpard_remote_t remote; + uint64_t count; + } collision; + struct + { + ack_tx_info_t last; + uint64_t count; + } ack; +} callback_result_t; -static void testParseFrameInvalidTransferID(void) +static void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) { - // The transfer-ID offset is 8 bytes, starting from-->| - byte_t data[] = {1, 2, 41, 9, 56, 21, 230, 29, 255, 255, 255, 255, - 255, 255, 255, 255, 57, 48, 0, 0, 0, 0, 42, 107, // - 'a', 'b', 'c'}; - RxFrame rxf = {0}; - TEST_ASSERT_FALSE(rxParseFrame((struct UdpardMutablePayload) {.data = data, .size = sizeof(data)}, &rxf)); + printf("on_message: ts=%lld transfer_id=%llu payload_size_stored=%zu\n", + (long long)transfer.timestamp, + (unsigned long long)transfer.transfer_id, + transfer.payload_size_stored); + callback_result_t* const cb_result = (callback_result_t* const)rx->user; + cb_result->rx = rx; + cb_result->port = port; + for (size_t i = RX_SLOT_COUNT - 1; i > 0; i--) { + cb_result->message.history[i] = cb_result->message.history[i - 1]; + } + cb_result->message.history[0] = transfer; + cb_result->message.count++; } -// -------------------------------------------------- SLOT -------------------------------------------------- - -static void testSlotRestartEmpty(void) +static void on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_remote_t remote) { - RxSlot slot = { - .ts_usec = 1234567890, - .transfer_id = 0x123456789abcdef0, - .max_index = 546, - .eot_index = 654, - .accepted_frames = 555, - .payload_size = 987, - .fragments = NULL, - }; - InstrumentedAllocator alloc = {0}; - rxSlotRestart(&slot, 0x1122334455667788ULL, makeRxMemory(&alloc, &alloc)); - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, slot.ts_usec); - TEST_ASSERT_EQUAL(0x1122334455667788ULL, slot.transfer_id); - TEST_ASSERT_EQUAL(0, slot.max_index); - TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, slot.eot_index); - TEST_ASSERT_EQUAL(0, slot.accepted_frames); - TEST_ASSERT_EQUAL(0, slot.payload_size); - TEST_ASSERT_EQUAL(NULL, slot.fragments); + callback_result_t* const cb_result = (callback_result_t* const)rx->user; + cb_result->rx = rx; + cb_result->port = port; + cb_result->collision.remote = remote; + cb_result->collision.count++; } +static const udpard_rx_port_vtable_t callbacks = { &on_message, &on_collision }; -static void testSlotRestartNonEmpty(void) +/// Checks that ack transfers are emitted into the TX queues. 
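+/// The TX fixture installs an eject_p2p hook that deserializes each outgoing datagram header
+/// and, for acknowledgement frames, records the destination endpoint together with the acked
+/// (topic-hash, transfer-ID) pair, so the assertions can verify exactly which transfer was
+/// acknowledged and where the ack was addressed. Ack payload layout assumed by the capture
+/// helper (as read via deserialize_u64): bytes [0, 8) hold the acked topic hash and
+/// bytes [8, 16) the acked transfer-ID.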
+static void test_rx_ack_enqueued(void) { - InstrumentedAllocator mem_fragment = {0}; - InstrumentedAllocator mem_payload = {0}; - instrumentedAllocatorNew(&mem_fragment); - instrumentedAllocatorNew(&mem_payload); - const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); - byte_t data[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; - // - RxSlot slot = { - .ts_usec = 1234567890, - .transfer_id = 0x123456789abcdef0, - .max_index = 546, - .eot_index = 654, - .accepted_frames = 555, - .payload_size = 987, - // - .fragments = &makeRxFragment(mem, - 1, - (struct UdpardPayload) {.data = &data[2], .size = 2}, - (struct UdpardMutablePayload) {.data = data, .size = sizeof(data)}, - NULL) - ->tree, + instrumented_allocator_t alloc_frag = { 0 }; + instrumented_allocator_new(&alloc_frag); + const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + + instrumented_allocator_t alloc_session = { 0 }; + instrumented_allocator_new(&alloc_session); + const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); + + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_payload); + const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); + + const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; + + tx_fixture_t tx_fix = { 0 }; + tx_fixture_init(&tx_fix, 0xBADC0FFEE0DDF00DULL, 8); + + udpard_rx_t rx; + udpard_rx_new(&rx, &tx_fix.tx); + callback_result_t cb_result = { 0 }; + rx.user = &cb_result; + + const uint64_t topic_hash = 0x4E81E200CB479D4CULL; + udpard_rx_port_t port; + const udpard_rx_mode_t mode = udpard_rx_unordered; + const udpard_us_t window = 0; + const uint64_t remote_uid = 0xA1B2C3D4E5F60718ULL; + const size_t extent = 1000; + TEST_ASSERT(udpard_rx_port_new(&port, topic_hash, extent, mode, window, rx_mem, &callbacks)); + rx_session_factory_args_t fac_args = { + .owner = &port, + .sessions_by_animation = &rx.list_session_by_animation, + .remote_uid = remote_uid, + .now = 0, }; - slot.fragments->base.lr[0] = &makeRxFragment(mem, - 0, - (struct UdpardPayload) {.data = &data[1], .size = 1}, - (struct UdpardMutablePayload) {.data = data, .size = sizeof(data)}, - slot.fragments) - ->tree.base; - slot.fragments->base.lr[1] = &makeRxFragment(mem, - 2, - (struct UdpardPayload) {.data = &data[3], .size = 3}, - (struct UdpardMutablePayload) {.data = data, .size = sizeof(data)}, - slot.fragments) - ->tree.base; - // Initialization done, ensure the memory utilization is as we expect. - TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(sizeof(data) * 3, mem_payload.allocated_bytes); - TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(sizeof(RxFragment) * 3, mem_fragment.allocated_bytes); - // Now we reset the slot, causing all memory to be freed correctly. - rxSlotRestart(&slot, 0x1122334455667788ULL, makeRxMemory(&mem_fragment, &mem_payload)); - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, slot.ts_usec); - TEST_ASSERT_EQUAL(0x1122334455667788ULL, slot.transfer_id); - TEST_ASSERT_EQUAL(0, slot.max_index); - TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, slot.eot_index); - TEST_ASSERT_EQUAL(0, slot.accepted_frames); - TEST_ASSERT_EQUAL(0, slot.payload_size); - TEST_ASSERT_EQUAL(NULL, slot.fragments); - // Ensure all memory was freed. 
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes);
+    rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid,
+                                                                  &remote_uid,
+                                                                  &cavl_compare_rx_session_by_remote_uid,
+                                                                  &fac_args,
+                                                                  &cavl_factory_rx_session_by_remote_uid);
+    TEST_ASSERT_NOT_NULL(ses);
+
+    meta_t meta = { .priority = udpard_prio_high,
+                    .flag_reliable = true,
+                    .transfer_payload_size = 5,
+                    .transfer_id = 77,
+                    .sender_uid = remote_uid,
+                    .topic_hash = topic_hash };
+    udpard_us_t now = 0;
+    const udpard_udpip_ep_t ep0 = { .ip = 0x0A000001, .port = 0x1234 };
+    now += 100;
+    rx_session_update(ses, &rx, now, ep0, make_frame_ptr(meta, mem_payload, "hello", 0, 5), del_payload, 0);
+    TEST_ASSERT_EQUAL(1, cb_result.message.count);
+    udpard_tx_poll(&tx_fix.tx, now, (uint_fast8_t)(1U << 0U));
+    cb_result.ack.count = tx_fix.captured_count;
+    if (tx_fix.captured_count > 0) {
+        cb_result.ack.last = tx_fix.captured[tx_fix.captured_count - 1U];
+    }
+    TEST_ASSERT(cb_result.ack.count >= 1);
+    TEST_ASSERT_EQUAL_UINT64(topic_hash, cb_result.ack.last.acked_topic_hash);
+    TEST_ASSERT_EQUAL_UINT64(meta.transfer_id, cb_result.ack.last.acked_transfer_id);
+    TEST_ASSERT_EQUAL_UINT32(ep0.ip, cb_result.ack.last.destination.ip);
+    TEST_ASSERT_EQUAL_UINT16(ep0.port, cb_result.ack.last.destination.port);
+
+    udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag));
+    cb_result.message.history[0].payload = NULL;
+
+    const udpard_udpip_ep_t ep1 = { .ip = 0x0A000002, .port = 0x5678 };
+    now += 100;
+    rx_session_update(ses, &rx, now, ep1, make_frame_ptr(meta, mem_payload, "hello", 0, 5), del_payload, 1);
+    udpard_tx_poll(&tx_fix.tx, now, (uint_fast8_t)(1U << 1U));
+    cb_result.ack.count = tx_fix.captured_count;
+    if (tx_fix.captured_count > 0) {
+        cb_result.ack.last = tx_fix.captured[tx_fix.captured_count - 1U];
+    }
+    TEST_ASSERT(cb_result.ack.count >= 2); // acks on interfaces 0 and 1
+    TEST_ASSERT_EQUAL_UINT64(meta.transfer_id, cb_result.ack.last.acked_transfer_id);
+
+    udpard_rx_port_free(&rx, &port);
+    tx_fixture_free(&tx_fix);
+    TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments);
+    TEST_ASSERT_EQUAL(0, alloc_session.allocated_fragments);
+    TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments);
+    instrumented_allocator_reset(&alloc_frag);
+    instrumented_allocator_reset(&alloc_session);
+    instrumented_allocator_reset(&alloc_payload);
 }

-static void testSlotEjectValidLarge(void)
+/// Tests the ORDERED reassembly mode (strictly increasing transfer-ID sequence).
+static void test_rx_session_ordered(void)
 {
-    InstrumentedAllocator mem_fragment = {0};
-    InstrumentedAllocator mem_payload  = {0};
-    instrumentedAllocatorNew(&mem_fragment);
-    instrumentedAllocatorNew(&mem_payload);
-    const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload);
-    //>>> from pycyphal.transport.commons.crc import CRC32C
-    //>>> CRC32C.new(data_bytes).value_as_bytes
-    static const size_t PayloadSize = 171;
-    // Build the fragment tree:
-    //      2
-    //     / `
-    //    1   3
-    //   /
-    //  0
-    RxFragment* const root = //
-        makeRxFragmentString(mem, 2, "Where does Man go? ", NULL);
-    root->tree.base.lr[0] = //
-        &makeRxFragmentString(mem, 1, "For example, where does Man come from? 
", &root->tree)->tree.base; - root->tree.base.lr[1] = // - &makeRxFragmentString(mem, 3, "Where does the universe come from? xL\xAE\xCB", &root->tree)->tree.base; - root->tree.base.lr[0]->lr[0] = - &makeRxFragmentString(mem, // - 0, - "Da Shi, have you ever... considered certain ultimate philosophical questions? ", - ((RxFragmentTreeNode*) root->tree.base.lr[0])) - ->tree.base; - // Initialization done, ensure the memory utilization is as we expect. - TEST_ASSERT_EQUAL(4, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(PayloadSize + TRANSFER_CRC_SIZE_BYTES, mem_payload.allocated_bytes); - TEST_ASSERT_EQUAL(4, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(sizeof(RxFragment) * 4, mem_fragment.allocated_bytes); - // Eject and verify the payload. - size_t payload_size = 0; - struct UdpardFragment payload = {0}; - TEST_ASSERT(rxSlotEject(&payload_size, - &payload, - &root->tree, - mem_payload.allocated_bytes, - 1024, - makeRxMemory(&mem_fragment, &mem_payload))); - TEST_ASSERT_EQUAL(PayloadSize, payload_size); // CRC removed! - TEST_ASSERT( // - compareStringWithPayload("Da Shi, have you ever... considered certain ultimate philosophical questions? ", - payload.view)); - TEST_ASSERT(compareStringWithPayload("For example, where does Man come from? ", payload.next->view)); - TEST_ASSERT(compareStringWithPayload("Where does Man go? ", payload.next->next->view)); - TEST_ASSERT(compareStringWithPayload("Where does the universe come from? ", payload.next->next->next->view)); - TEST_ASSERT_NULL(payload.next->next->next->next); - // Check the memory utilization. All payload fragments are still kept, but the first fragment is freed because of - // the Scott's short payload optimization. - TEST_ASSERT_EQUAL(4, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(PayloadSize + TRANSFER_CRC_SIZE_BYTES, mem_payload.allocated_bytes); - TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); // One gone!!1 - TEST_ASSERT_EQUAL(sizeof(RxFragment) * 3, mem_fragment.allocated_bytes); // yes yes! - // Now, free the payload as the application would. - udpardRxFragmentFree(payload, mem.fragment, mem.payload); - // All memory shall be free now. As in "free beer". 
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes);
+    instrumented_allocator_t alloc_frag = { 0 };
+    instrumented_allocator_new(&alloc_frag);
+    const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag);
+
+    instrumented_allocator_t alloc_session = { 0 };
+    instrumented_allocator_new(&alloc_session);
+    const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session);
+
+    instrumented_allocator_t alloc_payload = { 0 };
+    instrumented_allocator_new(&alloc_payload);
+    const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload);
+    const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload);
+
+    const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session };
+
+    udpard_rx_t rx;
+    udpard_rx_new(&rx, NULL);
+    callback_result_t cb_result = { 0 };
+    rx.user = &cb_result;
+
+    udpard_us_t now = 0;
+    const uint64_t remote_uid = 0xA1B2C3D4E5F60718ULL;
+    udpard_rx_port_t port;
+    TEST_ASSERT(
+        udpard_rx_port_new(&port, 0x4E81E200CB479D4CULL, 1000, udpard_rx_ordered, 20 * KILO, rx_mem, &callbacks));
+    rx_session_factory_args_t fac_args = {
+        .owner = &port,
+        .sessions_by_animation = &rx.list_session_by_animation,
+        .remote_uid = remote_uid,
+        .now = now,
+    };
+    rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid,
+                                                                  &remote_uid,
+                                                                  &cavl_compare_rx_session_by_remote_uid,
+                                                                  &fac_args,
+                                                                  &cavl_factory_rx_session_by_remote_uid);
+    TEST_ASSERT_NOT_NULL(ses);
+
+    meta_t meta = { .priority = udpard_prio_high,
+                    .flag_reliable = true,
+                    .transfer_payload_size = 10,
+                    .transfer_id = 42,
+                    .sender_uid = remote_uid,
+                    .topic_hash = port.topic_hash };
+    now += 1000;
+    rx_session_update(ses,
+                      &rx,
+                      now,
+                      (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 },
+                      make_frame_ptr(meta, mem_payload, "0123456789", 5, 5),
+                      del_payload,
+                      0);
+    now += 1000;
+    rx_session_update(ses,
+                      &rx,
+                      now,
+                      (udpard_udpip_ep_t){ .ip = 0x0A000002, .port = 0x4321 },
+                      make_frame_ptr(meta, mem_payload, "0123456789", 0, 5),
+                      del_payload,
+                      2);
+    TEST_ASSERT_EQUAL(1, cb_result.message.count);
+    TEST_ASSERT_EQUAL(udpard_prio_high, cb_result.message.history[0].priority);
+    TEST_ASSERT_EQUAL(42, cb_result.message.history[0].transfer_id);
+    TEST_ASSERT_EQUAL(remote_uid, cb_result.message.history[0].remote.uid);
+    TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], 10, "0123456789", 10));
+    udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag));
+    cb_result.message.history[0].payload = NULL;
+
+    meta.flag_reliable = false;
+    now += 500;
+    rx_session_update(ses,
+                      &rx,
+                      now,
+                      (udpard_udpip_ep_t){ .ip = 0x0A000003, .port = 0x1111 },
+                      make_frame_ptr(meta, mem_payload, "abcdef", 0, 6),
+                      del_payload,
+                      1);
+    TEST_ASSERT_EQUAL(1, cb_result.message.count);
+    TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments);
+    TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments);
+
+    meta.flag_reliable = true;
+    meta.transfer_payload_size = 3;
+    meta.transfer_id = 44;
+    now += 500;
+    rx_session_update(ses,
+                      &rx,
+                      now,
+                      (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 },
+                      make_frame_ptr(meta, mem_payload, "444", 0, 3),
+                      del_payload,
+                      0);
+    TEST_ASSERT_EQUAL(1, 
cb_result.message.count); + TEST_ASSERT_EQUAL(1, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL(1, alloc_payload.allocated_fragments); + + meta.transfer_id = 43; + now += 500; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, + make_frame_ptr(meta, mem_payload, "433", 0, 3), + del_payload, + 0); + udpard_rx_poll(&rx, now); + TEST_ASSERT_EQUAL(3, cb_result.message.count); + TEST_ASSERT_EQUAL(44, cb_result.message.history[0].transfer_id); + TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], 3, "444", 3)); + TEST_ASSERT_EQUAL(43, cb_result.message.history[1].transfer_id); + TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[1], 3, "433", 3)); + udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); + cb_result.message.history[0].payload = NULL; + udpard_fragment_free_all(cb_result.message.history[1].payload, udpard_make_deleter(mem_frag)); + TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); + + now += 25 * KILO; + meta.transfer_id = 41; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, + make_frame_ptr(meta, mem_payload, "old", 0, 3), + del_payload, + 0); + TEST_ASSERT_EQUAL(3, cb_result.message.count); + TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); + + udpard_rx_port_free(&rx, &port); + TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_session); + instrumented_allocator_reset(&alloc_payload); } -static void testSlotEjectValidSmall(void) +static void test_rx_session_unordered(void) { - InstrumentedAllocator mem_fragment = {0}; - InstrumentedAllocator mem_payload = {0}; - instrumentedAllocatorNew(&mem_fragment); - instrumentedAllocatorNew(&mem_payload); - const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); - //>>> from pycyphal.transport.commons.crc import CRC32C - //>>> CRC32C.new(data_bytes).value_as_bytes - static const size_t PayloadSize = 262; - // Build the fragment tree: - // 1 - // / ` - // 0 3 - // / ` - // 2 4 - RxFragment* const root = // - makeRxFragmentString(mem, 1, "You told me that you came from the sea. Did you build the sea?\n", NULL); - root->tree.base.lr[0] = // - &makeRxFragmentString(mem, 0, "Did you build this four-dimensional fragment?\n", &root->tree)->tree.base; - root->tree.base.lr[1] = // - &makeRxFragmentString(mem, 3, "this four-dimensional space is like the sea for us?\n", &root->tree)->tree.base; - root->tree.base.lr[1]->lr[0] = // - &makeRxFragmentString(mem, - 2, - "Are you saying that for you, or at least for your creators, ", - ((RxFragmentTreeNode*) root->tree.base.lr[1])) - ->tree.base; - root->tree.base.lr[1]->lr[1] = // - &makeRxFragmentString(mem, - 4, - "More like a puddle. The sea has gone dry.\xA2\x93-\xB2", - ((RxFragmentTreeNode*) root->tree.base.lr[1])) - ->tree.base; - // Initialization done, ensure the memory utilization is as we expect. 
- TEST_ASSERT_EQUAL(5, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(PayloadSize + TRANSFER_CRC_SIZE_BYTES, mem_payload.allocated_bytes); - TEST_ASSERT_EQUAL(5, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(sizeof(RxFragment) * 5, mem_fragment.allocated_bytes); - // Eject and verify the payload. Use a small extent and ensure the excess is dropped. - size_t payload_size = 0; - struct UdpardFragment payload = {0}; - TEST_ASSERT(rxSlotEject(&payload_size, - &payload, - &root->tree, - mem_payload.allocated_bytes, - 136, // <-- small extent, rest truncated - makeRxMemory(&mem_fragment, &mem_payload))); - TEST_ASSERT_EQUAL(136, payload_size); // Equals the extent due to the truncation. - TEST_ASSERT(compareStringWithPayload("Did you build this four-dimensional fragment?\n", payload.view)); - TEST_ASSERT(compareStringWithPayload("You told me that you came from the sea. Did you build the sea?\n", - payload.next->view)); - TEST_ASSERT(compareStringWithPayload("Are you saying that for you", payload.next->next->view)); - TEST_ASSERT_NULL(payload.next->next->next); - // Check the memory utilization. - // The first fragment is freed because of the Scott's short payload optimization; - // the two last fragments are freed because of the truncation. - TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(169, mem_payload.allocated_bytes); // The last block is rounded up. - TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); // One gone!!1 - TEST_ASSERT_EQUAL(sizeof(RxFragment) * 2, mem_fragment.allocated_bytes); - // Now, free the payload as the application would. - udpardRxFragmentFree(payload, mem.fragment, mem.payload); - // All memory shall be free now. As in "free beer". - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); + // Memory and rx for P2P unordered session. 
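+    // Three separate instrumented allocators (fragment, session, payload) are used so that
+    // the teardown checks can attribute a leak to the specific resource pool it came from.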
+ instrumented_allocator_t alloc_frag = { 0 }; + instrumented_allocator_new(&alloc_frag); + instrumented_allocator_t alloc_session = { 0 }; + instrumented_allocator_new(&alloc_session); + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_payload); + const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); + const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); + const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; + + udpard_rx_t rx; + udpard_rx_new(&rx, NULL); + callback_result_t cb_result = { 0 }; + rx.user = &cb_result; + + const uint64_t topic_hash = 0xC3C8E4974254E1F5ULL; + udpard_rx_port_t port = { 0 }; + TEST_ASSERT(udpard_rx_port_new(&port, topic_hash, SIZE_MAX, udpard_rx_unordered, 0, rx_mem, &callbacks)); + + udpard_us_t now = 0; + const uint64_t remote_uid = 0xA1B2C3D4E5F60718ULL; + rx_session_factory_args_t fac_args = { + .owner = &port, + .sessions_by_animation = &rx.list_session_by_animation, + .remote_uid = remote_uid, + .now = now, + }; + rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, + &remote_uid, + &cavl_compare_rx_session_by_remote_uid, + &fac_args, + &cavl_factory_rx_session_by_remote_uid); + TEST_ASSERT_NOT_NULL(ses); + + // Single-frame transfer is ejected immediately. + meta_t meta = { .priority = udpard_prio_high, + .flag_reliable = false, + .transfer_payload_size = 5, + .transfer_id = 100, + .sender_uid = remote_uid, + .topic_hash = port.topic_hash }; + now += 1000; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, + make_frame_ptr(meta, mem_payload, "hello", 0, 5), + del_payload, + 0); + TEST_ASSERT_EQUAL(1, cb_result.message.count); + TEST_ASSERT_EQUAL(100, cb_result.message.history[0].transfer_id); + TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], 5, "hello", 5)); + udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); + cb_result.message.history[0].payload = NULL; + + // Out-of-order arrivals are accepted. + meta.transfer_id = 103; + now += 1000; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x0A000002, .port = 0x5678 }, + make_frame_ptr(meta, mem_payload, "tid103", 0, 6), + del_payload, + 1); + TEST_ASSERT_EQUAL(2, cb_result.message.count); + TEST_ASSERT_EQUAL(103, cb_result.message.history[0].transfer_id); + udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); + cb_result.message.history[0].payload = NULL; + + meta.transfer_id = 102; + now += 500; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x9999 }, + make_frame_ptr(meta, mem_payload, "tid102", 0, 6), + del_payload, + 0); + TEST_ASSERT_EQUAL(3, cb_result.message.count); + TEST_ASSERT_EQUAL(102, cb_result.message.history[0].transfer_id); + udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); + cb_result.message.history[0].payload = NULL; + + // Duplicate is ignored. 
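+    // Replaying the already-delivered transfer-ID 103 must neither invoke the message
+    // callback again nor retain the payload buffer; the allocator balance is checked below.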
+ meta.transfer_id = 103; + now += 100; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x0A000002, .port = 0x5678 }, + make_frame_ptr(meta, mem_payload, "dup103", 0, 6), + del_payload, + 1); + TEST_ASSERT_EQUAL(3, cb_result.message.count); + TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); + + // Multi-frame transfer completes once all pieces arrive. + meta.transfer_id = 200; + meta.transfer_payload_size = 10; + meta.priority = udpard_prio_fast; + meta.flag_reliable = true; + now += 500; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x0A000002, .port = 0x5678 }, + make_frame_ptr(meta, mem_payload, "0123456789", 5, 5), + del_payload, + 1); + TEST_ASSERT_EQUAL(3, cb_result.message.count); + TEST_ASSERT_EQUAL(1, alloc_frag.allocated_fragments); + now += 200; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, + make_frame_ptr(meta, mem_payload, "0123456789", 0, 5), + del_payload, + 0); + TEST_ASSERT(cb_result.message.count >= 1); + TEST_ASSERT_EQUAL(200, cb_result.message.history[0].transfer_id); + TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], 10, "0123456789", 10)); + TEST_ASSERT_EQUAL(0x0A000001, cb_result.message.history[0].remote.endpoints[0].ip); + TEST_ASSERT_EQUAL(0x0A000002, cb_result.message.history[0].remote.endpoints[1].ip); + udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); + cb_result.message.history[0].payload = NULL; + TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); + + udpard_rx_port_free(&rx, &port); + TEST_ASSERT_EQUAL(0, alloc_session.allocated_fragments); + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_session); + instrumented_allocator_reset(&alloc_payload); } -static void testSlotEjectValidEmpty(void) +static void test_rx_session_unordered_reject_old(void) { - InstrumentedAllocator mem_fragment = {0}; - InstrumentedAllocator mem_payload = {0}; - instrumentedAllocatorNew(&mem_fragment); - instrumentedAllocatorNew(&mem_payload); - const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); - // Build the fragment tree: - // 1 - // / ` - // 0 2 - RxFragment* const root = makeRxFragmentString(mem, 1, "BBB", NULL); - root->tree.base.lr[0] = &makeRxFragmentString(mem, 0, "AAA", &root->tree)->tree.base; - root->tree.base.lr[1] = &makeRxFragmentString(mem, 2, "P\xF5\xA5?", &root->tree)->tree.base; - // Initialization done, ensure the memory utilization is as we expect. - TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(6 + TRANSFER_CRC_SIZE_BYTES, mem_payload.allocated_bytes); - TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(sizeof(RxFragment) * 3, mem_fragment.allocated_bytes); - // Eject and verify the payload. The extent is zero, so all payload is removed. - size_t payload_size = 0; - struct UdpardFragment payload = {0}; - TEST_ASSERT(rxSlotEject(&payload_size, - &payload, - &root->tree, - mem_payload.allocated_bytes, - 0, - makeRxMemory(&mem_fragment, &mem_payload))); - TEST_ASSERT_EQUAL(0, payload_size); // Equals the extent due to the truncation. - TEST_ASSERT_NULL(payload.next); - TEST_ASSERT_EQUAL(0, payload.view.size); - // Check the memory utilization. No memory should be in use by this point. 
- TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); - // Now, free the payload as the application would. - udpardRxFragmentFree(payload, mem.fragment, mem.payload); - // No memory is in use anyway, so no change here. - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); + // Memory and rx with TX for ack replay. + instrumented_allocator_t alloc_frag = { 0 }; + instrumented_allocator_new(&alloc_frag); + const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + instrumented_allocator_t alloc_session = { 0 }; + instrumented_allocator_new(&alloc_session); + const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_payload); + const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); + const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; + + tx_fixture_t tx_fix = { 0 }; + tx_fixture_init(&tx_fix, 0xF00DCAFEF00DCAFEULL, 4); + udpard_rx_t rx; + udpard_rx_new(&rx, &tx_fix.tx); + callback_result_t cb_result = { 0 }; + rx.user = &cb_result; + + const uint64_t local_uid = 0xFACEB00CFACEB00CULL; + udpard_rx_port_t port = { 0 }; + TEST_ASSERT(udpard_rx_port_new(&port, local_uid, SIZE_MAX, udpard_rx_unordered, 0, rx_mem, &callbacks)); + + udpard_us_t now = 0; + const uint64_t remote_uid = 0x0123456789ABCDEFULL; + rx_session_factory_args_t fac_args = { + .owner = &port, + .sessions_by_animation = &rx.list_session_by_animation, + .remote_uid = remote_uid, + .now = now, + }; + rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, + &remote_uid, + &cavl_compare_rx_session_by_remote_uid, + &fac_args, + &cavl_factory_rx_session_by_remote_uid); + TEST_ASSERT_NOT_NULL(ses); + + meta_t meta = { .priority = udpard_prio_fast, + .flag_reliable = false, + .transfer_payload_size = 3, + .transfer_id = 10, + .sender_uid = remote_uid, + .topic_hash = local_uid }; + now += 1000; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x0A00000A, .port = 0x0A00 }, + make_frame_ptr(meta, mem_payload, "old", 0, 3), + del_payload, + 0); + TEST_ASSERT_EQUAL(1, cb_result.message.count); + TEST_ASSERT_EQUAL(10, cb_result.message.history[0].transfer_id); + udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); + + // Jump far ahead then report the old transfer again. 
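+    // After the head jumps to 2050, replaying the old transfer-ID 10 with the reliable flag
+    // set must not yield a second delivery, but it should still trigger an ack replay, which
+    // the TX capture is polled for below.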
+ meta.transfer_id = 2050; + meta.transfer_payload_size = 4; + now += 1000; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x0A00000B, .port = 0x0B00 }, + make_frame_ptr(meta, mem_payload, "jump", 0, 4), + del_payload, + 1); + TEST_ASSERT_EQUAL(2, cb_result.message.count); + TEST_ASSERT_EQUAL(2050, cb_result.message.history[0].transfer_id); + udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); + + meta.transfer_id = 10; + meta.transfer_payload_size = 3; + meta.flag_reliable = true; + now += 1000; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x0A00000A, .port = 0x0A00 }, + make_frame_ptr(meta, mem_payload, "dup", 0, 3), + del_payload, + 0); + TEST_ASSERT_EQUAL(2, cb_result.message.count); + udpard_tx_poll(&tx_fix.tx, now, UDPARD_IFACE_BITMAP_ALL); + cb_result.ack.count = tx_fix.captured_count; + if (tx_fix.captured_count > 0) { + cb_result.ack.last = tx_fix.captured[tx_fix.captured_count - 1U]; + } + TEST_ASSERT_GREATER_OR_EQUAL_UINT64(1, cb_result.ack.count); + TEST_ASSERT_EQUAL_UINT64(10, cb_result.ack.last.acked_transfer_id); + TEST_ASSERT_EQUAL_UINT64(port.topic_hash, cb_result.ack.last.acked_topic_hash); + + udpard_rx_port_free(&rx, &port); + tx_fixture_free(&tx_fix); + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_session); + instrumented_allocator_reset(&alloc_payload); } -static void testSlotEjectInvalid(void) +static void test_rx_session_unordered_duplicates(void) { - InstrumentedAllocator mem_fragment = {0}; - InstrumentedAllocator mem_payload = {0}; - instrumentedAllocatorNew(&mem_fragment); - instrumentedAllocatorNew(&mem_payload); - const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); - // Build the fragment tree; no valid CRC here: - // 1 - // / ` - // 0 2 - RxFragment* const root = makeRxFragmentString(mem, 1, "BBB", NULL); - root->tree.base.lr[0] = &makeRxFragmentString(mem, 0, "AAA", &root->tree)->tree.base; - root->tree.base.lr[1] = &makeRxFragmentString(mem, 2, "CCC", &root->tree)->tree.base; - // Initialization done, ensure the memory utilization is as we expect. - TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(9, mem_payload.allocated_bytes); - TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(sizeof(RxFragment) * 3, mem_fragment.allocated_bytes); - // Eject and verify the payload. - size_t payload_size = 0; - struct UdpardFragment payload = {0}; - TEST_ASSERT_FALSE(rxSlotEject(&payload_size, - &payload, - &root->tree, - mem_payload.allocated_bytes, - 1000, - makeRxMemory(&mem_fragment, &mem_payload))); - // The call was unsuccessful, so the memory was freed instead of being handed over to the application. - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); + // Unordered session accepts earlier arrivals but rejects duplicates. 
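+    // Only the duplicate-rejection path is exercised here; acceptance of out-of-order
+    // arrivals is already covered by test_rx_session_unordered above.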
+ instrumented_allocator_t alloc_frag = { 0 }; + instrumented_allocator_new(&alloc_frag); + instrumented_allocator_t alloc_session = { 0 }; + instrumented_allocator_new(&alloc_session); + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_payload); + const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); + const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); + const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; + + udpard_rx_t rx; + udpard_rx_new(&rx, NULL); + callback_result_t cb_result = { 0 }; + rx.user = &cb_result; + + udpard_rx_port_t port = { 0 }; + TEST_ASSERT(udpard_rx_port_new(&port, 0xFEE1DEADBEEFF00DULL, SIZE_MAX, udpard_rx_unordered, 0, rx_mem, &callbacks)); + + udpard_us_t now = 0; + const uint64_t remote_uid = 0xAABBCCDDEEFF0011ULL; + rx_session_factory_args_t fac_args = { + .owner = &port, + .sessions_by_animation = &rx.list_session_by_animation, + .remote_uid = remote_uid, + .now = now, + }; + rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, + &remote_uid, + &cavl_compare_rx_session_by_remote_uid, + &fac_args, + &cavl_factory_rx_session_by_remote_uid); + TEST_ASSERT_NOT_NULL(ses); + + meta_t meta = { .priority = udpard_prio_nominal, + .flag_reliable = false, + .transfer_payload_size = 2, + .transfer_id = 5, + .sender_uid = remote_uid, + .topic_hash = port.topic_hash }; + now += 1000; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x11223344, .port = 0x1111 }, + make_frame_ptr(meta, mem_payload, "aa", 0, 2), + del_payload, + 0); + TEST_ASSERT_EQUAL(1, cb_result.message.count); + TEST_ASSERT_EQUAL(5, cb_result.message.history[0].transfer_id); + udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); + cb_result.message.history[0].payload = NULL; + + // Duplicate dropped. + now += 10; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x11223344, .port = 0x1111 }, + make_frame_ptr(meta, mem_payload, "bb", 0, 2), + del_payload, + 0); + TEST_ASSERT_EQUAL(1, cb_result.message.count); + TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); + + udpard_rx_port_free(&rx, &port); + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_session); + instrumented_allocator_reset(&alloc_payload); } -static void testSlotAcceptA(void) +static void test_rx_session_ordered_reject_stale_after_jump(void) { - InstrumentedAllocator mem_fragment = {0}; - InstrumentedAllocator mem_payload = {0}; - instrumentedAllocatorNew(&mem_fragment); - instrumentedAllocatorNew(&mem_payload); - const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); - // Set up the RX slot instance we're going to be working with. - RxSlot slot = { - .ts_usec = 1234567890, - .transfer_id = 0x1122334455667788, - .max_index = 0, - .eot_index = FRAME_INDEX_UNSET, - .accepted_frames = 0, - .payload_size = 0, - .fragments = NULL, + // Ordered session releases interned transfers once gaps are filled. 
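+    // Scenario: deliver 10, intern 12 while 11 is still missing, then fill the gap with 11
+    // so that 11 and 12 are released back-to-back in transfer-ID order; finally probe a
+    // stale ID (5) far behind the advanced head.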
+ instrumented_allocator_t alloc_frag = { 0 }; + instrumented_allocator_new(&alloc_frag); + instrumented_allocator_t alloc_session = { 0 }; + instrumented_allocator_new(&alloc_session); + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_payload); + const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); + const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); + const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; + + udpard_rx_t rx; + udpard_rx_new(&rx, NULL); + callback_result_t cb_result = { 0 }; + rx.user = &cb_result; + + udpard_rx_port_t port = { 0 }; + TEST_ASSERT( + udpard_rx_port_new(&port, 0x123456789ABCDEF0ULL, 1000, udpard_rx_ordered, 20 * KILO, rx_mem, &callbacks)); + + udpard_us_t now = 0; + const uint64_t remote_uid = 0xCAFEBEEFFACEFEEDULL; + rx_session_factory_args_t fac_args = { + .owner = &port, + .sessions_by_animation = &rx.list_session_by_animation, + .remote_uid = remote_uid, + .now = now, }; - size_t payload_size = 0; - struct UdpardFragment payload = {0}; - - // === TRANSFER === - // Accept a single-frame transfer. Ownership transferred to the payload object. - //>>> from pycyphal.transport.commons.crc import CRC32C - //>>> CRC32C.new(data_bytes).value_as_bytes - TEST_ASSERT_EQUAL(1, - rxSlotAccept(&slot, - &payload_size, - &payload, - makeRxFrameBaseString(&mem_payload, - 0, - true, - "The fish responsible for drying the sea are not here." - "\x04\x1F\x8C\x1F"), - 1000, - mem)); - // Verify the memory utilization. Note that the small transfer optimization is in effect: head fragment moved. - TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(53 + TRANSFER_CRC_SIZE_BYTES, mem_payload.allocated_bytes); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); - // Verify the payload and free it. Note the CRC is not part of the payload, obviously. - TEST_ASSERT_EQUAL(53, payload_size); - TEST_ASSERT(compareStringWithPayload("The fish responsible for drying the sea are not here.", payload.view)); - TEST_ASSERT_NULL(payload.next); - udpardRxFragmentFree(payload, mem.fragment, mem.payload); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); - // Ensure the slot has been restarted correctly. - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, slot.ts_usec); - TEST_ASSERT_EQUAL(0x1122334455667789, slot.transfer_id); // INCREMENTED - TEST_ASSERT_EQUAL(0, slot.max_index); - TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, slot.eot_index); - TEST_ASSERT_EQUAL(0, slot.accepted_frames); - TEST_ASSERT_EQUAL(0, slot.payload_size); - TEST_ASSERT_NULL(slot.fragments); - - // === TRANSFER === - // Accept a multi-frame transfer. Here, frames arrive in order. - TEST_ASSERT_EQUAL(0, - rxSlotAccept(&slot, - &payload_size, - &payload, - makeRxFrameBaseString(&mem_payload, - 0, - false, - "We're sorry. What you said is really hard to understand.\n"), - 1000, - mem)); - TEST_ASSERT_EQUAL(0, - rxSlotAccept(&slot, - &payload_size, - &payload, - makeRxFrameBaseString(&mem_payload, - 1, - false, - "The fish who dried the sea went onto land before they did " - "this. 
"), - 1000, - mem)); - TEST_ASSERT_EQUAL(1, - rxSlotAccept(&slot, - &payload_size, - &payload, - makeRxFrameBaseString(&mem_payload, - 2, - true, - "They moved from one dark forest to another dark forest." - "?\xAC(\xBE"), - 1000, - mem)); - // Verify the memory utilization. Note that the small transfer optimization is in effect: head fragment moved. - TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(176 + TRANSFER_CRC_SIZE_BYTES, mem_payload.allocated_bytes); - TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); // One freed. - TEST_ASSERT_EQUAL(sizeof(RxFragment) * 2, mem_fragment.allocated_bytes); - // Verify the payload and free it. Note the CRC is not part of the payload, obviously. - TEST_ASSERT_EQUAL(176, payload_size); - TEST_ASSERT(compareStringWithPayload("We're sorry. What you said is really hard to understand.\n", payload.view)); - TEST_ASSERT_NOT_NULL(payload.next); - TEST_ASSERT(compareStringWithPayload("The fish who dried the sea went onto land before they did this. ", - payload.next->view)); - TEST_ASSERT_NOT_NULL(payload.next->next); - TEST_ASSERT(compareStringWithPayload("They moved from one dark forest to another dark forest.", // - payload.next->next->view)); - TEST_ASSERT_NULL(payload.next->next->next); - udpardRxFragmentFree(payload, mem.fragment, mem.payload); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); - // Ensure the slot has been restarted correctly. - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, slot.ts_usec); - TEST_ASSERT_EQUAL(0x112233445566778A, slot.transfer_id); // INCREMENTED - TEST_ASSERT_EQUAL(0, slot.max_index); - TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, slot.eot_index); - TEST_ASSERT_EQUAL(0, slot.accepted_frames); - TEST_ASSERT_EQUAL(0, slot.payload_size); - TEST_ASSERT_NULL(slot.fragments); - - // === TRANSFER === - // Accept an out-of-order transfer with extent truncation. Frames arrive out-of-order with duplicates. - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, - rxSlotAccept(&slot, - &payload_size, - &payload, - makeRxFrameBaseString(&mem_payload, // - 2, - true, - "Toss it over." - "K(\xBB\xEE"), - 45, - mem)); - TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, - rxSlotAccept(&slot, - &payload_size, - &payload, - makeRxFrameBaseString(&mem_payload, // - 1, - false, - "How do we give it to you?\n"), - 45, - mem)); - TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, - rxSlotAccept(&slot, - &payload_size, - &payload, - makeRxFrameBaseString(&mem_payload, // - 1, - false, - "DUPLICATE #1"), - 45, - mem)); - TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // NO CHANGE, duplicate discarded. - TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, - rxSlotAccept(&slot, - &payload_size, - &payload, - makeRxFrameBaseString(&mem_payload, // - 2, - true, - "DUPLICATE #2"), - 45, - mem)); - TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // NO CHANGE, duplicate discarded. 
- TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(1, // transfer completed - rxSlotAccept(&slot, - &payload_size, - &payload, - makeRxFrameBaseString(&mem_payload, // - 0, - false, - "I like fish. Can I have it?\n"), - 45, - mem)); - // Verify the memory utilization. Note that the small transfer optimization is in effect: head fragment moved. - // Due to the implicit truncation (the extent is small), the last fragment is already freed. - TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // One freed because of truncation. - TEST_ASSERT_EQUAL(28 + 26, mem_payload.allocated_bytes); - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); // One freed because truncation, one optimized away. - TEST_ASSERT_EQUAL(sizeof(RxFragment) * 1, mem_fragment.allocated_bytes); - // Verify the payload and free it. Note the CRC is not part of the payload, obviously. - TEST_ASSERT_EQUAL(45, payload_size); // Equals the extent. - TEST_ASSERT(compareStringWithPayload("I like fish. Can I have it?\n", payload.view)); - TEST_ASSERT_NOT_NULL(payload.next); - TEST_ASSERT(compareStringWithPayload("How do we give it", payload.next->view)); // TRUNCATED - TEST_ASSERT_NULL(payload.next->next); - udpardRxFragmentFree(payload, mem.fragment, mem.payload); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); - // Ensure the slot has been restarted correctly. - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, slot.ts_usec); - TEST_ASSERT_EQUAL(0x112233445566778B, slot.transfer_id); // INCREMENTED - TEST_ASSERT_EQUAL(0, slot.max_index); - TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, slot.eot_index); - TEST_ASSERT_EQUAL(0, slot.accepted_frames); - TEST_ASSERT_EQUAL(0, slot.payload_size); - TEST_ASSERT_NULL(slot.fragments); - - // === TRANSFER === - // Shorter than TRANSFER_CRC_SIZE_BYTES, discarded early. - TEST_ASSERT_EQUAL(0, - rxSlotAccept(&slot, - &payload_size, - &payload, - makeRxFrameBaseString(&mem_payload, 0, true, ":D"), - 1000, - mem)); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); - // Ensure the slot has been restarted correctly. - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, slot.ts_usec); - TEST_ASSERT_EQUAL(0x112233445566778C, slot.transfer_id); // INCREMENTED - TEST_ASSERT_EQUAL(0, slot.max_index); - TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, slot.eot_index); - TEST_ASSERT_EQUAL(0, slot.accepted_frames); - TEST_ASSERT_EQUAL(0, slot.payload_size); - TEST_ASSERT_NULL(slot.fragments); - - // === TRANSFER === - // OOM on reception. Note that the payload allocator does not require restrictions as the library does not - // allocate memory for the payload, only for the fragments. - mem_fragment.limit_fragments = 1; // Can only store one fragment, but the transfer requires more. - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, - rxSlotAccept(&slot, - &payload_size, - &payload, - makeRxFrameBaseString(&mem_payload, // - 2, - true, - "Toss it over." - "K(\xBB\xEE"), - 1000, - mem)); - TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); // Limit reached here. Cannot accept next fragment. 
- TEST_ASSERT_EQUAL(-UDPARD_ERROR_MEMORY, - rxSlotAccept(&slot, - &payload_size, - &payload, - makeRxFrameBaseString(&mem_payload, // - 1, - false, - "How do we give it to you?\n"), - 1000, - mem)); - TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); // Payload not accepted, cannot alloc fragment. - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); - mem_fragment.limit_fragments = 2; // Lift the limit and repeat the same frame, this time it is accepted. - TEST_ASSERT_EQUAL(0, - rxSlotAccept(&slot, - &payload_size, - &payload, - makeRxFrameBaseString(&mem_payload, // - 0, - false, - "I like fish. Can I have it?\n"), - 1000, - mem)); - TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // Accepted! - TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(-UDPARD_ERROR_MEMORY, // Cannot alloc third fragment. - rxSlotAccept(&slot, - &payload_size, - &payload, - makeRxFrameBaseString(&mem_payload, // - 1, - false, - "How do we give it to you?\n"), - 1000, - mem)); - TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // Payload not accepted, cannot alloc fragment. - TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); - mem_fragment.limit_fragments = 3; // Lift the limit and repeat the same frame, this time it is accepted. - TEST_ASSERT_EQUAL(1, // transfer completed - rxSlotAccept(&slot, - &payload_size, - &payload, - makeRxFrameBaseString(&mem_payload, // - 1, - false, - "How do we give it to you?\n"), - 1000, - mem)); - // Verify the memory utilization. Note that the small transfer optimization is in effect: head fragment moved. - TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(67 + TRANSFER_CRC_SIZE_BYTES, mem_payload.allocated_bytes); - TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(sizeof(RxFragment) * 2, mem_fragment.allocated_bytes); - // Verify the payload and free it. Note the CRC is not part of the payload, obviously. - TEST_ASSERT_EQUAL(67, payload_size); // Equals the extent. - TEST_ASSERT(compareStringWithPayload("I like fish. Can I have it?\n", payload.view)); - TEST_ASSERT_NOT_NULL(payload.next); - TEST_ASSERT(compareStringWithPayload("How do we give it to you?\n", payload.next->view)); - TEST_ASSERT_NOT_NULL(payload.next->next); - TEST_ASSERT(compareStringWithPayload("Toss it over.", payload.next->next->view)); - TEST_ASSERT_NULL(payload.next->next->next); - udpardRxFragmentFree(payload, mem.fragment, mem.payload); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); - // Ensure the slot has been restarted correctly. - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, slot.ts_usec); - TEST_ASSERT_EQUAL(0x112233445566778D, slot.transfer_id); // INCREMENTED - TEST_ASSERT_EQUAL(0, slot.max_index); - TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, slot.eot_index); - TEST_ASSERT_EQUAL(0, slot.accepted_frames); - TEST_ASSERT_EQUAL(0, slot.payload_size); - TEST_ASSERT_NULL(slot.fragments); - - // === TRANSFER === - // Inconsistent EOT flag. - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, // Just an ordinary transfer passing by, what could go wrong? - rxSlotAccept(&slot, - &payload_size, - &payload, - makeRxFrameBaseString(&mem_payload, // - 2, - true, - "Toss it over." 
- "K(\xBB\xEE"), - 45, - mem)); - TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); // Okay, accepted, some data stored... - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, - rxSlotAccept(&slot, - &payload_size, - &payload, - makeRxFrameBaseString(&mem_payload, // - 1, // - true, // SURPRISE! EOT is set in distinct frames! - "How do we give it to you?\n"), - 45, - mem)); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); // This is outrageous. Of course we have to drop everything. - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - // Ensure the slot has been restarted correctly. - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, slot.ts_usec); - TEST_ASSERT_EQUAL(0x112233445566778E, slot.transfer_id); // INCREMENTED - TEST_ASSERT_EQUAL(0, slot.max_index); - TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, slot.eot_index); - TEST_ASSERT_EQUAL(0, slot.accepted_frames); - TEST_ASSERT_EQUAL(0, slot.payload_size); - TEST_ASSERT_NULL(slot.fragments); - - // === TRANSFER === - // More frames past the EOT; or, in other words, the frame index where EOT is set is not the maximum index. - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, - rxSlotAccept(&slot, - &payload_size, - &payload, - makeRxFrameBaseString(&mem_payload, // - 2, - true, - "Toss it over." - "K(\xBB\xEE"), - 45, - mem)); - TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); // Okay, accepted, some data stored... - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, - rxSlotAccept(&slot, - &payload_size, - &payload, - makeRxFrameBaseString(&mem_payload, // - 3, // SURPRISE! Frame #3 while #2 was EOT! - false, - "How do we give it to you?\n"), - 45, - mem)); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); // This is outrageous. Of course we have to drop everything. - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - // Ensure the slot has been restarted correctly. - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, slot.ts_usec); - TEST_ASSERT_EQUAL(0x112233445566778F, slot.transfer_id); // INCREMENTED - TEST_ASSERT_EQUAL(0, slot.max_index); - TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, slot.eot_index); - TEST_ASSERT_EQUAL(0, slot.accepted_frames); - TEST_ASSERT_EQUAL(0, slot.payload_size); - TEST_ASSERT_NULL(slot.fragments); -} + rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, + &remote_uid, + &cavl_compare_rx_session_by_remote_uid, + &fac_args, + &cavl_factory_rx_session_by_remote_uid); + TEST_ASSERT_NOT_NULL(ses); + + meta_t meta = { .priority = udpard_prio_nominal, + .flag_reliable = false, + .transfer_payload_size = 2, + .transfer_id = 10, + .sender_uid = remote_uid, + .topic_hash = port.topic_hash }; + now += 1000; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x01010101, .port = 0x1111 }, + make_frame_ptr(meta, mem_payload, "aa", 0, 2), + del_payload, + 0); + TEST_ASSERT_EQUAL(1, cb_result.message.count); + udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); + cb_result.message.history[0].payload = NULL; + + // Intern two transfers out of order. + meta.transfer_id = 12; + now += 100; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x02020202, .port = 0x2222 }, + make_frame_ptr(meta, mem_payload, "bb", 0, 2), + del_payload, + 1); + // Depending on implementation, the jump may be dropped or interned. 
+    TEST_ASSERT(cb_result.message.count >= 1);
+    meta.transfer_id = 11;
+    now += 100;
+    rx_session_update(ses,
+                      &rx,
+                      now,
+                      (udpard_udpip_ep_t){ .ip = 0x03030303, .port = 0x3333 },
+                      make_frame_ptr(meta, mem_payload, "cc", 0, 2),
+                      del_payload,
+                      0);
+    TEST_ASSERT_EQUAL(3, cb_result.message.count);
+    TEST_ASSERT_EQUAL(12, cb_result.message.history[0].transfer_id);
+    TEST_ASSERT_EQUAL(11, cb_result.message.history[1].transfer_id);
+    udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag));
+    cb_result.message.history[0].payload = NULL;
+    udpard_fragment_free_all(cb_result.message.history[1].payload, udpard_make_deleter(mem_frag));
+    cb_result.message.history[1].payload = NULL;
+
+    // Probe a very old transfer-ID far behind the advanced head. This test does not pin down
+    // whether the ordered session delivers or drops it (hence the conditional free below);
+    // the hard requirement is only that no payload memory is retained either way.
+    meta.transfer_id = 5;
+    now += 100;
+    rx_session_update(ses,
+                      &rx,
+                      now,
+                      (udpard_udpip_ep_t){ .ip = 0x04040404, .port = 0x4444 },
+                      make_frame_ptr(meta, mem_payload, "dd", 0, 2),
+                      del_payload,
+                      2);
+    if ((cb_result.message.count > 0) && (cb_result.message.history[0].payload != NULL)) {
+        udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag));
+        cb_result.message.history[0].payload = NULL;
+    }
+    TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments);

-// -------------------------------------------------- IFACE --------------------------------------------------
+    udpard_rx_port_free(&rx, &port);
+    instrumented_allocator_reset(&alloc_frag);
+    instrumented_allocator_reset(&alloc_session);
+    instrumented_allocator_reset(&alloc_payload);
+}

-static void testIfaceIsFutureTransferID(void)
+static void test_rx_session_ordered_zero_reordering_window(void)
 {
-    InstrumentedAllocator mem_fragment = {0};
-    InstrumentedAllocator mem_payload  = {0};
-    instrumentedAllocatorNew(&mem_fragment);
-    instrumentedAllocatorNew(&mem_payload);
-    RxIface iface;
-    rxIfaceInit(&iface, makeRxMemory(&mem_fragment, &mem_payload));
-    TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec);
-    for (size_t i = 0; i < RX_SLOT_COUNT; i++)
-    {
-        TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.slots[i].ts_usec);
-        TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[i].transfer_id);
-        TEST_ASSERT_EQUAL(0, iface.slots[i].max_index);
-        TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, iface.slots[i].eot_index);
-        TEST_ASSERT_EQUAL(0, iface.slots[i].accepted_frames);
-        TEST_ASSERT_EQUAL(0, iface.slots[i].payload_size);
-        TEST_ASSERT_NULL(iface.slots[i].fragments);
+    // Zero window ordered session should only accept strictly sequential IDs.
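+    // With a zero reordering window the session has no time budget to hold out-of-order
+    // transfers: the jump from 1 to 3 below is expected to be dropped, and only the exact
+    // successor (2) is accepted afterwards.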
+ instrumented_allocator_t alloc_frag = { 0 }; + instrumented_allocator_new(&alloc_frag); + instrumented_allocator_t alloc_session = { 0 }; + instrumented_allocator_new(&alloc_session); + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_payload); + const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); + const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); + const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; + + udpard_rx_t rx; + udpard_rx_new(&rx, NULL); + callback_result_t cb_result = { 0 }; + rx.user = &cb_result; + + udpard_rx_port_t port = { 0 }; + TEST_ASSERT(udpard_rx_port_new(&port, 0x0F0E0D0C0B0A0908ULL, 256, udpard_rx_ordered, 0, rx_mem, &callbacks)); + + udpard_us_t now = 0; + const uint64_t remote_uid = 0x0102030405060708ULL; + rx_session_factory_args_t fac_args = { + .owner = &port, + .sessions_by_animation = &rx.list_session_by_animation, + .remote_uid = remote_uid, + .now = now, + }; + rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, + &remote_uid, + &cavl_compare_rx_session_by_remote_uid, + &fac_args, + &cavl_factory_rx_session_by_remote_uid); + TEST_ASSERT_NOT_NULL(ses); + + meta_t meta = { .priority = udpard_prio_nominal, + .flag_reliable = false, + .transfer_payload_size = 2, + .transfer_id = 1, + .sender_uid = remote_uid, + .topic_hash = port.topic_hash }; + now += 1000; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0xAA000001, .port = 0x1111 }, + make_frame_ptr(meta, mem_payload, "x1", 0, 2), + del_payload, + 0); + TEST_ASSERT(cb_result.message.count >= 1); + udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); + cb_result.message.history[0].payload = NULL; + + // Jump is dropped with zero window. + meta.transfer_id = 3; + now += 10; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0xAA000001, .port = 0x1111 }, + make_frame_ptr(meta, mem_payload, "x3", 0, 2), + del_payload, + 1); + TEST_ASSERT(cb_result.message.count >= 1); + + // Next expected transfer is accepted. 
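+ // ID 2 immediately follows the last delivered ID 1, and since the jump to ID 3 was
+ // dropped above, accepting ID 2 leaves nothing interned to release behind it.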
+ meta.transfer_id = 2; + now += 10; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0xAA000001, .port = 0x1111 }, + make_frame_ptr(meta, mem_payload, "x2", 0, 2), + del_payload, + 0); + TEST_ASSERT(cb_result.message.count >= 1); + if (cb_result.message.history[0].payload != NULL) { + udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); + cb_result.message.history[0].payload = NULL; + } + if ((cb_result.message.count > 1) && (cb_result.message.history[1].payload != NULL)) { + udpard_fragment_free_all(cb_result.message.history[1].payload, udpard_make_deleter(mem_frag)); + cb_result.message.history[1].payload = NULL; } - TEST_ASSERT_TRUE(rxIfaceIsFutureTransferID(&iface, 0)); - TEST_ASSERT_TRUE(rxIfaceIsFutureTransferID(&iface, 0xFFFFFFFFFFFFFFFFULL)); - iface.slots[0].transfer_id = 100; - TEST_ASSERT_FALSE(rxIfaceIsFutureTransferID(&iface, 99)); - TEST_ASSERT_FALSE(rxIfaceIsFutureTransferID(&iface, 100)); - TEST_ASSERT_TRUE(rxIfaceIsFutureTransferID(&iface, 101)); - iface.slots[0].transfer_id = TRANSFER_ID_UNSET; - iface.slots[1].transfer_id = 100; - TEST_ASSERT_FALSE(rxIfaceIsFutureTransferID(&iface, 99)); - TEST_ASSERT_FALSE(rxIfaceIsFutureTransferID(&iface, 100)); - TEST_ASSERT_TRUE(rxIfaceIsFutureTransferID(&iface, 101)); + + udpard_rx_port_free(&rx, &port); + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_session); + instrumented_allocator_reset(&alloc_payload); } -static void testIfaceCheckTransferIDTimeout(void) +static void test_rx_port(void) { - InstrumentedAllocator mem_fragment = {0}; - InstrumentedAllocator mem_payload = {0}; - instrumentedAllocatorNew(&mem_fragment); - instrumentedAllocatorNew(&mem_payload); - RxIface iface; - rxIfaceInit(&iface, makeRxMemory(&mem_fragment, &mem_payload)); - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); - for (size_t i = 0; i < RX_SLOT_COUNT; i++) - { - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.slots[i].ts_usec); - TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[i].transfer_id); - TEST_ASSERT_EQUAL(0, iface.slots[i].max_index); - TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, iface.slots[i].eot_index); - TEST_ASSERT_EQUAL(0, iface.slots[i].accepted_frames); - TEST_ASSERT_EQUAL(0, iface.slots[i].payload_size); - TEST_ASSERT_NULL(iface.slots[i].fragments); - } - // No successful transfers so far, and no slots in progress at the moment. - TEST_ASSERT_TRUE(rxIfaceCheckTransferIDTimeout(&iface, 0, 100)); - TEST_ASSERT_TRUE(rxIfaceCheckTransferIDTimeout(&iface, 1000, 100)); - // Suppose we have on successful transfer now. - iface.ts_usec = 1000; - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 500, 100)); // TS is in the past! Check overflows. - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1000, 100)); - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1050, 100)); - TEST_ASSERT_TRUE(rxIfaceCheckTransferIDTimeout(&iface, 1150, 100)); // Yup, this is a timeout. - TEST_ASSERT_TRUE(rxIfaceCheckTransferIDTimeout(&iface, 2150, 100)); // Yup, this is a timeout. - // Suppose there are some slots in progress. - iface.ts_usec = 1000; - iface.slots[0].ts_usec = 2000; - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 500, 100)); // TS is in the past! Check overflows. - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1000, 100)); - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1050, 100)); - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1150, 100)); // No timeout because of the slot in progress. 
- TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 2050, 100)); // Nope. - TEST_ASSERT_TRUE(rxIfaceCheckTransferIDTimeout(&iface, 2150, 100)); // Yeah. - TEST_ASSERT_TRUE(rxIfaceCheckTransferIDTimeout(&iface, 3050, 100)); // Ooh. - // More slots in progress. - iface.ts_usec = 1000; - iface.slots[0].ts_usec = 2000; - iface.slots[1].ts_usec = 3000; - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 500, 100)); // TS is in the past! Check overflows. - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1000, 100)); - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1050, 100)); - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1150, 100)); // No timeout because of the slot in progress. - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 2050, 100)); // Nope. - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 2150, 100)); // The other slot is newer. - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 3050, 100)); // Yes, but not yet. - TEST_ASSERT_TRUE(rxIfaceCheckTransferIDTimeout(&iface, 3150, 100)); // Yes. - // Now suppose there is no successful transfer, but there are some slots in progress. It's all the same. - iface.ts_usec = TIMESTAMP_UNSET; - iface.slots[0].ts_usec = 2000; - iface.slots[1].ts_usec = 3000; - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 500, 100)); // TS is in the past! Check overflows. - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1000, 100)); - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1050, 100)); - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1150, 100)); // No timeout because of the slot in progress. - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 2050, 100)); // Nope. - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 2150, 100)); // The other slot is newer. - TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 3050, 100)); // Yes, but not yet. - TEST_ASSERT_TRUE(rxIfaceCheckTransferIDTimeout(&iface, 3150, 100)); // Ooh yes. + // P2P ports behave like ordinary ports for payload delivery. + instrumented_allocator_t alloc_frag = { 0 }; + instrumented_allocator_new(&alloc_frag); + instrumented_allocator_t alloc_session = { 0 }; + instrumented_allocator_new(&alloc_session); + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_payload); + const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); + const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); + const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; + + udpard_rx_t rx; + udpard_rx_new(&rx, NULL); + callback_result_t cb_result = { 0 }; + rx.user = &cb_result; + + const uint64_t local_uid = 0xCAFED00DCAFED00DULL; + udpard_rx_port_t port = { 0 }; + TEST_ASSERT(udpard_rx_port_new(&port, local_uid, 64, udpard_rx_unordered, 0, rx_mem, &callbacks)); + + // Compose a P2P response datagram without a P2P header. 
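+ // The wire image built below is just the serialized header followed by the raw
+ // payload: [ HEADER_SIZE_BYTES | payload bytes ]. The rx_frame_t made via
+ // make_frame_ptr exists only to obtain a matching CRC for header_serialize and is
+ // freed right afterwards.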
+ const uint64_t resp_tid = 55; + const uint8_t payload[3] = { 'a', 'b', 'c' }; + + meta_t meta = { .priority = udpard_prio_fast, + .flag_reliable = false, + .transfer_payload_size = sizeof(payload), + .transfer_id = resp_tid, + .sender_uid = 0x0BADF00D0BADF00DULL, + .topic_hash = port.topic_hash }; + rx_frame_t* frame = make_frame_ptr(meta, mem_payload, payload, 0, sizeof(payload)); + byte_t dgram[HEADER_SIZE_BYTES + sizeof(payload)]; + header_serialize(dgram, meta, 0, 0, frame->base.crc); + memcpy(dgram + HEADER_SIZE_BYTES, payload, sizeof(payload)); + mem_free(mem_payload, frame->base.origin.size, frame->base.origin.data); + void* push_payload = mem_res_alloc(mem_payload, sizeof(dgram)); + memcpy(push_payload, dgram, sizeof(dgram)); + + udpard_us_t now = 0; + TEST_ASSERT(udpard_rx_port_push(&rx, + &port, + now, + (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, + (udpard_bytes_mut_t){ .data = push_payload, .size = sizeof(dgram) }, + del_payload, + 0)); + TEST_ASSERT_EQUAL(1, cb_result.message.count); + TEST_ASSERT_EQUAL(resp_tid, cb_result.message.history[0].transfer_id); + udpard_fragment_t* const frag = udpard_fragment_seek(cb_result.message.history[0].payload, 0); + TEST_ASSERT_NOT_NULL(frag); + TEST_ASSERT_EQUAL_size_t(3, frag->view.size); + TEST_ASSERT_EQUAL_MEMORY("abc", frag->view.data, 3); + udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); + cb_result.message.history[0].payload = NULL; + + udpard_rx_port_free(&rx, &port); + TEST_ASSERT_EQUAL(0, alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_session); + instrumented_allocator_reset(&alloc_payload); } -static void testIfaceFindMatchingSlot(void) +static void test_rx_port_timeouts(void) { - InstrumentedAllocator mem_fragment = {0}; - InstrumentedAllocator mem_payload = {0}; - instrumentedAllocatorNew(&mem_fragment); - instrumentedAllocatorNew(&mem_payload); - RxSlot slots[RX_SLOT_COUNT] = {0}; - rxSlotRestart(&slots[0], 1000, makeRxMemory(&mem_fragment, &mem_payload)); - rxSlotRestart(&slots[1], 1001, makeRxMemory(&mem_fragment, &mem_payload)); - // No matching slot. - TEST_ASSERT_NULL(rxIfaceFindMatchingSlot(slots, 123)); - // Matching slots. - TEST_ASSERT_EQUAL_PTR(&slots[0], rxIfaceFindMatchingSlot(slots, 1000)); - TEST_ASSERT_EQUAL_PTR(&slots[1], rxIfaceFindMatchingSlot(slots, 1001)); - // Identical slots, neither in progress. - slots[0].ts_usec = TIMESTAMP_UNSET; - slots[1].ts_usec = TIMESTAMP_UNSET; - slots[0].transfer_id = 1000; - slots[1].transfer_id = 1000; - TEST_ASSERT_EQUAL_PTR(&slots[0], rxIfaceFindMatchingSlot(slots, 1000)); // First match. - TEST_ASSERT_EQUAL_PTR(NULL, rxIfaceFindMatchingSlot(slots, 1001)); - // Identical slots, one of them in progress. - slots[0].ts_usec = TIMESTAMP_UNSET; - slots[1].ts_usec = 1234567890; - TEST_ASSERT_EQUAL_PTR(&slots[1], rxIfaceFindMatchingSlot(slots, 1000)); - TEST_ASSERT_EQUAL_PTR(NULL, rxIfaceFindMatchingSlot(slots, 1001)); - // The other is in progress now. - slots[0].ts_usec = 1234567890; - slots[1].ts_usec = TIMESTAMP_UNSET; - TEST_ASSERT_EQUAL_PTR(&slots[0], rxIfaceFindMatchingSlot(slots, 1000)); - TEST_ASSERT_EQUAL_PTR(NULL, rxIfaceFindMatchingSlot(slots, 1001)); - // Both in progress, pick first. 
- slots[0].ts_usec = 1234567890; - slots[1].ts_usec = 2345678901; - TEST_ASSERT_EQUAL_PTR(&slots[0], rxIfaceFindMatchingSlot(slots, 1000)); - TEST_ASSERT_EQUAL_PTR(NULL, rxIfaceFindMatchingSlot(slots, 1001)); + // Sessions are retired after SESSION_LIFETIME. + instrumented_allocator_t alloc_frag = { 0 }; + instrumented_allocator_new(&alloc_frag); + instrumented_allocator_t alloc_session = { 0 }; + instrumented_allocator_new(&alloc_session); + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_payload); + const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); + const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); + const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; + + udpard_rx_t rx; + udpard_rx_new(&rx, NULL); + callback_result_t cb_result = { 0 }; + rx.user = &cb_result; + + udpard_rx_port_t port = { 0 }; + TEST_ASSERT( + udpard_rx_port_new(&port, 0xBADC0FFEE0DDF00DULL, 128, udpard_rx_ordered, 20 * KILO, rx_mem, &callbacks)); + + meta_t meta = { .priority = udpard_prio_nominal, + .flag_reliable = false, + .transfer_payload_size = 4, + .transfer_id = 1, + .sender_uid = 0x1111222233334444ULL, + .topic_hash = port.topic_hash }; + rx_frame_t* frame = make_frame_ptr(meta, mem_payload, "ping", 0, 4); + const byte_t payload_bytes[] = { 'p', 'i', 'n', 'g' }; + byte_t dgram[HEADER_SIZE_BYTES + sizeof(payload_bytes)]; + header_serialize(dgram, meta, 0, 0, frame->base.crc); + memcpy(dgram + HEADER_SIZE_BYTES, payload_bytes, sizeof(payload_bytes)); + mem_free(mem_payload, frame->base.origin.size, frame->base.origin.data); + void* payload_buf = mem_res_alloc(mem_payload, sizeof(dgram)); + memcpy(payload_buf, dgram, sizeof(dgram)); + + udpard_us_t now = 0; + TEST_ASSERT(udpard_rx_port_push(&rx, + &port, + now, + (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, + (udpard_bytes_mut_t){ .data = payload_buf, .size = sizeof(dgram) }, + del_payload, + 0)); + TEST_ASSERT_GREATER_THAN_UINT32(0, alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL(1, cb_result.message.count); + udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); + cb_result.message.history[0].payload = NULL; + + now += SESSION_LIFETIME + 1; + udpard_rx_poll(&rx, now); + TEST_ASSERT_EQUAL(0, alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); + udpard_rx_port_free(&rx, &port); + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_session); + instrumented_allocator_reset(&alloc_payload); } -static void testIfaceAcceptA(void) +static void test_rx_port_oom(void) { - InstrumentedAllocator mem_fragment = {0}; - InstrumentedAllocator mem_payload = {0}; - instrumentedAllocatorNew(&mem_fragment); - instrumentedAllocatorNew(&mem_payload); - const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); - RxIface iface; - rxIfaceInit(&iface, mem); - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); - for (size_t i = 0; i < RX_SLOT_COUNT; i++) - { - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.slots[i].ts_usec); - TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[i].transfer_id); - TEST_ASSERT_EQUAL(0, iface.slots[i].max_index); - TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, 
iface.slots[i].eot_index); - TEST_ASSERT_EQUAL(0, iface.slots[i].accepted_frames); - TEST_ASSERT_EQUAL(0, iface.slots[i].payload_size); - TEST_ASSERT_NULL(iface.slots[i].fragments); - } - struct UdpardRxTransfer transfer = {0}; - - // === TRANSFER === - // A simple single-frame transfer successfully accepted. - TEST_ASSERT_EQUAL(1, - rxIfaceAccept(&iface, - 1234567890, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityHigh, - .src_node_id = 1234, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0x1234, - .transfer_id = 0x1122334455667788U}, - 0, - true, - "I am a tomb." - "\x1F\\\xCDs"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); // Head fragment is not heap-allocated. - // Check the transfer we just accepted. - TEST_ASSERT_EQUAL(1234567890, transfer.timestamp_usec); - TEST_ASSERT_EQUAL(UdpardPriorityHigh, transfer.priority); - TEST_ASSERT_EQUAL(1234, transfer.source_node_id); - TEST_ASSERT_EQUAL(0x1122334455667788U, transfer.transfer_id); - TEST_ASSERT_EQUAL(12, transfer.payload_size); - TEST_ASSERT(compareStringWithPayload("I am a tomb.", transfer.payload.view)); - udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - // Check the internal states of the iface. - TEST_ASSERT_EQUAL(1234567890, iface.ts_usec); - TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[0].transfer_id); // Still unused. - TEST_ASSERT_EQUAL(0x1122334455667789U, iface.slots[1].transfer_id); // Incremented. - - // === TRANSFER === - // Send a duplicate and ensure it is rejected. - TEST_ASSERT_EQUAL(0, // No transfer accepted. - rxIfaceAccept(&iface, - 1234567891, // different timestamp but ignored anyway - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityHigh, - .src_node_id = 1234, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0x1234, - .transfer_id = 0x1122334455667788U}, - 0, - true, - "I am a tomb." - "\x1F\\\xCDs"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - // Check the internal states of the iface. - TEST_ASSERT_EQUAL(1234567890, iface.ts_usec); // same old timestamp - TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[0].transfer_id); // Still unused. - TEST_ASSERT_EQUAL(0x1122334455667789U, iface.slots[1].transfer_id); // good ol' transfer id - - // === TRANSFER === - // Send a non-duplicate transfer with an invalid CRC using an in-sequence (matching) transfer-ID. - TEST_ASSERT_EQUAL(0, // No transfer accepted. - rxIfaceAccept(&iface, - 1234567892, // different timestamp but ignored anyway - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityHigh, - .src_node_id = 1234, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0x1234, - .transfer_id = 0x1122334455667789U}, - 0, - true, - "I am a tomb." - "No CRC here."), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - // Check the internal states of the iface. 
- TEST_ASSERT_EQUAL(1234567890, iface.ts_usec); // same old timestamp - TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[0].transfer_id); // Still unused. - TEST_ASSERT_EQUAL(0x112233445566778AU, iface.slots[1].transfer_id); // Incremented. - - // === TRANSFER === - // Send a non-duplicate transfer with an invalid CRC using an out-of-sequence (non-matching) transfer-ID. - // Transfer-ID jumps forward, no existing slot; will use the second one. - TEST_ASSERT_EQUAL(0, // No transfer accepted. - rxIfaceAccept(&iface, - 1234567893, // different timestamp but ignored anyway - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityHigh, - .src_node_id = 1234, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0x1234, - .transfer_id = 0x1122334455667790U}, - 0, - true, - "I am a tomb." - "No CRC here, #2."), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - // Check the internal states of the iface. - TEST_ASSERT_EQUAL(1234567890, iface.ts_usec); // same old timestamp - TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[0].transfer_id); // Still unused. - TEST_ASSERT_EQUAL(0x1122334455667791U, iface.slots[1].transfer_id); // Replaced the old one, it was unneeded. - - // === TRANSFER === (x2) - // Send two interleaving multi-frame out-of-order transfers: - // A2 B1 A0 B0 A1 - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - // A2 - TEST_ASSERT_EQUAL(0, - rxIfaceAccept(&iface, - 2000000020, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityHigh, - .src_node_id = 1111, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0x1111, - .transfer_id = 1000U}, - 2, - true, - "A2" - "v\x1E\xBD]"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(1234567890, iface.ts_usec); // same old timestamp - TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[0].transfer_id); // Still unused. - TEST_ASSERT_EQUAL(1000, iface.slots[1].transfer_id); // Replaced the old one, it was unneeded. - TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); - // B1 - TEST_ASSERT_EQUAL(0, - rxIfaceAccept(&iface, - 2000000010, // Transfer-ID timeout. - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPrioritySlow, - .src_node_id = 2222, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0x2222, - .transfer_id = 1001U}, - 1, - true, - "B1" - "g\x8D\x9A\xD7"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(1234567890, iface.ts_usec); // same old timestamp - TEST_ASSERT_EQUAL(1001, iface.slots[0].transfer_id); // Used for B because the other one is taken. - TEST_ASSERT_EQUAL(1000, iface.slots[1].transfer_id); // Keeps A because it is in-progress, can't discard. 
- TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); - // A0 - TEST_ASSERT_EQUAL(0, - rxIfaceAccept(&iface, - 2000000030, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityHigh, - .src_node_id = 1111, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0x1111, - .transfer_id = 1000U}, - 0, - false, - "A0"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(1234567890, iface.ts_usec); // same old timestamp - TEST_ASSERT_EQUAL(1001, iface.slots[0].transfer_id); - TEST_ASSERT_EQUAL(1000, iface.slots[1].transfer_id); - TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); - // B0 - TEST_ASSERT_EQUAL(1, - rxIfaceAccept(&iface, - 2000000040, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPrioritySlow, - .src_node_id = 2222, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0x2222, - .transfer_id = 1001U}, - 0, - false, - "B0"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - // TRANSFER B RECEIVED, check it. - TEST_ASSERT_EQUAL(2000000010, iface.ts_usec); - TEST_ASSERT_EQUAL(1002, iface.slots[0].transfer_id); // Incremented to meet the next transfer. - TEST_ASSERT_EQUAL(1000, iface.slots[1].transfer_id); - TEST_ASSERT_EQUAL(4, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); // One fragment freed because of the head optimization. - // Check the payload. - TEST_ASSERT_EQUAL(2000000010, transfer.timestamp_usec); - TEST_ASSERT_EQUAL(UdpardPrioritySlow, transfer.priority); - TEST_ASSERT_EQUAL(2222, transfer.source_node_id); - TEST_ASSERT_EQUAL(1001, transfer.transfer_id); - TEST_ASSERT_EQUAL(4, transfer.payload_size); - TEST_ASSERT(compareStringWithPayload("B0", transfer.payload.view)); - TEST_ASSERT_NOT_NULL(transfer.payload.next); - TEST_ASSERT(compareStringWithPayload("B1", transfer.payload.next->view)); - TEST_ASSERT_NULL(transfer.payload.next->next); - udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); - TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // Only the remaining A0 A2 are left. - TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); - // A1 - TEST_ASSERT_EQUAL(1, - rxIfaceAccept(&iface, - 2000000050, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityHigh, - .src_node_id = 1111, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0x1111, - .transfer_id = 1000U}, - 1, - false, - "A1"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - // TRANSFER A RECEIVED, check it. - TEST_ASSERT_EQUAL(2000000020, iface.ts_usec); // same old timestamp - TEST_ASSERT_EQUAL(1002, iface.slots[0].transfer_id); - TEST_ASSERT_EQUAL(1001, iface.slots[1].transfer_id); // Incremented to meet the next transfer. - // Check the payload. 
- TEST_ASSERT_EQUAL(2000000020, transfer.timestamp_usec); - TEST_ASSERT_EQUAL(UdpardPriorityHigh, transfer.priority); - TEST_ASSERT_EQUAL(1111, transfer.source_node_id); - TEST_ASSERT_EQUAL(1000, transfer.transfer_id); - TEST_ASSERT_EQUAL(6, transfer.payload_size); - TEST_ASSERT(compareStringWithPayload("A0", transfer.payload.view)); - TEST_ASSERT_NOT_NULL(transfer.payload.next); - TEST_ASSERT(compareStringWithPayload("A1", transfer.payload.next->view)); - TEST_ASSERT_NOT_NULL(transfer.payload.next->next); - TEST_ASSERT(compareStringWithPayload("A2", transfer.payload.next->next->view)); - TEST_ASSERT_NULL(transfer.payload.next->next->next); - udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + // Session allocation failure should be reported gracefully. + instrumented_allocator_t alloc_frag = { 0 }; + instrumented_allocator_new(&alloc_frag); + instrumented_allocator_t alloc_session = { 0 }; + instrumented_allocator_new(&alloc_session); + alloc_session.limit_fragments = 0; // force allocation failure + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_payload); + const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); + const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); + const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; + + udpard_rx_t rx; + udpard_rx_new(&rx, NULL); + callback_result_t cb_result = { 0 }; + rx.user = &cb_result; + + udpard_rx_port_t port = { 0 }; + TEST_ASSERT(udpard_rx_port_new(&port, 0xCAFEBABECAFEBABEULL, 64, udpard_rx_unordered, 0, rx_mem, &callbacks)); + + meta_t meta = { .priority = udpard_prio_nominal, + .flag_reliable = false, + .transfer_payload_size = 4, + .transfer_id = 1, + .sender_uid = 0x0101010101010101ULL, + .topic_hash = port.topic_hash }; + rx_frame_t* frame = make_frame_ptr(meta, mem_payload, "oom!", 0, 4); + const byte_t payload_bytes[] = { 'o', 'o', 'm', '!' 
}; + byte_t dgram[HEADER_SIZE_BYTES + sizeof(payload_bytes)]; + header_serialize(dgram, meta, 0, 0, frame->base.crc); + memcpy(dgram + HEADER_SIZE_BYTES, payload_bytes, sizeof(payload_bytes)); + mem_free(mem_payload, frame->base.origin.size, frame->base.origin.data); + void* payload_buf = mem_res_alloc(mem_payload, sizeof(dgram)); + memcpy(payload_buf, dgram, sizeof(dgram)); + + udpard_us_t now = 0; + TEST_ASSERT(udpard_rx_port_push(&rx, + &port, + now, + (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, + (udpard_bytes_mut_t){ .data = payload_buf, .size = sizeof(dgram) }, + del_payload, + 0)); + TEST_ASSERT_GREATER_THAN_UINT64(0, rx.errors_oom); + TEST_ASSERT_EQUAL(0, alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL(0, cb_result.message.count); + TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); + udpard_rx_port_free(&rx, &port); + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_session); + instrumented_allocator_reset(&alloc_payload); } -static void testIfaceAcceptB(void) +static void test_rx_port_free_loop(void) { - InstrumentedAllocator mem_fragment = {0}; - InstrumentedAllocator mem_payload = {0}; - instrumentedAllocatorNew(&mem_fragment); - instrumentedAllocatorNew(&mem_payload); - const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); - RxIface iface; - rxIfaceInit(&iface, mem); - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); - for (size_t i = 0; i < RX_SLOT_COUNT; i++) + // Freeing ports with in-flight transfers releases all allocations. + instrumented_allocator_t alloc_frag = { 0 }; + instrumented_allocator_new(&alloc_frag); + instrumented_allocator_t alloc_session = { 0 }; + instrumented_allocator_new(&alloc_session); + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_payload); + const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); + const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); + const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; + + udpard_rx_t rx; + udpard_rx_new(&rx, NULL); + callback_result_t cb_result = { 0 }; + rx.user = &cb_result; + + udpard_rx_port_t port_p2p = { 0 }; + TEST_ASSERT( + udpard_rx_port_new(&port_p2p, 0xCAFED00DCAFED00DULL, SIZE_MAX, udpard_rx_unordered, 0, rx_mem, &callbacks)); + udpard_rx_port_t port_extra = { 0 }; + const uint64_t topic_hash_extra = 0xDEADBEEFF00D1234ULL; + TEST_ASSERT(udpard_rx_port_new(&port_extra, topic_hash_extra, 1000, udpard_rx_ordered, 5000, rx_mem, &callbacks)); + + udpard_us_t now = 0; + + // Incomplete transfer on the p2p port. 
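+ // Only the first 4 of the 10 declared payload bytes are pushed, so this transfer can
+ // never complete; its fragments stay owned by the session until the port is freed.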
{ - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.slots[i].ts_usec); - TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[i].transfer_id); - TEST_ASSERT_EQUAL(0, iface.slots[i].max_index); - TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, iface.slots[i].eot_index); - TEST_ASSERT_EQUAL(0, iface.slots[i].accepted_frames); - TEST_ASSERT_EQUAL(0, iface.slots[i].payload_size); - TEST_ASSERT_NULL(iface.slots[i].fragments); + const char* payload = "INCOMPLETE"; + meta_t meta = { .priority = udpard_prio_slow, + .flag_reliable = false, + .transfer_payload_size = (uint32_t)strlen(payload), + .transfer_id = 10, + .sender_uid = 0xAAAAULL, + .topic_hash = port_p2p.topic_hash }; + rx_frame_t* frame = make_frame_ptr(meta, mem_payload, payload, 0, 4); + byte_t dgram[HEADER_SIZE_BYTES + 4]; + header_serialize(dgram, meta, 0, 0, frame->base.crc); + memcpy(dgram + HEADER_SIZE_BYTES, payload, 4); + mem_free(mem_payload, frame->base.origin.size, frame->base.origin.data); + void* push_payload = mem_res_alloc(mem_payload, sizeof(dgram)); + memcpy(push_payload, dgram, sizeof(dgram)); + now += 1000; + TEST_ASSERT(udpard_rx_port_push(&rx, + &port_p2p, + now, + (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, + (udpard_bytes_mut_t){ .data = push_payload, .size = sizeof(dgram) }, + del_payload, + 0)); } - struct UdpardRxTransfer transfer = {0}; - // === TRANSFER === (x3) - // Send three interleaving multi-frame out-of-order transfers (primes for duplicates): - // A2 B1 A0 C0 B0 A1 C0' C1 - // A2 arrives before B1 but its timestamp is higher. - // Transfer B will be evicted by C because by the time C0 arrives, transfer B is the oldest one, - // since its timestamp is inherited from B0. - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - // A2 - TEST_ASSERT_EQUAL(0, - rxIfaceAccept(&iface, - 2000000020, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityHigh, - .src_node_id = 1111, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0x1111, - .transfer_id = 1000U}, - 2, - true, - "A2" - "v\x1E\xBD]"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); - TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[0].transfer_id); - TEST_ASSERT_EQUAL(1000, iface.slots[1].transfer_id); - TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); - // B1 - TEST_ASSERT_EQUAL(0, - rxIfaceAccept(&iface, - 2000000010, // TIME REORDERING -- lower than previous. 
- makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPrioritySlow, - .src_node_id = 2222, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0x2222, - .transfer_id = 1001U}, - 1, - true, - "B1" - "g\x8D\x9A\xD7"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); - TEST_ASSERT_EQUAL(1001, iface.slots[0].transfer_id); - TEST_ASSERT_EQUAL(1000, iface.slots[1].transfer_id); - TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); - // A0 - TEST_ASSERT_EQUAL(0, - rxIfaceAccept(&iface, - 2000000030, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityHigh, - .src_node_id = 1111, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0x1111, - .transfer_id = 1000U}, - 0, - false, - "A0"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); - TEST_ASSERT_EQUAL(1001, iface.slots[0].transfer_id); - TEST_ASSERT_EQUAL(1000, iface.slots[1].transfer_id); - TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); - // C0 - TEST_ASSERT_EQUAL(0, - rxIfaceAccept(&iface, - 2000000040, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityHigh, - .src_node_id = 3333, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0x3333, - .transfer_id = 1002U}, - 0, - false, - "C0"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); - TEST_ASSERT_EQUAL(1002, iface.slots[0].transfer_id); // B evicted by C. - TEST_ASSERT_EQUAL(1000, iface.slots[1].transfer_id); - TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); // Payload of B is freed, so the usage is unchanged. - TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); - // B0 - TEST_ASSERT_EQUAL(0, // Cannot be accepted because its slot is taken over by C. - rxIfaceAccept(&iface, - 2000000050, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPrioritySlow, - .src_node_id = 2222, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0x2222, - .transfer_id = 1001U}, - 0, - false, - "B0"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); - TEST_ASSERT_EQUAL(1002, iface.slots[0].transfer_id); - TEST_ASSERT_EQUAL(1000, iface.slots[1].transfer_id); - TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); // No increase, frame not accepted. - TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); - // A1 - TEST_ASSERT_EQUAL(1, - rxIfaceAccept(&iface, - 2000000050, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityHigh, - .src_node_id = 1111, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0x1111, - .transfer_id = 1000U}, - 1, - false, - "A1"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - // TRANSFER A RECEIVED, check it. - TEST_ASSERT_EQUAL(2000000020, iface.ts_usec); // same old timestamp - TEST_ASSERT_EQUAL(1002, iface.slots[0].transfer_id); - TEST_ASSERT_EQUAL(1001, iface.slots[1].transfer_id); // Incremented to meet the next transfer. - // Check the payload. 
- TEST_ASSERT_EQUAL(2000000020, transfer.timestamp_usec); - TEST_ASSERT_EQUAL(UdpardPriorityHigh, transfer.priority); - TEST_ASSERT_EQUAL(1111, transfer.source_node_id); - TEST_ASSERT_EQUAL(1000, transfer.transfer_id); - TEST_ASSERT_EQUAL(6, transfer.payload_size); - TEST_ASSERT(compareStringWithPayload("A0", transfer.payload.view)); - TEST_ASSERT_NOT_NULL(transfer.payload.next); - TEST_ASSERT(compareStringWithPayload("A1", transfer.payload.next->view)); - TEST_ASSERT_NOT_NULL(transfer.payload.next->next); - TEST_ASSERT(compareStringWithPayload("A2", transfer.payload.next->next->view)); - TEST_ASSERT_NULL(transfer.payload.next->next->next); - udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); - TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); // Some memory is retained for the C0 payload. - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); - // C0 DUPLICATE - TEST_ASSERT_EQUAL(0, - rxIfaceAccept(&iface, - 2000000060, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityHigh, - .src_node_id = 3333, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0x3333, - .transfer_id = 1002U}, - 0, - false, - "C0 DUPLICATE"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(2000000020, iface.ts_usec); // Last transfer timestamp. - TEST_ASSERT_EQUAL(1002, iface.slots[0].transfer_id); - TEST_ASSERT_EQUAL(1001, iface.slots[1].transfer_id); - TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); // Not accepted, so no change. - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); - // C1 - TEST_ASSERT_EQUAL(1, - rxIfaceAccept(&iface, - 2000000070, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityHigh, - .src_node_id = 3333, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0x3333, - .transfer_id = 1002U}, - 1, - true, - "C1" - "\xA8\xBF}\x19"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - // TRANSFER C RECEIVED, check it. - TEST_ASSERT_EQUAL(2000000040, iface.ts_usec); - TEST_ASSERT_EQUAL(1003, iface.slots[0].transfer_id); // Incremented to meet the next transfer. - TEST_ASSERT_EQUAL(1001, iface.slots[1].transfer_id); - TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // Keeping two fragments of C. - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); // Head optimization in effect. - // Check the payload. - TEST_ASSERT_EQUAL(2000000040, transfer.timestamp_usec); - TEST_ASSERT_EQUAL(UdpardPriorityHigh, transfer.priority); - TEST_ASSERT_EQUAL(3333, transfer.source_node_id); - TEST_ASSERT_EQUAL(1002, transfer.transfer_id); - TEST_ASSERT_EQUAL(4, transfer.payload_size); - TEST_ASSERT(compareStringWithPayload("C0", transfer.payload.view)); - TEST_ASSERT_NOT_NULL(transfer.payload.next); - TEST_ASSERT(compareStringWithPayload("C1", transfer.payload.next->view)); - TEST_ASSERT_NULL(transfer.payload.next->next); - udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); // Some memory is retained for the C0 payload. 
- TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); -} -static void testIfaceAcceptC(void) -{ - InstrumentedAllocator mem_fragment = {0}; - InstrumentedAllocator mem_payload = {0}; - instrumentedAllocatorNew(&mem_fragment); - instrumentedAllocatorNew(&mem_payload); - const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); - RxIface iface; - rxIfaceInit(&iface, mem); - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); - for (size_t i = 0; i < RX_SLOT_COUNT; i++) + // Incomplete transfer on the extra port. { - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.slots[i].ts_usec); - TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[i].transfer_id); - TEST_ASSERT_EQUAL(0, iface.slots[i].max_index); - TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, iface.slots[i].eot_index); - TEST_ASSERT_EQUAL(0, iface.slots[i].accepted_frames); - TEST_ASSERT_EQUAL(0, iface.slots[i].payload_size); - TEST_ASSERT_NULL(iface.slots[i].fragments); + const char* payload = "FRAGMENTS"; + meta_t meta = { .priority = udpard_prio_fast, + .flag_reliable = false, + .transfer_payload_size = (uint32_t)strlen(payload), + .transfer_id = 20, + .sender_uid = 0xBBBBULL, + .topic_hash = topic_hash_extra }; + rx_frame_t* frame = make_frame_ptr(meta, mem_payload, payload, 0, 3); + byte_t dgram[HEADER_SIZE_BYTES + 3]; + header_serialize(dgram, meta, 0, 0, frame->base.crc); + memcpy(dgram + HEADER_SIZE_BYTES, payload, 3); + mem_free(mem_payload, frame->base.origin.size, frame->base.origin.data); + void* push_payload = mem_res_alloc(mem_payload, sizeof(dgram)); + memcpy(push_payload, dgram, sizeof(dgram)); + now += 1000; + TEST_ASSERT(udpard_rx_port_push(&rx, + &port_extra, + now, + (udpard_udpip_ep_t){ .ip = 0x0A000002, .port = 0x5678 }, + (udpard_bytes_mut_t){ .data = push_payload, .size = sizeof(dgram) }, + del_payload, + 1)); } - struct UdpardRxTransfer transfer = {0}; - // === TRANSFER === - // Send interleaving multi-frame transfers such that in the end slots have the same transfer-ID value - // (primes for duplicates): - // A0 B0 A1 C0 B1 C1 B1' - // The purpose of this test is to ensure that the case of multiple RX slots having the same transfer-ID is - // handled correctly (including correct duplicate detection). 
- TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - // A0 - TEST_ASSERT_EQUAL(0, - rxIfaceAccept(&iface, - 2000000010, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityOptional, - .src_node_id = 1111, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xA}, - 0, - false, - "A0"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); - TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[0].transfer_id); - TEST_ASSERT_EQUAL(0xA, iface.slots[1].transfer_id); - TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); - // B0 - TEST_ASSERT_EQUAL(0, - rxIfaceAccept(&iface, - 2000000020, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityExceptional, - .src_node_id = 2222, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xB}, - 0, - false, - "B0"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); - TEST_ASSERT_EQUAL(0xB, iface.slots[0].transfer_id); - TEST_ASSERT_EQUAL(0xA, iface.slots[1].transfer_id); - TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); - // A1 - TEST_ASSERT_EQUAL(1, - rxIfaceAccept(&iface, - 2000000030, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityOptional, - .src_node_id = 1111, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xA}, - 1, - true, - "A1" - "\xc7\xac_\x81"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - // Check the received transfer. - TEST_ASSERT_EQUAL(2000000010, iface.ts_usec); - TEST_ASSERT_EQUAL(0xB, iface.slots[0].transfer_id); - TEST_ASSERT_EQUAL(0xB, iface.slots[1].transfer_id); // SAME VALUE!!1 - TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); // Head optimization in effect. - TEST_ASSERT_EQUAL(UdpardPriorityOptional, transfer.priority); - TEST_ASSERT_EQUAL(4, transfer.payload_size); - TEST_ASSERT(compareStringWithPayload("A0", transfer.payload.view)); - TEST_ASSERT_NOT_NULL(transfer.payload.next); - TEST_ASSERT(compareStringWithPayload("A1", transfer.payload.next->view)); - TEST_ASSERT_NULL(transfer.payload.next->next); - udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); - TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); // B0 still allocated. - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); - // C0 - TEST_ASSERT_EQUAL(0, - rxIfaceAccept(&iface, - 2000000040, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityExceptional, - .src_node_id = 3333, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xC}, - 0, - false, - "C0"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(2000000010, iface.ts_usec); // <- unchanged. - TEST_ASSERT_EQUAL(0xB, iface.slots[0].transfer_id); - TEST_ASSERT_EQUAL(0xC, iface.slots[1].transfer_id); // <- reused for C. - TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // Two transfers in transit again: B and C. 
- TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); - // B1 - TEST_ASSERT_EQUAL(1, - rxIfaceAccept(&iface, - 2000000050, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityExceptional, - .src_node_id = 2222, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xB}, - 1, - true, - "B1" - "g\x8D\x9A\xD7"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - // Check the received transfer. - TEST_ASSERT_EQUAL(2000000020, iface.ts_usec); - TEST_ASSERT_EQUAL(0xC, iface.slots[0].transfer_id); // <-- INCREMENTED, SO - TEST_ASSERT_EQUAL(0xC, iface.slots[1].transfer_id); // WE HAVE TWO IDENTICAL VALUES AGAIN! - TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(UdpardPriorityExceptional, transfer.priority); - TEST_ASSERT_EQUAL(4, transfer.payload_size); - TEST_ASSERT(compareStringWithPayload("B0", transfer.payload.view)); - TEST_ASSERT_NOT_NULL(transfer.payload.next); - TEST_ASSERT(compareStringWithPayload("B1", transfer.payload.next->view)); - TEST_ASSERT_NULL(transfer.payload.next->next); - udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); - TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); // C0 is still allocated. - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); - // C1 - // This is the DIFFICULT CASE because we have two RX slots with the same transfer-ID, but THE FIRST ONE IS NOT - // THE ONE THAT WE NEED! Watch what happens next. - TEST_ASSERT_EQUAL(1, - rxIfaceAccept(&iface, - 2000000060, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityExceptional, - .src_node_id = 3333, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xC}, - 1, - true, - "C1" - "\xA8\xBF}\x19"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - // Check the received transfer. - TEST_ASSERT_EQUAL(2000000040, iface.ts_usec); - TEST_ASSERT_EQUAL(0xC, iface.slots[0].transfer_id); // Old, unused. - TEST_ASSERT_EQUAL(0xD, iface.slots[1].transfer_id); // INCREMENTED! - TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(UdpardPriorityExceptional, transfer.priority); - TEST_ASSERT_EQUAL(3333, transfer.source_node_id); - TEST_ASSERT_EQUAL(4, transfer.payload_size); - TEST_ASSERT(compareStringWithPayload("C0", transfer.payload.view)); - TEST_ASSERT_NOT_NULL(transfer.payload.next); - TEST_ASSERT(compareStringWithPayload("C1", transfer.payload.next->view)); - TEST_ASSERT_NULL(transfer.payload.next->next); - udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - // B0 duplicate multi-frame; shall be rejected. - TEST_ASSERT_EQUAL(0, - rxIfaceAccept(&iface, - 2000000070, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityExceptional, - .src_node_id = 2222, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xB}, - 0, - false, - "B0"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - // B0 duplicate single-frame; shall be rejected. 
- TEST_ASSERT_EQUAL(0, - rxIfaceAccept(&iface, - 2000000080, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityExceptional, - .src_node_id = 2222, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xB}, - 0, - true, - "B0" - "g\x8D\x9A\xD7"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + + TEST_ASSERT(alloc_session.allocated_fragments >= 2); + TEST_ASSERT(alloc_frag.allocated_fragments >= 2); + udpard_rx_port_free(&rx, &port_p2p); + udpard_rx_port_free(&rx, &port_extra); + TEST_ASSERT_EQUAL_size_t(0, alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_session); + instrumented_allocator_reset(&alloc_payload); } -// -------------------------------------------------- SESSION -------------------------------------------------- +static size_t g_collision_count = 0; // NOLINT(*-avoid-non-const-global-variables) -static void testSessionDeduplicate(void) +static void stub_on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) { - InstrumentedAllocator mem_fragment = {0}; - InstrumentedAllocator mem_payload = {0}; - instrumentedAllocatorNew(&mem_fragment); - instrumentedAllocatorNew(&mem_payload); - const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); - struct UdpardInternalRxSession session = {0}; - rxSessionInit(&session, mem); - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, session.last_ts_usec); - TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, session.last_transfer_id); - { - struct UdpardFragment* const head = &makeRxFragmentString(mem, 0, "ABC", NULL)->base; - head->next = &makeRxFragmentString(mem, 1, "DEF", NULL)->base; - struct UdpardRxTransfer transfer = {.timestamp_usec = 10000000, - .transfer_id = 0x0DDC0FFEEBADF00D, - .payload_size = 6, - .payload = *head}; - memFree(mem.fragment, sizeof(RxFragment), head); // Cloned, no longer needed. - TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); - // The first transfer after initialization is always accepted. - TEST_ASSERT(rxSessionDeduplicate(&session, UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, &transfer, mem)); - // Check the final states. - TEST_ASSERT_EQUAL(6, transfer.payload_size); - TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // The application shall free the payload. - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(10000000, session.last_ts_usec); - TEST_ASSERT_EQUAL(0x0DDC0FFEEBADF00D, session.last_transfer_id); - // Feed the same transfer again; now it is a duplicate and so it is rejected and freed. - transfer.timestamp_usec = 10000001; - TEST_ASSERT_FALSE(rxSessionDeduplicate(&session, UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, &transfer, mem)); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(10000000, session.last_ts_usec); // Timestamp is not updated. - TEST_ASSERT_EQUAL(0x0DDC0FFEEBADF00D, session.last_transfer_id); - } - { - // Emit a duplicate but after the transfer-ID timeout has occurred. Ensure it is accepted. 
- struct UdpardFragment* const head = &makeRxFragmentString(mem, 0, "ABC", NULL)->base; - head->next = &makeRxFragmentString(mem, 1, "DEF", NULL)->base; - struct UdpardRxTransfer transfer = {.timestamp_usec = 12000000, // TID timeout. - .transfer_id = 0x0DDC0FFEEBADF000, // transfer-ID reduced. - .payload_size = 6, - .payload = *head}; - memFree(mem.fragment, sizeof(RxFragment), head); // Cloned, no longer needed. - TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); - // Accepted due to the TID timeout. - TEST_ASSERT(rxSessionDeduplicate(&session, UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, &transfer, mem)); - // Check the final states. - TEST_ASSERT_EQUAL(6, transfer.payload_size); - TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // The application shall free the payload. - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(12000000, session.last_ts_usec); - TEST_ASSERT_EQUAL(0x0DDC0FFEEBADF000, session.last_transfer_id); - // Feed the same transfer again; now it is a duplicate and so it is rejected and freed. - transfer.timestamp_usec = 12000001; - TEST_ASSERT_FALSE(rxSessionDeduplicate(&session, UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, &transfer, mem)); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(12000000, session.last_ts_usec); // Timestamp is not updated. - TEST_ASSERT_EQUAL(0x0DDC0FFEEBADF000, session.last_transfer_id); - } - { - // Ensure another transfer with a greater transfer-ID is accepted immediately. - struct UdpardFragment* const head = &makeRxFragmentString(mem, 0, "ABC", NULL)->base; - head->next = &makeRxFragmentString(mem, 1, "DEF", NULL)->base; - struct UdpardRxTransfer transfer = {.timestamp_usec = 11000000, // Simulate clock jitter. - .transfer_id = 0x0DDC0FFEEBADF001, // Incremented. - .payload_size = 6, - .payload = *head}; - memFree(mem.fragment, sizeof(RxFragment), head); // Cloned, no longer needed. - TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); - // Accepted because TID greater. - TEST_ASSERT(rxSessionDeduplicate(&session, UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, &transfer, mem)); - // Check the final states. - TEST_ASSERT_EQUAL(6, transfer.payload_size); - TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // The application shall free the payload. - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(11000000, session.last_ts_usec); // Updated. - TEST_ASSERT_EQUAL(0x0DDC0FFEEBADF001, session.last_transfer_id); // Updated. - // Feed the same transfer again; now it is a duplicate and so it is rejected and freed. - transfer.timestamp_usec = 11000000; - TEST_ASSERT_FALSE(rxSessionDeduplicate(&session, UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, &transfer, mem)); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(11000000, session.last_ts_usec); // Timestamp is not updated. 
- TEST_ASSERT_EQUAL(0x0DDC0FFEEBADF001, session.last_transfer_id); - } + (void)rx; + udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); } -static void testSessionAcceptA(void) +static void stub_on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_remote_t remote) { - InstrumentedAllocator mem_fragment = {0}; - InstrumentedAllocator mem_payload = {0}; - instrumentedAllocatorNew(&mem_fragment); - instrumentedAllocatorNew(&mem_payload); - const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); - struct UdpardInternalRxSession session = {0}; - rxSessionInit(&session, mem); - TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, session.last_ts_usec); - TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, session.last_transfer_id); - struct UdpardRxTransfer transfer = {0}; - // Accept a simple transfer through iface #1. - TEST_ASSERT_EQUAL(1, - rxSessionAccept(&session, - 1, - 10000000, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityExceptional, - .src_node_id = 2222, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xB}, - 0, - true, - "Z\xBA\xA1\xBAh"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - // Free the payload. - udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - // Send the same transfer again through a different iface; it is a duplicate and so it is rejected and freed. - TEST_ASSERT_EQUAL(0, - rxSessionAccept(&session, - 0, - 10000010, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityExceptional, - .src_node_id = 2222, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xB}, - 0, - true, - "Z\xBA\xA1\xBAh"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - // Send a valid transfer that should be accepted but we inject an OOM error. 
- mem_fragment.limit_fragments = 0; - TEST_ASSERT_EQUAL(-UDPARD_ERROR_MEMORY, - rxSessionAccept(&session, - 2, - 12000020, - makeRxFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityExceptional, - .src_node_id = 2222, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xC}, - 0, - true, - "Z\xBA\xA1\xBAh"), - 1000, - UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, - mem, - &transfer)); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + (void)rx; + (void)port; + (void)remote; + g_collision_count++; } -// -------------------------------------------------- PORT -------------------------------------------------- +static udpard_udpip_ep_t make_ep(const uint32_t ip) { return (udpard_udpip_ep_t){ .ip = ip, .port = 1U }; } -static inline void testPortAcceptFrameA(void) +static void test_rx_additional_coverage(void) { - InstrumentedAllocator mem_session = {0}; - InstrumentedAllocator mem_fragment = {0}; - InstrumentedAllocator mem_payload = {0}; - instrumentedAllocatorNew(&mem_session); - instrumentedAllocatorNew(&mem_fragment); - instrumentedAllocatorNew(&mem_payload); - const struct UdpardRxMemoryResources mem = {.session = instrumentedAllocatorMakeMemoryResource(&mem_session), // - .fragment = instrumentedAllocatorMakeMemoryResource(&mem_fragment), - .payload = instrumentedAllocatorMakeMemoryDeleter(&mem_payload)}; - struct UdpardRxTransfer transfer = {0}; - // Initialize the port. - struct UdpardRxPort port; - rxPortInit(&port); - TEST_ASSERT_EQUAL(SIZE_MAX, port.extent); - TEST_ASSERT_EQUAL(UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, port.transfer_id_timeout_usec); - TEST_ASSERT_NULL(port.sessions); - - // Accept valid non-anonymous transfer. - TEST_ASSERT_EQUAL( - 1, - rxPortAcceptFrame(&port, - 1, - 10000000, - makeDatagramPayloadSingleFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityImmediate, - .src_node_id = 2222, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xB}, - "When will the collapse of space in the vicinity of the " - "Solar System into two dimensions cease?"), - mem, - &transfer)); - TEST_ASSERT_EQUAL(1, mem_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); // Head optimization in effect. - TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); - // Check the received transfer. - TEST_ASSERT_EQUAL(10000000, transfer.timestamp_usec); - TEST_ASSERT_EQUAL(UdpardPriorityImmediate, transfer.priority); - TEST_ASSERT_EQUAL(2222, transfer.source_node_id); - TEST_ASSERT_EQUAL(0xB, transfer.transfer_id); - TEST_ASSERT_EQUAL(94, transfer.payload_size); - TEST_ASSERT(compareStringWithPayload("When will the collapse of space in the vicinity of the " - "Solar System into two dimensions cease?", - transfer.payload.view)); - // Free the memory. - udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); - TEST_ASSERT_EQUAL(1, mem_session.allocated_fragments); // The session remains. - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - - // Send another transfer from another node and see the session count increase. 
- TEST_ASSERT_EQUAL( - 1, - rxPortAcceptFrame(&port, - 0, - 10000010, - makeDatagramPayloadSingleFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityImmediate, - .src_node_id = 3333, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xC}, - "It will never cease."), - mem, - &transfer)); - TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments); // New session created. - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); // Head optimization in effect. - TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); - // Check the received transfer. - TEST_ASSERT_EQUAL(10000010, transfer.timestamp_usec); - TEST_ASSERT_EQUAL(UdpardPriorityImmediate, transfer.priority); - TEST_ASSERT_EQUAL(3333, transfer.source_node_id); - TEST_ASSERT_EQUAL(0xC, transfer.transfer_id); - TEST_ASSERT_EQUAL(20, transfer.payload_size); - TEST_ASSERT(compareStringWithPayload("It will never cease.", transfer.payload.view)); - // Free the memory. - udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); - TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments); // The sessions remain. - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - - // Try sending another frame with no memory left and see it fail during session allocation. - mem_session.limit_fragments = 0; - TEST_ASSERT_EQUAL( - -UDPARD_ERROR_MEMORY, - rxPortAcceptFrame(&port, - 2, - 10000020, - makeDatagramPayloadSingleFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityImmediate, - .src_node_id = 4444, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xD}, - "Cheng Xin shuddered."), - mem, - &transfer)); - TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments); // Not increased. - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); // Not accepted. - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); // Buffer freed. - - // Anonymous transfers are stateless and do not require session allocation. - mem_session.limit_fragments = 0; - TEST_ASSERT_EQUAL( - 1, - rxPortAcceptFrame(&port, - 2, - 10000030, - makeDatagramPayloadSingleFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityImmediate, - .src_node_id = UDPARD_NODE_ID_UNSET, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xD}, - "Cheng Xin shuddered."), - mem, - &transfer)); - TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments); // Not increased. - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); // Head optimization in effect. - TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); // Frame passed to the application. - // Check the received transfer. - TEST_ASSERT_EQUAL(10000030, transfer.timestamp_usec); - TEST_ASSERT_EQUAL(UdpardPriorityImmediate, transfer.priority); - TEST_ASSERT_EQUAL(UDPARD_NODE_ID_UNSET, transfer.source_node_id); - TEST_ASSERT_EQUAL(0xD, transfer.transfer_id); - TEST_ASSERT_EQUAL(20, transfer.payload_size); - TEST_ASSERT(compareStringWithPayload("Cheng Xin shuddered.", transfer.payload.view)); - // Free the memory. - udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); - TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments); // The sessions remain. - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - - // Send invalid anonymous transfers and see them fail. - { // Bad CRC. 
- struct UdpardMutablePayload datagram = - makeDatagramPayloadSingleFrameString(&mem_payload, // - (TransferMetadata) {.priority = UdpardPriorityImmediate, - .src_node_id = UDPARD_NODE_ID_UNSET, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xE}, - "You are scared? Do you think that in this galaxy, in this universe, " - "only the Solar System is collapsing into two dimensions? Haha..."); - *(((byte_t*) datagram.data) + HEADER_SIZE_BYTES) = 0x00; // Corrupt the payload, CRC invalid. - TEST_ASSERT_EQUAL(0, rxPortAcceptFrame(&port, 0, 10000040, datagram, mem, &transfer)); - TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + instrumented_allocator_t alloc_frag = { 0 }; + instrumented_allocator_t alloc_ses = { 0 }; + instrumented_allocator_new(&alloc_frag); + instrumented_allocator_new(&alloc_ses); + const udpard_rx_mem_resources_t mem = { .session = instrumented_allocator_make_resource(&alloc_ses), + .fragment = instrumented_allocator_make_resource(&alloc_frag) }; + // Memory validation rejects missing hooks. + const udpard_mem_vtable_t vtable_no_free = { .base = { .free = NULL }, .alloc = dummy_alloc }; + const udpard_mem_vtable_t vtable_no_alloc = { .base = { .free = dummy_free }, .alloc = NULL }; + udpard_rx_mem_resources_t bad_mem = mem; + bad_mem.session.vtable = &vtable_no_free; + TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_mem)); + bad_mem.session.vtable = &vtable_no_alloc; + TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_mem)); + bad_mem = mem; + bad_mem.fragment.vtable = &vtable_no_free; + TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_mem)); + bad_mem.fragment.vtable = &vtable_no_alloc; + TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_mem)); + + // Session helpers and free paths. + udpard_rx_port_t port = { .memory = mem, + .vtable = &(udpard_rx_port_vtable_t){ .on_message = stub_on_message, + .on_collision = stub_on_collision }, + .mode = udpard_rx_ordered, + .reordering_window = 10, + .topic_hash = 1 }; + rx_session_t* ses = mem_res_alloc(mem.session, sizeof(rx_session_t)); + TEST_ASSERT_NOT_NULL(ses); + mem_zero(sizeof(*ses), ses); + ses->port = &port; + ses->remote.uid = 77; + ses->slots[0].state = rx_slot_done; + ses->slots[0].transfer_id = 5; + TEST_ASSERT_TRUE(rx_session_is_transfer_interned(ses, 5)); + ses->reordering_window_deadline = 5; + // Comparator smoke-test with stable key. + const rx_reordering_key_t dl_key = { .deadline = 5, .remote_uid = ses->remote.uid }; + (void)cavl_compare_rx_session_by_reordering_deadline(&dl_key, &ses->index_reordering_window); + // Comparator branches for UID and deadline ordering. 
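+    // The expected signs suggest the deadline is the major ordering key and the remote UID the tie-breaker:
+    // the two deadline-5 keys below can only be distinguished by their UID component.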
+ TEST_ASSERT_EQUAL(-1, cavl_compare_rx_session_by_remote_uid(&(uint64_t){ 10 }, &ses->index_remote_uid)); + TEST_ASSERT_EQUAL(1, cavl_compare_rx_session_by_remote_uid(&(uint64_t){ 100 }, &ses->index_remote_uid)); + rx_reordering_key_t dl_key_hi = { .deadline = 10, .remote_uid = ses->remote.uid + 1U }; + TEST_ASSERT_EQUAL(1, cavl_compare_rx_session_by_reordering_deadline(&dl_key_hi, &ses->index_reordering_window)); + rx_reordering_key_t dl_key_lo = { .deadline = 1, .remote_uid = ses->remote.uid - 1U }; + TEST_ASSERT_EQUAL(-1, cavl_compare_rx_session_by_reordering_deadline(&dl_key_lo, &ses->index_reordering_window)); + rx_reordering_key_t dl_key_uid_lo = { .deadline = 5, .remote_uid = ses->remote.uid - 1U }; + TEST_ASSERT_EQUAL(-1, + cavl_compare_rx_session_by_reordering_deadline(&dl_key_uid_lo, &ses->index_reordering_window)); + rx_reordering_key_t dl_key_uid_hi = { .deadline = 5, .remote_uid = ses->remote.uid + 1U }; + TEST_ASSERT_EQUAL(1, cavl_compare_rx_session_by_reordering_deadline(&dl_key_uid_hi, &ses->index_reordering_window)); + udpard_list_t anim_list = { 0 }; + udpard_tree_t* by_reorder = NULL; + cavl2_find_or_insert(&port.index_session_by_remote_uid, + &ses->remote.uid, + cavl_compare_rx_session_by_remote_uid, + &ses->index_remote_uid, + cavl2_trivial_factory); + ses->reordering_window_deadline = 3; + const rx_reordering_key_t reorder_key = { .deadline = ses->reordering_window_deadline, + .remote_uid = ses->remote.uid }; + const udpard_tree_t* const tree_reorder = cavl2_find_or_insert(&by_reorder, + &reorder_key, + cavl_compare_rx_session_by_reordering_deadline, + &ses->index_reordering_window, + cavl2_trivial_factory); + TEST_ASSERT_EQUAL_PTR(&ses->index_reordering_window, tree_reorder); + enlist_head(&anim_list, &ses->list_by_animation); + rx_session_free(ses, &anim_list, &by_reorder); + + // Ordered scan cleans late busy slots. + rx_session_t ses_busy; + mem_zero(sizeof(ses_busy), &ses_busy); + ses_busy.port = &port; + ses_busy.history[0] = 10; + ses_busy.slots[0].state = rx_slot_busy; + ses_busy.slots[0].transfer_id = 10; + ses_busy.slots[0].ts_min = 0; + ses_busy.slots[0].ts_max = 0; + udpard_rx_t rx = { 0 }; + rx_session_ordered_scan_slots(&ses_busy, &rx, 10, false); + + // Ordered scan resets late busy slots. + rx_session_t ses_late; + mem_zero(sizeof(ses_late), &ses_late); + ses_late.port = &port; + ses_late.history[0] = 42; + ses_late.slots[0].state = rx_slot_busy; + ses_late.slots[0].transfer_id = 42; + rx_session_ordered_scan_slots(&ses_late, &rx, 10, false); + TEST_ASSERT_EQUAL(rx_slot_idle, ses_late.slots[0].state); + + // Forced scan ejects a done slot. + rx_session_t ses_force; + mem_zero(sizeof(ses_force), &ses_force); + ses_force.port = &port; + ses_force.history[0] = 1; + ses_force.slots[0].state = rx_slot_done; + ses_force.slots[0].transfer_id = 100; + rx_session_ordered_scan_slots(&ses_force, &rx, 0, true); + TEST_ASSERT_EQUAL(rx_slot_idle, ses_force.slots[0].state); + + // Slot acquisition covers stale busy, busy eviction, and done eviction. + rx_session_t ses_slots; + mem_zero(sizeof(ses_slots), &ses_slots); + ses_slots.port = &port; + ses_slots.history_current = 0; + for (size_t i = 0; i < RX_TRANSFER_HISTORY_COUNT; i++) { + ses_slots.history[i] = 1; } - { // No payload (transfer CRC is always required). 
- byte_t* const payload = instrumentedAllocatorAllocate(&mem_payload, HEADER_SIZE_BYTES); - (void) txSerializeHeader(payload, - (TransferMetadata) {.priority = UdpardPriorityImmediate, - .src_node_id = UDPARD_NODE_ID_UNSET, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xE}, - 0, - true); - TEST_ASSERT_EQUAL(0, - rxPortAcceptFrame(&port, - 0, - 10000050, - (struct UdpardMutablePayload) {.size = HEADER_SIZE_BYTES, .data = payload}, - mem, - &transfer)); - TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + ses_slots.slots[0].state = rx_slot_busy; + ses_slots.slots[0].ts_max = 0; + ses_slots.slots[0].transfer_id = 1; + rx_slot_t* slot = rx_session_get_slot(&ses_slots, &rx, SESSION_LIFETIME + 1, 99); + TEST_ASSERT_NOT_NULL(slot); + for (size_t i = 0; i < RX_SLOT_COUNT; i++) { + ses_slots.slots[i].state = (i == 0) ? rx_slot_busy : rx_slot_done; + ses_slots.slots[i].ts_max = 10 + (udpard_us_t)i; } - - // Send an invalid frame and make sure the memory is freed. - TEST_ASSERT_EQUAL(0, - rxPortAcceptFrame(&port, - 0, - 10000060, - (struct - UdpardMutablePayload) {.size = HEADER_SIZE_BYTES, - .data = - instrumentedAllocatorAllocate(&mem_payload, - HEADER_SIZE_BYTES)}, - mem, - &transfer)); - TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments); // Not increased. - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); // Not accepted. - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); // Buffer freed. - - // Send incomplete transfers to see them cleaned up upon destruction. - mem_session.limit_fragments = SIZE_MAX; - TEST_ASSERT_EQUAL(0, - rxPortAcceptFrame(&port, - 0, - 10000070, - makeDatagramPayloadString(&mem_payload, // - (TransferMetadata) { - .priority = UdpardPriorityImmediate, - .src_node_id = 10000, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xD, - }, - 100, - false, - "What you're saying makes no sense. " - "At least, it doesn't make sense to lower spatial " - "dimensions as a weapon. "), - mem, - &transfer)); - TEST_ASSERT_EQUAL(3, mem_session.allocated_fragments); - TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, - rxPortAcceptFrame(&port, - 0, - 10000080, - makeDatagramPayloadString(&mem_payload, // - (TransferMetadata) { - .priority = UdpardPriorityImmediate, - .src_node_id = 10000, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xD, - }, - 101, - false, - "In the long run, that's the sort of attack that " - "would kill the attacker as well as the target. " - "Eventually, the side that initiated attack would " - "also see their own space fall into the " - "two-dimensional abyss they created."), - mem, - &transfer)); - TEST_ASSERT_EQUAL(3, mem_session.allocated_fragments); // Same session because it comes from the same source. - TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, - rxPortAcceptFrame(&port, - 2, - 10000090, - makeDatagramPayloadString(&mem_payload, // - (TransferMetadata) { - .priority = UdpardPriorityImmediate, - .src_node_id = 10001, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 0, - .transfer_id = 0xD, - }, - 10, - true, - "You're too... kind-hearted."), - mem, - &transfer)); - TEST_ASSERT_EQUAL(4, mem_session.allocated_fragments); // New source. 
- TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); - TEST_ASSERT_EQUAL(4 * sizeof(struct UdpardInternalRxSession), mem_session.allocated_bytes); - TEST_ASSERT_EQUAL(3 * sizeof(RxFragment), mem_fragment.allocated_bytes); - - // Free the port instance and ensure all ifaces and sessions are cleaned up. - rxPortFree(&port, mem); - TEST_ASSERT_EQUAL(0, mem_session.allocated_fragments); // All gone. - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + slot = rx_session_get_slot(&ses_slots, &rx, 50, 2); + TEST_ASSERT_NOT_NULL(slot); + for (size_t i = 0; i < RX_SLOT_COUNT; i++) { + ses_slots.slots[i].state = rx_slot_done; + ses_slots.slots[i].transfer_id = i + 1U; + ses_slots.slots[i].ts_min = (udpard_us_t)i; + ses_slots.slots[i].ts_max = (udpard_us_t)i; + } + port.vtable = &(udpard_rx_port_vtable_t){ .on_message = stub_on_message, .on_collision = stub_on_collision }; + slot = rx_session_get_slot(&ses_slots, &rx, 60, 3); + TEST_ASSERT_NOT_NULL(slot); + + // Ordered update retransmits ACK for ejected transfers. + rx_session_t ses_ack; + mem_zero(sizeof(ses_ack), &ses_ack); + ses_ack.port = &port; + ses_ack.remote.uid = 55; + ses_ack.history[0] = 7; + ses_ack.initialized = true; + rx_frame_t ack_frame; + mem_zero(sizeof(ack_frame), &ack_frame); + void* ack_buf = mem_res_alloc(mem.fragment, ACK_SIZE_BYTES); + TEST_ASSERT_NOT_NULL(ack_buf); + memset(ack_buf, 0, ACK_SIZE_BYTES); + ack_frame.base.payload = (udpard_bytes_t){ .data = ack_buf, .size = ACK_SIZE_BYTES }; + ack_frame.base.origin = (udpard_bytes_mut_t){ .data = ack_buf, .size = ACK_SIZE_BYTES }; + ack_frame.base.offset = 0; + ack_frame.meta.priority = udpard_prio_nominal; + ack_frame.meta.flag_reliable = true; + ack_frame.meta.transfer_payload_size = ACK_SIZE_BYTES; + ack_frame.meta.transfer_id = 7; + ack_frame.meta.sender_uid = ses_ack.remote.uid; + ack_frame.meta.topic_hash = port.topic_hash; + rx.errors_ack_tx = 0; + rx.tx = NULL; + rx_session_update_ordered(&ses_ack, &rx, 0, &ack_frame, instrumented_allocator_make_deleter(&alloc_frag)); + TEST_ASSERT_EQUAL_UINT64(1U, rx.errors_ack_tx); + + // Stateless accept success, OOM, malformed. 
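+    // A stateless port keeps no per-remote session, so each complete single-frame datagram is presumably
+    // either handed to on_message as-is or dropped. The calls below cover normal delivery, fragment-allocator
+    // exhaustion, and a frame whose declared transfer size disagrees with the payload it actually carries.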
+ g_collision_count = 0; + port.vtable = &(udpard_rx_port_vtable_t){ .on_message = stub_on_message, .on_collision = stub_on_collision }; + port.extent = 8; + port.mode = udpard_rx_stateless; + port.reordering_window = 0; + rx_frame_t frame; + byte_t payload[4] = { 1, 2, 3, 4 }; + mem_zero(sizeof(frame), &frame); + void* payload_buf = mem_res_alloc(mem.fragment, sizeof(payload)); + memcpy(payload_buf, payload, sizeof(payload)); + frame.base.payload = (udpard_bytes_t){ .data = payload_buf, .size = sizeof(payload) }; + frame.base.origin = (udpard_bytes_mut_t){ .data = payload_buf, .size = sizeof(payload) }; + frame.base.crc = crc_full(frame.base.payload.size, frame.base.payload.data); + frame.meta.transfer_payload_size = (uint32_t)frame.base.payload.size; + frame.meta.sender_uid = 9; + frame.meta.transfer_id = 11; + rx_port_accept_stateless(&rx, &port, 0, make_ep(1), &frame, instrumented_allocator_make_deleter(&alloc_frag), 0); + alloc_frag.limit_fragments = 0; + frame.base.payload.data = payload; + frame.base.payload.size = sizeof(payload); + frame.base.origin = (udpard_bytes_mut_t){ 0 }; + frame.base.crc = crc_full(frame.base.payload.size, frame.base.payload.data); + rx_port_accept_stateless(&rx, &port, 0, make_ep(1), &frame, instrumented_allocator_make_deleter(&alloc_frag), 0); + frame.base.payload.size = 0; + frame.meta.transfer_payload_size = 8; + rx_port_accept_stateless(&rx, &port, 0, make_ep(1), &frame, instrumented_allocator_make_deleter(&alloc_frag), 0); + // Stateless accept rejects nonzero offsets. + alloc_frag.limit_fragments = SIZE_MAX; + void* payload_buf2 = mem_res_alloc(mem.fragment, sizeof(payload)); + TEST_ASSERT_NOT_NULL(payload_buf2); + memcpy(payload_buf2, payload, sizeof(payload)); + frame.base.payload = (udpard_bytes_t){ .data = payload_buf2, .size = sizeof(payload) }; + frame.base.origin = (udpard_bytes_mut_t){ .data = payload_buf2, .size = sizeof(payload) }; + frame.base.offset = 1U; + frame.meta.transfer_payload_size = (uint32_t)sizeof(payload); + rx_port_accept_stateless(&rx, &port, 0, make_ep(1), &frame, instrumented_allocator_make_deleter(&alloc_frag), 0); + frame.base.offset = 0; + udpard_rx_port_t port_stateless_new = { 0 }; + TEST_ASSERT_TRUE(udpard_rx_port_new(&port_stateless_new, 22, 8, udpard_rx_stateless, 0, mem, port.vtable)); + TEST_ASSERT_NOT_NULL(port_stateless_new.vtable_private); + udpard_rx_port_free(&rx, &port_stateless_new); + instrumented_allocator_reset(&alloc_frag); + + // Port push collision and malformed header. 
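+    // The first push below offers a 4-byte buffer too short to hold a header, which should be dropped as
+    // malformed; the second is well-formed but its topic hash (99) does not match the port's, which is
+    // expected to fire the on_collision hook instead of delivering a message.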
+ udpard_rx_port_t port_normal = { 0 }; + TEST_ASSERT_TRUE(udpard_rx_port_new(&port_normal, 1, 8, udpard_rx_ordered, 10, mem, port.vtable)); + udpard_bytes_mut_t bad_payload = { .data = mem_res_alloc(mem.fragment, 4), .size = 4 }; + TEST_ASSERT(udpard_rx_port_push( + &rx, &port_normal, 0, make_ep(2), bad_payload, instrumented_allocator_make_deleter(&alloc_frag), 0)); + byte_t good_dgram[HEADER_SIZE_BYTES + 1] = { 0 }; + meta_t meta = { .priority = udpard_prio_nominal, + .flag_reliable = false, + .transfer_payload_size = 1, + .transfer_id = 1, + .sender_uid = 2, + .topic_hash = 99 }; + good_dgram[HEADER_SIZE_BYTES] = 0xAA; + header_serialize(good_dgram, meta, 0, 0, crc_full(1, &good_dgram[HEADER_SIZE_BYTES])); + udpard_bytes_mut_t good_payload = { .data = mem_res_alloc(mem.fragment, sizeof(good_dgram)), + .size = sizeof(good_dgram) }; + memcpy(good_payload.data, good_dgram, sizeof(good_dgram)); + TEST_ASSERT(udpard_rx_port_push( + &rx, &port_normal, 0, make_ep(3), good_payload, instrumented_allocator_make_deleter(&alloc_frag), 1)); + TEST_ASSERT_GREATER_THAN_UINT64(0, g_collision_count); + udpard_rx_port_free(&rx, &port_normal); + // Short ACK messages are ignored. + rx.errors_ack_tx = 0; + rx_accept_ack(&rx, (udpard_bytes_t){ .data = payload, .size = 1U }); + TEST_ASSERT_EQUAL_UINT64(0, rx.errors_ack_tx); + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_ses); } -// --------------------------------------------------------------------------------------------------------------------- - void setUp(void) {} void tearDown(void) {} @@ -2392,41 +3025,28 @@ void tearDown(void) {} int main(void) { UNITY_BEGIN(); - // misc - RUN_TEST(testCompare32); - // frame parser - RUN_TEST(testParseFrameValidMessage); - RUN_TEST(testParseFrameValidRPCService); - RUN_TEST(testParseFrameValidMessageAnonymous); - RUN_TEST(testParseFrameRPCServiceAnonymous); - RUN_TEST(testParseFrameRPCServiceBroadcast); - RUN_TEST(testParseFrameAnonymousNonSingleFrame); - RUN_TEST(testParseFrameBadHeaderCRC); - RUN_TEST(testParseFrameUnknownHeaderVersion); - RUN_TEST(testParseFrameHeaderWithoutPayload); - RUN_TEST(testParseFrameEmpty); - RUN_TEST(testParseFrameInvalidTransferID); - // slot - RUN_TEST(testSlotRestartEmpty); - RUN_TEST(testSlotRestartNonEmpty); - RUN_TEST(testSlotEjectValidLarge); - RUN_TEST(testSlotEjectValidSmall); - RUN_TEST(testSlotEjectValidEmpty); - RUN_TEST(testSlotEjectInvalid); - RUN_TEST(testSlotAcceptA); - // iface - RUN_TEST(testIfaceIsFutureTransferID); - RUN_TEST(testIfaceCheckTransferIDTimeout); - RUN_TEST(testIfaceFindMatchingSlot); - RUN_TEST(testIfaceAcceptA); - RUN_TEST(testIfaceAcceptB); - RUN_TEST(testIfaceAcceptC); - // session - RUN_TEST(testSessionDeduplicate); - RUN_TEST(testSessionAcceptA); - // port - RUN_TEST(testPortAcceptFrameA); + + RUN_TEST(test_rx_fragment_tree_update_a); + RUN_TEST(test_rx_fragment_tree_update_exhaustive); + RUN_TEST(test_rx_fragment_tree_oom); + + RUN_TEST(test_rx_slot_update); + + RUN_TEST(test_rx_transfer_id_forward_distance); + RUN_TEST(test_rx_ack_enqueued); + + RUN_TEST(test_rx_session_ordered); + RUN_TEST(test_rx_session_unordered); + RUN_TEST(test_rx_session_unordered_reject_old); + RUN_TEST(test_rx_session_ordered_reject_stale_after_jump); + RUN_TEST(test_rx_session_unordered_duplicates); + RUN_TEST(test_rx_session_ordered_zero_reordering_window); + + RUN_TEST(test_rx_port); + RUN_TEST(test_rx_port_timeouts); + RUN_TEST(test_rx_port_oom); + RUN_TEST(test_rx_port_free_loop); + RUN_TEST(test_rx_additional_coverage); + return 
UNITY_END();
 }
-
-// NOLINTEND(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling)
diff --git a/tests/src/test_intrusive_tx.c b/tests/src/test_intrusive_tx.c
index 8ab1ad0..c513de2 100644
--- a/tests/src/test_intrusive_tx.c
+++ b/tests/src/test_intrusive_tx.c
@@ -3,1055 +3,1202 @@
 /// Copyright Amazon.com Inc. or its affiliates.
 /// SPDX-License-Identifier: MIT
-#include <udpard.c>  // NOLINT(bugprone-suspicious-include)
+#include <udpard.c> // NOLINT(bugprone-suspicious-include)
 #include "helpers.h"
 #include <unity.h>
-// >>> from pycyphal.transport.commons.crc import CRC32C
-// >>> list(CRC32C.new(data).value_as_bytes)
-static const char EtherealStrength[] =
-    "All was silent except for the howl of the wind against the antenna. Ye watched as the remaining birds in the "
-    "flock gradually settled back into the forest. She stared at the antenna and thought it looked like an enormous "
-    "hand stretched open toward the sky, possessing an ethereal strength.";
-static const size_t EtherealStrengthSize = sizeof(EtherealStrength) - 1;
-static const byte_t EtherealStrengthCRC[4] = {209, 88, 130, 43};
-
-static const char DetailOfTheCosmos[] =
-    "For us, the dark forest state is all-important, but it's just a detail of the cosmos.";
-static const size_t DetailOfTheCosmosSize = sizeof(DetailOfTheCosmos) - 1;
-static const byte_t DetailOfTheCosmosCRC[4] = {125, 113, 207, 171};
-
-static const char InterstellarWar[] = "You have not seen what a true interstellar war is like.";
-static const size_t InterstellarWarSize = sizeof(InterstellarWar) - 1;
-static const byte_t InterstellarWarCRC[4] = {102, 217, 109, 188};
-
-// These aliases cannot be defined in the public API section: https://github.com/OpenCyphal-Garage/libudpard/issues/36
-typedef struct UdpardPayload UdpardPayload;
-typedef struct UdpardUDPIPEndpoint UdpardUDPIPEndpoint;
-typedef struct UdpardTx UdpardTx;
-typedef struct UdpardTxItem UdpardTxItem;
+typedef struct
+{
+    size_t count;
+    bool allow;
+} eject_state_t;
+
+typedef struct
+{
+    size_t count;
+    udpard_tx_feedback_t last;
+} feedback_state_t;
 
 typedef struct
 {
-    byte_t data[HEADER_SIZE_BYTES];
-} HeaderBuffer;
+    size_t count;
+    udpard_us_t when[8];
+} eject_log_t;
 
-static HeaderBuffer makeHeader(const TransferMetadata meta, const uint32_t frame_index, const bool end_of_transfer)
+static void noop_free(void* const user, const size_t size, void* const pointer)
 {
-    HeaderBuffer buffer;
-    (void) txSerializeHeader(&buffer.data[0], meta, frame_index, end_of_transfer);
-    return buffer;
+    (void)user;
+    (void)size;
+    (void)pointer;
 }
 
-// Generate reference data using PyCyphal:
-//
-// >>> from pycyphal.transport.udp import UDPFrame
-// >>> from pycyphal.transport import Priority, MessageDataSpecifier
-// >>> frame = UDPFrame(priority=Priority.FAST, transfer_id=0xbadc0ffee0ddf00d, index=12345, end_of_transfer=False,
-//                      payload=memoryview(b''), source_node_id=2345, destination_node_id=5432,
-//                      data_specifier=MessageDataSpecifier(7654), user_data=0)
-// >>> list(frame.compile_header_and_payload()[0])
-// [1, 2, 41, 9, 56, 21, 230, 29, 13, 240, 221, 224, 254, 15, 220, 186, 57, 48, 0, 0, 0, 0, 224, 60]
-static void testTxSerializeHeader(void)
+// No-op memory vtable for guard checks.
+static const udpard_mem_vtable_t mem_vtable_noop_alloc = { .base = { .free = noop_free }, .alloc = dummy_alloc };
+
+// Ejects with a configurable outcome (subject variant).
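+// (In these stubs the tx->user pointer carries the test state; a real hook would presumably hand the ejected
+// datagram to the network stack and report success or failure through its return value.)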
+static bool eject_subject_with_flag(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) { - { - HeaderBuffer buffer; - TEST_ASSERT_EQUAL_PTR(&buffer.data[0] + HEADER_SIZE_BYTES, - txSerializeHeader(buffer.data, - (TransferMetadata) { - .priority = UdpardPriorityFast, - .src_node_id = 2345, - .dst_node_id = 5432, - .data_specifier = 7654, - .transfer_id = 0xBADC0FFEE0DDF00dULL, - }, - 12345, - false)); - const HeaderBuffer ref = { - .data = {1, 2, 41, 9, 56, 21, 230, 29, 13, 240, 221, 224, 254, 15, 220, 186, 57, 48, 0, 0, 0, 0, 224, 60}}; - TEST_ASSERT_EQUAL_MEMORY(ref.data, buffer.data, HEADER_SIZE_BYTES); + (void)ejection; + eject_state_t* const st = (eject_state_t*)tx->user; + if (st != NULL) { + st->count++; + return st->allow; } - { - HeaderBuffer buffer; - TEST_ASSERT_EQUAL(&buffer.data[0] + HEADER_SIZE_BYTES, - txSerializeHeader(buffer.data, - (TransferMetadata) { - .priority = UdpardPriorityLow, - .src_node_id = 0xFEDC, - .dst_node_id = 0xBA98, - .data_specifier = 1234, - .transfer_id = 0x0BADC0DE0BADC0DEULL, - }, - 0x7FFF, - true)); - const HeaderBuffer ref = {.data = {1, 5, 220, 254, 152, 186, 210, 4, 222, 192, 173, 11, - 222, 192, 173, 11, 255, 127, 0, 128, 0, 0, 229, 4}}; - TEST_ASSERT_EQUAL_MEMORY(ref.data, buffer.data, HEADER_SIZE_BYTES); + return true; +} + +// Ejects with a configurable outcome (P2P variant). +static bool eject_p2p_with_flag(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t dest) +{ + (void)ejection; + (void)dest; + eject_state_t* const st = (eject_state_t*)tx->user; + if (st != NULL) { + st->count++; + return st->allow; } + return true; } -static void testMakeChainEmpty(void) +// Records ejection timestamps for later inspection (subject variant). +static bool eject_subject_with_log(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) { - InstrumentedAllocator alloc; - instrumentedAllocatorNew(&alloc); - const struct UdpardTxMemoryResources mem = { - .fragment = instrumentedAllocatorMakeMemoryResource(&alloc), - .payload = instrumentedAllocatorMakeMemoryResource(&alloc), - }; - char user_transfer_referent = '\0'; - const TransferMetadata meta = { - .priority = UdpardPriorityFast, - .src_node_id = 1234, - .dst_node_id = 2345, - .data_specifier = 5432, - .transfer_id = 0xBADC0FFEE0DDF00DULL, - }; - const TxChain chain = txMakeChain(mem, - (byte_t[]) {11, 22, 33, 44, 55, 66, 77, 88}, - 30, - 1234567890, - meta, - (UdpardUDPIPEndpoint) {.ip_address = 0x0A0B0C0DU, .udp_port = 0x1234}, - (UdpardPayload) {.size = 0, .data = ""}, - &user_transfer_referent); - TEST_ASSERT_EQUAL(1 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(sizeof(struct UdpardTxItem) + HEADER_SIZE_BYTES + 4, alloc.allocated_bytes); - TEST_ASSERT_EQUAL(1, chain.count); - TEST_ASSERT_EQUAL(chain.head, chain.tail); - TEST_ASSERT_EQUAL(NULL, chain.head->next_in_transfer); - TEST_ASSERT_EQUAL(1234567890, chain.head->deadline_usec); - TEST_ASSERT_EQUAL(33, chain.head->dscp); - TEST_ASSERT_EQUAL(0x0A0B0C0DU, chain.head->destination.ip_address); - TEST_ASSERT_EQUAL(0x1234, chain.head->destination.udp_port); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + 4, chain.head->datagram_payload.size); - TEST_ASSERT_EQUAL(0, memcmp(makeHeader(meta, 0, true).data, chain.head->datagram_payload.data, HEADER_SIZE_BYTES)); - TEST_ASSERT_EQUAL(0, - memcmp("\x00\x00\x00\x00", // CRC of the empty transfer. 
- (byte_t*) (chain.head->datagram_payload.data) + HEADER_SIZE_BYTES, - 4)); - TEST_ASSERT_EQUAL(&user_transfer_referent, chain.head->user_transfer_reference); - udpardTxFree(mem, chain.head); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); + eject_log_t* const st = (eject_log_t*)tx->user; + if ((st != NULL) && (st->count < (sizeof(st->when) / sizeof(st->when[0])))) { + st->when[st->count++] = ejection->now; + } + return true; } -static void testMakeChainSingleMaxMTU(void) +// Records ejection timestamps for later inspection (P2P variant). +static bool eject_p2p_with_log(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t dest) { - InstrumentedAllocator alloc; - instrumentedAllocatorNew(&alloc); - const struct UdpardTxMemoryResources mem = { - .fragment = instrumentedAllocatorMakeMemoryResource(&alloc), - .payload = instrumentedAllocatorMakeMemoryResource(&alloc), - }; - char user_transfer_referent = '\0'; - const TransferMetadata meta = { - .priority = UdpardPrioritySlow, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL, - }; - const TxChain chain = txMakeChain(mem, - (byte_t[]) {11, 22, 33, 44, 55, 66, 77, 88}, - DetailOfTheCosmosSize + TRANSFER_CRC_SIZE_BYTES, - 1234567890, - meta, - (UdpardUDPIPEndpoint) {.ip_address = 0x0A0B0C00U, .udp_port = 7474}, - (UdpardPayload) {.size = DetailOfTheCosmosSize, .data = DetailOfTheCosmos}, - &user_transfer_referent); - TEST_ASSERT_EQUAL(1 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(sizeof(struct UdpardTxItem) + HEADER_SIZE_BYTES + DetailOfTheCosmosSize + TRANSFER_CRC_SIZE_BYTES, - alloc.allocated_bytes); - TEST_ASSERT_EQUAL(1, chain.count); - TEST_ASSERT_EQUAL(chain.head, chain.tail); - TEST_ASSERT_EQUAL(NULL, chain.head->next_in_transfer); - TEST_ASSERT_EQUAL(1234567890, chain.head->deadline_usec); - TEST_ASSERT_EQUAL(77, chain.head->dscp); - TEST_ASSERT_EQUAL(0x0A0B0C00U, chain.head->destination.ip_address); - TEST_ASSERT_EQUAL(7474, chain.head->destination.udp_port); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + DetailOfTheCosmosSize + TRANSFER_CRC_SIZE_BYTES, - chain.head->datagram_payload.size); - TEST_ASSERT_EQUAL(0, memcmp(makeHeader(meta, 0, true).data, chain.head->datagram_payload.data, HEADER_SIZE_BYTES)); - TEST_ASSERT_EQUAL(0, - memcmp(DetailOfTheCosmos, - (byte_t*) (chain.head->datagram_payload.data) + HEADER_SIZE_BYTES, - DetailOfTheCosmosSize)); - TEST_ASSERT_EQUAL(0, - memcmp(DetailOfTheCosmosCRC, - (byte_t*) (chain.head->datagram_payload.data) + HEADER_SIZE_BYTES + DetailOfTheCosmosSize, - TRANSFER_CRC_SIZE_BYTES)); - TEST_ASSERT_EQUAL(&user_transfer_referent, chain.head->user_transfer_reference); - udpardTxFree(mem, chain.head); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); + (void)dest; + eject_log_t* const st = (eject_log_t*)tx->user; + if ((st != NULL) && (st->count < (sizeof(st->when) / sizeof(st->when[0])))) { + st->when[st->count++] = ejection->now; + } + return true; } -static void testMakeChainSingleFrameDefaultMTU(void) +// Records feedback into the provided state via user context. 
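+// (The tests appear to smuggle a feedback_state_t pointer through fb.user.ptr[0] so the callback can record
+// the delivery outcome for later inspection.)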
+static void record_feedback(udpard_tx_t* const tx, const udpard_tx_feedback_t fb) { - InstrumentedAllocator alloc; - instrumentedAllocatorNew(&alloc); - const struct UdpardTxMemoryResources mem = { - .fragment = instrumentedAllocatorMakeMemoryResource(&alloc), - .payload = instrumentedAllocatorMakeMemoryResource(&alloc), - }; - const byte_t payload[UDPARD_MTU_DEFAULT_MAX_SINGLE_FRAME + 1] = {0}; - { // Ensure UDPARD_MTU_DEFAULT_MAX_SINGLE_FRAME bytes fit in a single frame with the default MTU. - const TxChain chain = - txMakeChain(mem, - (byte_t[]) {11, 22, 33, 44, 55, 66, 77, 88}, - UDPARD_MTU_DEFAULT, - 1234567890, - (TransferMetadata) {.priority = UdpardPrioritySlow, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL}, - (UdpardUDPIPEndpoint) {.ip_address = 0x0A0B0C00U, .udp_port = 7474}, - (UdpardPayload) {.size = UDPARD_MTU_DEFAULT_MAX_SINGLE_FRAME, .data = payload}, - NULL); - TEST_ASSERT_EQUAL(1 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(sizeof(struct UdpardTxItem) + HEADER_SIZE_BYTES + UDPARD_MTU_DEFAULT_MAX_SINGLE_FRAME + - TRANSFER_CRC_SIZE_BYTES, - alloc.allocated_bytes); - TEST_ASSERT_EQUAL(1, chain.count); - TEST_ASSERT_EQUAL(chain.head, chain.tail); - TEST_ASSERT_EQUAL(NULL, chain.head->next_in_transfer); - udpardTxFree(mem, chain.head); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); + (void)tx; + feedback_state_t* const st = (feedback_state_t*)fb.user.ptr[0]; + if (st != NULL) { + st->count++; + st->last = fb; } - { // Increase the payload by 1 byte and ensure it spills over. - const TxChain chain = - txMakeChain(mem, - (byte_t[]) {11, 22, 33, 44, 55, 66, 77, 88}, - UDPARD_MTU_DEFAULT, - 1234567890, - (TransferMetadata) {.priority = UdpardPrioritySlow, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL}, - (UdpardUDPIPEndpoint) {.ip_address = 0x0A0B0C00U, .udp_port = 7474}, - (UdpardPayload) {.size = UDPARD_MTU_DEFAULT_MAX_SINGLE_FRAME + 1, .data = payload}, - NULL); - TEST_ASSERT_EQUAL(2 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL((sizeof(struct UdpardTxItem) + HEADER_SIZE_BYTES) * 2 + UDPARD_MTU_DEFAULT_MAX_SINGLE_FRAME + - 1 + TRANSFER_CRC_SIZE_BYTES, - alloc.allocated_bytes); - TEST_ASSERT_EQUAL(2, chain.count); - TEST_ASSERT_NOT_EQUAL(chain.head, chain.tail); - TEST_ASSERT_EQUAL((UdpardTxItem*) chain.tail, chain.head->next_in_transfer); - TEST_ASSERT_EQUAL(NULL, chain.tail->next_in_transfer); - udpardTxFree(mem, chain.head); - udpardTxFree(mem, chain.tail); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); +} + +// Minimal endpoint helper. +static udpard_udpip_ep_t make_ep(const uint32_t ip) { return (udpard_udpip_ep_t){ .ip = ip, .port = 1U }; } + +// Small helpers for intrusive checks. +static size_t frames_for(const size_t mtu, const size_t payload) { return larger(1, (payload + mtu - 1U) / mtu); } +static tx_transfer_t* latest_transfer(udpard_tx_t* const tx) +{ + return LIST_MEMBER(tx->agewise.head, tx_transfer_t, agewise); +} + +static void test_bytes_scattered_read(void) +{ + // Skips empty fragments and spans boundaries. 
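+    // The reader presumably behaves like this sketch (illustrative only, not the actual implementation;
+    // smaller() stands in for a hypothetical min helper):
+    //
+    //     while ((n > 0U) && (cursor != NULL)) {
+    //         if (position == cursor->bytes.size) { cursor = cursor->next; position = 0U; continue; }
+    //         const size_t take = smaller(n, cursor->bytes.size - position);
+    //         (void)memcpy(out, &((const byte_t*)cursor->bytes.data)[position], take);
+    //         out += take; position += take; n -= take;
+    //     }
+    //
+    // which is consistent with the cursor landing on frag3 at position 4 in the first case below.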
+ { + const byte_t frag_a[] = { 1U, 2U, 3U }; + const byte_t frag_c[] = { 4U, 5U, 6U, 7U, 8U }; + const udpard_bytes_scattered_t frag3 = { .bytes = { .size = sizeof(frag_c), .data = frag_c }, .next = NULL }; + const udpard_bytes_scattered_t frag2 = { .bytes = { .size = 0U, .data = NULL }, .next = &frag3 }; + const udpard_bytes_scattered_t frag1 = { .bytes = { .size = sizeof(frag_a), .data = frag_a }, .next = &frag2 }; + const udpard_bytes_scattered_t frag0 = { .bytes = { .size = 0U, .data = NULL }, .next = &frag1 }; + bytes_scattered_reader_t reader = { .cursor = &frag0, .position = 0U }; + byte_t out[7] = { 0 }; + bytes_scattered_read(&reader, sizeof(out), out); + const byte_t expected[] = { 1U, 2U, 3U, 4U, 5U, 6U, 7U }; + TEST_ASSERT_EQUAL_UINT8_ARRAY(expected, out, sizeof(expected)); + TEST_ASSERT_EQUAL_PTR(&frag3, reader.cursor); + TEST_ASSERT_EQUAL_size_t(4U, reader.position); + } + + // Resumes mid-fragment when data remains. + { + const byte_t frag_tail[] = { 9U, 10U, 11U }; + const udpard_bytes_scattered_t frag = { .bytes = { .size = sizeof(frag_tail), .data = frag_tail }, + .next = NULL }; + bytes_scattered_reader_t reader = { .cursor = &frag, .position = 1U }; + byte_t out[2] = { 0 }; + bytes_scattered_read(&reader, sizeof(out), out); + const byte_t expected[] = { 10U, 11U }; + TEST_ASSERT_EQUAL_UINT8_ARRAY(expected, out, sizeof(out)); + TEST_ASSERT_EQUAL_PTR(&frag, reader.cursor); + TEST_ASSERT_EQUAL_size_t(frag.bytes.size, reader.position); + } + + // Size accounts for chained fragments. + { + const byte_t frag_a[] = { 1U, 2U }; + const byte_t frag_b[] = { 3U, 4U, 5U }; + const udpard_bytes_scattered_t tail = { .bytes = { .size = sizeof(frag_b), .data = frag_b }, .next = NULL }; + const udpard_bytes_scattered_t head = { .bytes = { .size = sizeof(frag_a), .data = frag_a }, .next = &tail }; + TEST_ASSERT_EQUAL_size_t(sizeof(frag_a) + sizeof(frag_b), bytes_scattered_size(head)); } } -static void testMakeChainThreeFrames(void) +static void test_tx_serialize_header(void) { - InstrumentedAllocator alloc; - instrumentedAllocatorNew(&alloc); - const struct UdpardTxMemoryResources mem = { - .fragment = instrumentedAllocatorMakeMemoryResource(&alloc), - .payload = instrumentedAllocatorMakeMemoryResource(&alloc), - }; - char user_transfer_referent = '\0'; - const TransferMetadata meta = { - .priority = UdpardPriorityNominal, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL, - }; - const size_t mtu = (EtherealStrengthSize + 4U + 3U) / 3U; // Force payload split into three frames. - const TxChain chain = txMakeChain(mem, - (byte_t[]) {11, 22, 33, 44, 55, 66, 77, 88}, - mtu, - 223574680, - meta, - (UdpardUDPIPEndpoint) {.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardPayload) {.size = EtherealStrengthSize, .data = EtherealStrength}, - &user_transfer_referent); - TEST_ASSERT_EQUAL(3 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(3 * (sizeof(struct UdpardTxItem) + HEADER_SIZE_BYTES) + EtherealStrengthSize + 4U, - alloc.allocated_bytes); - TEST_ASSERT_EQUAL(3, chain.count); - UdpardTxItem* const first = chain.head; - TEST_ASSERT_NOT_EQUAL(NULL, first); - UdpardTxItem* const second = first->next_in_transfer; - TEST_ASSERT_NOT_EQUAL(NULL, second); - UdpardTxItem* const third = second->next_in_transfer; - TEST_ASSERT_NOT_EQUAL(NULL, third); - TEST_ASSERT_EQUAL(NULL, third->next_in_transfer); - TEST_ASSERT_EQUAL((UdpardTxItem*) chain.tail, third); - - // FIRST FRAME -- contains the first part of the payload. 
- TEST_ASSERT_EQUAL(223574680, first->deadline_usec); - TEST_ASSERT_EQUAL(55, first->dscp); - TEST_ASSERT_EQUAL(0xBABADEDAU, first->destination.ip_address); - TEST_ASSERT_EQUAL(0xD0ED, first->destination.udp_port); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + mtu, first->datagram_payload.size); - TEST_ASSERT_EQUAL(0, memcmp(makeHeader(meta, 0, false).data, first->datagram_payload.data, HEADER_SIZE_BYTES)); - TEST_ASSERT_EQUAL(0, memcmp(EtherealStrength, (byte_t*) (first->datagram_payload.data) + HEADER_SIZE_BYTES, mtu)); - TEST_ASSERT_EQUAL(&user_transfer_referent, first->user_transfer_reference); - - // SECOND FRAME -- contains the second part of the payload. - TEST_ASSERT_EQUAL(223574680, second->deadline_usec); - TEST_ASSERT_EQUAL(55, second->dscp); - TEST_ASSERT_EQUAL(0xBABADEDAU, second->destination.ip_address); - TEST_ASSERT_EQUAL(0xD0ED, second->destination.udp_port); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + mtu, second->datagram_payload.size); - TEST_ASSERT_EQUAL(0, memcmp(makeHeader(meta, 1, false).data, second->datagram_payload.data, HEADER_SIZE_BYTES)); - TEST_ASSERT_EQUAL(0, - memcmp(EtherealStrength + mtu, - (byte_t*) (second->datagram_payload.data) + HEADER_SIZE_BYTES, - mtu)); - TEST_ASSERT_EQUAL(&user_transfer_referent, second->user_transfer_reference); - - // THIRD FRAME -- contains the third part of the payload and the CRC at the end. - TEST_ASSERT_EQUAL(223574680, third->deadline_usec); - TEST_ASSERT_EQUAL(55, third->dscp); - TEST_ASSERT_EQUAL(0xBABADEDAU, third->destination.ip_address); - TEST_ASSERT_EQUAL(0xD0ED, third->destination.udp_port); - const size_t third_payload_size = EtherealStrengthSize - (2 * mtu); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + third_payload_size + 4U, third->datagram_payload.size); - TEST_ASSERT_EQUAL(0, memcmp(makeHeader(meta, 2, true).data, third->datagram_payload.data, HEADER_SIZE_BYTES)); - TEST_ASSERT_EQUAL(0, - memcmp(EtherealStrength + (2 * mtu), - (byte_t*) (third->datagram_payload.data) + HEADER_SIZE_BYTES, - third_payload_size)); - TEST_ASSERT_EQUAL(0, - memcmp(EtherealStrengthCRC, - (byte_t*) (third->datagram_payload.data) + HEADER_SIZE_BYTES + third_payload_size, - TRANSFER_CRC_SIZE_BYTES)); - TEST_ASSERT_EQUAL(&user_transfer_referent, third->user_transfer_reference); - - // Clean up. 
- udpardTxFree(mem, first); - udpardTxFree(mem, second); - udpardTxFree(mem, third); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); + typedef struct + { + byte_t data[HEADER_SIZE_BYTES]; + } header_buffer_t; + + // Test case 1: Basic header serialization + { + header_buffer_t buffer; + const meta_t meta = { + .priority = udpard_prio_fast, + .flag_reliable = false, + .flag_acknowledgement = false, + .transfer_payload_size = 12345, + .transfer_id = 0xBADC0FFEE0DDF00DULL, + .sender_uid = 0x0123456789ABCDEFULL, + .topic_hash = 0xFEDCBA9876543210ULL, + }; + (void)header_serialize(buffer.data, meta, 12345, 0, 0); + TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES, sizeof(buffer.data)); + // Verify version and priority in first byte + TEST_ASSERT_EQUAL((HEADER_VERSION | ((unsigned)udpard_prio_fast << 5U)), buffer.data[0]); + } + // Test case 2: Reliable flag + { + header_buffer_t buffer; + const meta_t meta = { + .priority = udpard_prio_nominal, + .flag_reliable = true, + .flag_acknowledgement = false, + .transfer_payload_size = 5000, + .transfer_id = 0xAAAAAAAAAAAAAAAAULL, + .sender_uid = 0xBBBBBBBBBBBBBBBBULL, + .topic_hash = 0xCCCCCCCCCCCCCCCCULL, + }; + (void)header_serialize(buffer.data, meta, 100, 200, 0); + TEST_ASSERT_EQUAL((HEADER_VERSION | ((unsigned)udpard_prio_nominal << 5U)), buffer.data[0]); + TEST_ASSERT_EQUAL(HEADER_FLAG_RELIABLE, buffer.data[1]); + } + // Test case 3: ACK flag + { + header_buffer_t buffer; + const meta_t meta = { + .priority = udpard_prio_nominal, + .flag_reliable = false, + .flag_acknowledgement = true, + .transfer_payload_size = 16, + .transfer_id = 0x1111111111111111ULL, + .sender_uid = 0x2222222222222222ULL, + .topic_hash = 0x3333333333333333ULL, + }; + (void)header_serialize(buffer.data, meta, 0, 0, 0); + TEST_ASSERT_EQUAL((HEADER_VERSION | ((unsigned)udpard_prio_nominal << 5U)), buffer.data[0]); + TEST_ASSERT_EQUAL(HEADER_FLAG_ACKNOWLEDGEMENT, buffer.data[1]); + } } -static void testMakeChainCRCSpill1(void) +static void test_tx_validation_and_free(void) { - InstrumentedAllocator alloc; - instrumentedAllocatorNew(&alloc); - const struct UdpardTxMemoryResources mem = { - .fragment = instrumentedAllocatorMakeMemoryResource(&alloc), - .payload = instrumentedAllocatorMakeMemoryResource(&alloc), - }; - char user_transfer_referent = '\0'; - const TransferMetadata meta = { - .priority = UdpardPriorityNominal, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL, - }; - const size_t mtu = InterstellarWarSize + 3U; - const TxChain chain = txMakeChain(mem, - (byte_t[]) {11, 22, 33, 44, 55, 66, 77, 88}, - mtu, - 223574680, - meta, - (UdpardUDPIPEndpoint) {.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardPayload) {.size = InterstellarWarSize, .data = InterstellarWar}, - &user_transfer_referent); - TEST_ASSERT_EQUAL(2 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(2 * (sizeof(struct UdpardTxItem) + HEADER_SIZE_BYTES) + InterstellarWarSize + 4U, - alloc.allocated_bytes); - TEST_ASSERT_EQUAL(2, chain.count); - TEST_ASSERT_NOT_EQUAL(chain.head, chain.tail); - TEST_ASSERT_EQUAL((UdpardTxItem*) chain.tail, chain.head->next_in_transfer); - TEST_ASSERT_EQUAL(NULL, chain.tail->next_in_transfer); - - // FIRST FRAME -- contains the payload and the first three bytes of the CRC. 
- TEST_ASSERT_EQUAL(223574680, chain.head->deadline_usec); - TEST_ASSERT_EQUAL(55, chain.head->dscp); - TEST_ASSERT_EQUAL(0xBABADEDAU, chain.head->destination.ip_address); - TEST_ASSERT_EQUAL(0xD0ED, chain.head->destination.udp_port); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + mtu, chain.head->datagram_payload.size); - TEST_ASSERT_EQUAL(0, memcmp(makeHeader(meta, 0, false).data, chain.head->datagram_payload.data, HEADER_SIZE_BYTES)); - TEST_ASSERT_EQUAL(0, - memcmp(InterstellarWar, - (byte_t*) (chain.head->datagram_payload.data) + HEADER_SIZE_BYTES, - InterstellarWarSize)); - TEST_ASSERT_EQUAL(0, - memcmp(InterstellarWarCRC, - (byte_t*) (chain.head->datagram_payload.data) + HEADER_SIZE_BYTES + InterstellarWarSize, - 3U)); - TEST_ASSERT_EQUAL(&user_transfer_referent, chain.head->user_transfer_reference); - - // SECOND FRAME -- contains the last byte of the CRC. - TEST_ASSERT_EQUAL(223574680, chain.tail->deadline_usec); - TEST_ASSERT_EQUAL(55, chain.tail->dscp); - TEST_ASSERT_EQUAL(0xBABADEDAU, chain.tail->destination.ip_address); - TEST_ASSERT_EQUAL(0xD0ED, chain.tail->destination.udp_port); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + 1U, chain.tail->datagram_payload.size); - TEST_ASSERT_EQUAL(0, memcmp(makeHeader(meta, 1, true).data, chain.tail->datagram_payload.data, HEADER_SIZE_BYTES)); - TEST_ASSERT_EQUAL(0, - memcmp(InterstellarWarCRC + 3U, - (byte_t*) (chain.tail->datagram_payload.data) + HEADER_SIZE_BYTES, - 1U)); - TEST_ASSERT_EQUAL(&user_transfer_referent, chain.tail->user_transfer_reference); - - // Clean up. - udpardTxFree(mem, chain.head); - udpardTxFree(mem, chain.tail); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); + // Invalid memory config fails fast. + udpard_tx_mem_resources_t bad = { 0 }; + TEST_ASSERT_FALSE(tx_validate_mem_resources(bad)); + // Reject payload vtables with missing hooks. + const udpard_mem_vtable_t vtable_no_free = { .base = { .free = NULL }, .alloc = dummy_alloc }; + const udpard_mem_vtable_t vtable_no_alloc = { .base = { .free = noop_free }, .alloc = NULL }; + const udpard_mem_vtable_t vtable_ok = { .base = { .free = noop_free }, .alloc = dummy_alloc }; + udpard_tx_mem_resources_t bad_payload = { .transfer = { .vtable = &vtable_ok, .context = NULL } }; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + bad_payload.payload[i] = (udpard_mem_t){ .vtable = &vtable_no_free, .context = NULL }; + } + TEST_ASSERT_FALSE(tx_validate_mem_resources(bad_payload)); + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + bad_payload.payload[i] = (udpard_mem_t){ .vtable = &vtable_no_alloc, .context = NULL }; + } + TEST_ASSERT_FALSE(tx_validate_mem_resources(bad_payload)); + // Reject transfer vtables with missing hooks. 
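+    // (Same rule as the payload resources above: a memory vtable is only usable when both the alloc and the
+    // free hooks are non-NULL.)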
+ udpard_tx_mem_resources_t bad_transfer = bad_payload; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + bad_transfer.payload[i] = (udpard_mem_t){ .vtable = &vtable_ok, .context = NULL }; + } + bad_transfer.transfer = (udpard_mem_t){ .vtable = &vtable_no_free, .context = NULL }; + TEST_ASSERT_FALSE(tx_validate_mem_resources(bad_transfer)); + bad_transfer.transfer = (udpard_mem_t){ .vtable = &vtable_no_alloc, .context = NULL }; + TEST_ASSERT_FALSE(tx_validate_mem_resources(bad_transfer)); + + instrumented_allocator_t alloc_transfer = { 0 }; + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_transfer); + instrumented_allocator_new(&alloc_payload); + udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc_transfer) }; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + mem.payload[i] = instrumented_allocator_make_resource(&alloc_payload); + } + + // Populate indexes then free to hit all removal paths. + udpard_tx_t tx = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx, + 1U, + 1U, + 4U, + mem, + &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + tx_transfer_t* const tr = mem_alloc(mem.transfer, sizeof(tx_transfer_t)); + mem_zero(sizeof(*tr), tr); + tr->priority = udpard_prio_fast; + tr->deadline = 10; + tr->staged_until = 1; + tr->remote_topic_hash = 99; + tr->remote_transfer_id = 100; + tx_transfer_key_t key = { .topic_hash = 5, .transfer_id = 7 }; + // Insert with stable ordering keys. + const tx_time_key_t staged_key = { .time = tr->staged_until, + .topic_hash = tr->topic_hash, + .transfer_id = tr->transfer_id }; + const tx_time_key_t deadline_key = { .time = tr->deadline, + .topic_hash = tr->topic_hash, + .transfer_id = tr->transfer_id }; + (void)cavl2_find_or_insert( + &tx.index_staged, &staged_key, tx_cavl_compare_staged, &tr->index_staged, cavl2_trivial_factory); + (void)cavl2_find_or_insert( + &tx.index_deadline, &deadline_key, tx_cavl_compare_deadline, &tr->index_deadline, cavl2_trivial_factory); + (void)cavl2_find_or_insert( + &tx.index_transfer, &key, tx_cavl_compare_transfer, &tr->index_transfer, cavl2_trivial_factory); + (void)cavl2_find_or_insert( + &tx.index_transfer_ack, &key, tx_cavl_compare_transfer_remote, &tr->index_transfer_ack, cavl2_trivial_factory); + enlist_head(&tx.agewise, &tr->agewise); + tx_transfer_retire(&tx, tr, true); + TEST_ASSERT_NULL(tx.index_staged); + TEST_ASSERT_NULL(tx.index_transfer_ack); + instrumented_allocator_reset(&alloc_transfer); + instrumented_allocator_reset(&alloc_payload); } -static void testMakeChainCRCSpill2(void) +static void test_tx_comparators_and_feedback(void) { - InstrumentedAllocator alloc; - instrumentedAllocatorNew(&alloc); - const struct UdpardTxMemoryResources mem = { - .fragment = instrumentedAllocatorMakeMemoryResource(&alloc), - .payload = instrumentedAllocatorMakeMemoryResource(&alloc), - }; - char user_transfer_referent = '\0'; - const TransferMetadata meta = { - .priority = UdpardPriorityNominal, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL, - }; - const size_t mtu = InterstellarWarSize + 2U; - const TxChain chain = txMakeChain(mem, - (byte_t[]) {11, 22, 33, 44, 55, 66, 77, 88}, - mtu, - 223574680, - meta, - (UdpardUDPIPEndpoint) {.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardPayload) {.size = InterstellarWarSize, .data = InterstellarWar}, - &user_transfer_referent); - TEST_ASSERT_EQUAL(2 * 2ULL, 
alloc.allocated_fragments); - TEST_ASSERT_EQUAL(2 * (sizeof(struct UdpardTxItem) + HEADER_SIZE_BYTES) + InterstellarWarSize + 4U, - alloc.allocated_bytes); - TEST_ASSERT_EQUAL(2, chain.count); - TEST_ASSERT_NOT_EQUAL(chain.head, chain.tail); - TEST_ASSERT_EQUAL((UdpardTxItem*) chain.tail, chain.head->next_in_transfer); - TEST_ASSERT_EQUAL(NULL, chain.tail->next_in_transfer); - - // FIRST FRAME -- contains the payload and the first two bytes of the CRC. - TEST_ASSERT_EQUAL(223574680, chain.head->deadline_usec); - TEST_ASSERT_EQUAL(55, chain.head->dscp); - TEST_ASSERT_EQUAL(0xBABADEDAU, chain.head->destination.ip_address); - TEST_ASSERT_EQUAL(0xD0ED, chain.head->destination.udp_port); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + mtu, chain.head->datagram_payload.size); - TEST_ASSERT_EQUAL(0, memcmp(makeHeader(meta, 0, false).data, chain.head->datagram_payload.data, HEADER_SIZE_BYTES)); - TEST_ASSERT_EQUAL(0, - memcmp(InterstellarWar, - (byte_t*) (chain.head->datagram_payload.data) + HEADER_SIZE_BYTES, - InterstellarWarSize)); - TEST_ASSERT_EQUAL(0, - memcmp(InterstellarWarCRC, - (byte_t*) (chain.head->datagram_payload.data) + HEADER_SIZE_BYTES + InterstellarWarSize, - 2U)); - TEST_ASSERT_EQUAL(&user_transfer_referent, chain.head->user_transfer_reference); - - // SECOND FRAME -- contains the last two bytes of the CRC. - TEST_ASSERT_EQUAL(223574680, chain.tail->deadline_usec); - TEST_ASSERT_EQUAL(55, chain.tail->dscp); - TEST_ASSERT_EQUAL(0xBABADEDAU, chain.tail->destination.ip_address); - TEST_ASSERT_EQUAL(0xD0ED, chain.tail->destination.udp_port); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + 2U, chain.tail->datagram_payload.size); - TEST_ASSERT_EQUAL(0, memcmp(makeHeader(meta, 1, true).data, chain.tail->datagram_payload.data, HEADER_SIZE_BYTES)); - TEST_ASSERT_EQUAL(0, - memcmp(InterstellarWarCRC + 2U, - (byte_t*) (chain.tail->datagram_payload.data) + HEADER_SIZE_BYTES, - 2U)); - TEST_ASSERT_EQUAL(&user_transfer_referent, chain.tail->user_transfer_reference); - - // Clean up. - udpardTxFree(mem, chain.head); - udpardTxFree(mem, chain.tail); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); + tx_transfer_t tr; + mem_zero(sizeof(tr), &tr); + tr.staged_until = 5; + tr.deadline = 7; + tr.topic_hash = 10; + tr.transfer_id = 20; + tr.remote_topic_hash = 3; + tr.remote_transfer_id = 4; + + // Staged/deadline comparisons both ways. + tx_time_key_t tkey = { .time = 6, .topic_hash = tr.topic_hash, .transfer_id = tr.transfer_id }; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_staged(&tkey, &tr.index_staged)); + tkey.time = 4; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_staged(&tkey, &tr.index_staged)); + tkey.time = 8; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_deadline(&tkey, &tr.index_deadline)); + tkey.time = 6; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_deadline(&tkey, &tr.index_deadline)); + // Staged comparator covers topic_hash/transfer_id branches. + tkey = (tx_time_key_t){ .time = tr.staged_until, .topic_hash = tr.topic_hash - 1, .transfer_id = tr.transfer_id }; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_staged(&tkey, &tr.index_staged)); + tkey.topic_hash = tr.topic_hash + 1; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_staged(&tkey, &tr.index_staged)); + tkey.topic_hash = tr.topic_hash; + tkey.transfer_id = tr.transfer_id - 1; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_staged(&tkey, &tr.index_staged)); + tkey.transfer_id = tr.transfer_id + 1; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_staged(&tkey, &tr.index_staged)); + // Deadline comparator covers topic_hash/transfer_id branches. 
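+    // Mirroring the staged comparator, time appears to order first, with topic hash and then transfer-ID as
+    // tie-breakers, so transfers with identical deadlines still have a stable total order in the index.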
+ tkey = (tx_time_key_t){ .time = tr.deadline, .topic_hash = tr.topic_hash - 1, .transfer_id = tr.transfer_id }; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_deadline(&tkey, &tr.index_deadline)); + tkey.topic_hash = tr.topic_hash + 1; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_deadline(&tkey, &tr.index_deadline)); + tkey.topic_hash = tr.topic_hash; + tkey.transfer_id = tr.transfer_id - 1; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_deadline(&tkey, &tr.index_deadline)); + tkey.transfer_id = tr.transfer_id + 1; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_deadline(&tkey, &tr.index_deadline)); + + // Transfer comparator covers all branches. + tx_transfer_key_t key = { .topic_hash = 5, .transfer_id = 1 }; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_transfer(&key, &tr.index_transfer)); + key.topic_hash = 15; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_transfer(&key, &tr.index_transfer)); + key.topic_hash = tr.topic_hash; + key.transfer_id = 15; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_transfer(&key, &tr.index_transfer)); + key.transfer_id = 25; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_transfer(&key, &tr.index_transfer)); + key.transfer_id = tr.transfer_id; + TEST_ASSERT_EQUAL(0, tx_cavl_compare_transfer(&key, &tr.index_transfer)); + + // Remote comparator mirrors the above. + tx_transfer_key_t rkey = { .topic_hash = 2, .transfer_id = 1 }; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_transfer_remote(&rkey, &tr.index_transfer_ack)); + rkey.topic_hash = 5; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_transfer_remote(&rkey, &tr.index_transfer_ack)); + rkey.topic_hash = tr.remote_topic_hash; + rkey.transfer_id = 2; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_transfer_remote(&rkey, &tr.index_transfer_ack)); + rkey.transfer_id = 6; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_transfer_remote(&rkey, &tr.index_transfer_ack)); + rkey.transfer_id = tr.remote_transfer_id; + TEST_ASSERT_EQUAL(0, tx_cavl_compare_transfer_remote(&rkey, &tr.index_transfer_ack)); } -static void testMakeChainCRCSpill3(void) +static void test_tx_spool_and_queue_errors(void) { - InstrumentedAllocator alloc; - instrumentedAllocatorNew(&alloc); - const struct UdpardTxMemoryResources mem = { - .fragment = instrumentedAllocatorMakeMemoryResource(&alloc), - .payload = instrumentedAllocatorMakeMemoryResource(&alloc), - }; - char user_transfer_referent = '\0'; - const TransferMetadata meta = { - .priority = UdpardPriorityNominal, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL, + // OOM in spool after first frame. 
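+    // With the payload allocator capped at one fragment, the 64-byte payload over a 32-byte MTU needs at
+    // least two frames, so tx_spool should fail partway through and leave nothing enqueued.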
+ instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_payload); + alloc_payload.limit_fragments = 1; + udpard_tx_t tx = { .enqueued_frames_limit = 1, .enqueued_frames_count = 0 }; + tx.memory.payload[0] = instrumented_allocator_make_resource(&alloc_payload); + byte_t buffer[64] = { 0 }; + const udpard_bytes_scattered_t payload = make_scattered(buffer, sizeof(buffer)); + const meta_t meta = { + .priority = udpard_prio_fast, + .flag_reliable = false, + .flag_acknowledgement = false, + .transfer_payload_size = (uint32_t)payload.bytes.size, + .transfer_id = 1, + .sender_uid = 1, + .topic_hash = 1, }; - const size_t mtu = InterstellarWarSize + 1U; - const TxChain chain = txMakeChain(mem, - (byte_t[]) {11, 22, 33, 44, 55, 66, 77, 88}, - mtu, - 223574680, - meta, - (UdpardUDPIPEndpoint) {.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardPayload) {.size = InterstellarWarSize, .data = InterstellarWar}, - &user_transfer_referent); - TEST_ASSERT_EQUAL(2 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(2 * (sizeof(struct UdpardTxItem) + HEADER_SIZE_BYTES) + InterstellarWarSize + 4U, - alloc.allocated_bytes); - TEST_ASSERT_EQUAL(2, chain.count); - TEST_ASSERT_NOT_EQUAL(chain.head, chain.tail); - TEST_ASSERT_EQUAL((UdpardTxItem*) chain.tail, chain.head->next_in_transfer); - TEST_ASSERT_EQUAL(NULL, chain.tail->next_in_transfer); - - // FIRST FRAME -- contains the payload and the first byte of the CRC. - TEST_ASSERT_EQUAL(223574680, chain.head->deadline_usec); - TEST_ASSERT_EQUAL(55, chain.head->dscp); - TEST_ASSERT_EQUAL(0xBABADEDAU, chain.head->destination.ip_address); - TEST_ASSERT_EQUAL(0xD0ED, chain.head->destination.udp_port); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + mtu, chain.head->datagram_payload.size); - TEST_ASSERT_EQUAL(0, memcmp(makeHeader(meta, 0, false).data, chain.head->datagram_payload.data, HEADER_SIZE_BYTES)); - TEST_ASSERT_EQUAL(0, - memcmp(InterstellarWar, - (byte_t*) (chain.head->datagram_payload.data) + HEADER_SIZE_BYTES, - InterstellarWarSize)); - TEST_ASSERT_EQUAL(0, - memcmp(InterstellarWarCRC, - (byte_t*) (chain.head->datagram_payload.data) + HEADER_SIZE_BYTES + InterstellarWarSize, - 1U)); - TEST_ASSERT_EQUAL(&user_transfer_referent, chain.head->user_transfer_reference); - - // SECOND FRAME -- contains the last three bytes of the CRC. - TEST_ASSERT_EQUAL(223574680, chain.tail->deadline_usec); - TEST_ASSERT_EQUAL(55, chain.tail->dscp); - TEST_ASSERT_EQUAL(0xBABADEDAU, chain.tail->destination.ip_address); - TEST_ASSERT_EQUAL(0xD0ED, chain.tail->destination.udp_port); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + 3U, chain.tail->datagram_payload.size); - TEST_ASSERT_EQUAL(0, memcmp(makeHeader(meta, 1, true).data, chain.tail->datagram_payload.data, HEADER_SIZE_BYTES)); - TEST_ASSERT_EQUAL(0, - memcmp(InterstellarWarCRC + 1U, - (byte_t*) (chain.tail->datagram_payload.data) + HEADER_SIZE_BYTES, - 3U)); - TEST_ASSERT_EQUAL(&user_transfer_referent, chain.tail->user_transfer_reference); - - // Clean up. - udpardTxFree(mem, chain.head); - udpardTxFree(mem, chain.tail); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); + TEST_ASSERT_NULL(tx_spool(&tx, tx.memory.payload[0], 32, meta, payload)); + TEST_ASSERT_EQUAL_size_t(0, tx.enqueued_frames_count); + TEST_ASSERT_EQUAL_UINT64(80, tx_ack_timeout(5, udpard_prio_high, 1)); + instrumented_allocator_reset(&alloc_payload); + + // Capacity exhaustion. 
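+    // (The queue configured below cannot hold the multi-frame chain that a 2000-byte payload
+    // requires, so the push is expected to be rejected outright and accounted in errors_capacity.)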
+ instrumented_allocator_new(&alloc_payload);
+ udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc_payload) };
+ for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
+ mem.payload[i] = instrumented_allocator_make_resource(&alloc_payload);
+ }
+ TEST_ASSERT_TRUE(udpard_tx_new(
+ &tx,
+ 2U,
+ 2U,
+ 1U,
+ mem,
+ &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag }));
+ byte_t big_buf[2000] = { 0 };
+ const udpard_bytes_scattered_t big_payload = make_scattered(big_buf, sizeof(big_buf));
+ const uint16_t iface_bitmap_01 = (1U << 0U);
+ TEST_ASSERT_FALSE(udpard_tx_push(
+ &tx, 0, 1000, iface_bitmap_01, udpard_prio_fast, 11, 1, big_payload, NULL, UDPARD_USER_CONTEXT_NULL));
+ TEST_ASSERT_EQUAL_size_t(1, tx.errors_capacity);
+
+ // Immediate rejection when the request exceeds limits.
+ udpard_tx_t tx_limit;
+ mem_zero(sizeof(tx_limit), &tx_limit);
+ tx_limit.enqueued_frames_limit = 1;
+ tx_limit.enqueued_frames_count = 0;
+ tx_limit.memory.transfer = (udpard_mem_t){ .vtable = &mem_vtable_noop_alloc, .context = NULL };
+ TEST_ASSERT_FALSE(tx_ensure_queue_space(&tx_limit, 3));
+
+ // A full queue triggers the sacrifice path; the eviction is counted even when it frees no frames.
+ udpard_tx_t tx_sac;
+ mem_zero(sizeof(tx_sac), &tx_sac);
+ tx_sac.enqueued_frames_limit = 1;
+ tx_sac.enqueued_frames_count = 1;
+ tx_sac.errors_sacrifice = 0;
+ tx_sac.memory.transfer = (udpard_mem_t){ .vtable = &mem_vtable_noop_alloc, .context = NULL };
+ tx_transfer_t victim;
+ mem_zero(sizeof(victim), &victim);
+ victim.priority = udpard_prio_fast;
+ victim.deadline = 1;
+ victim.topic_hash = 7;
+ victim.transfer_id = 9;
+ // Insert into deadline index with stable key.
+ const tx_time_key_t deadline_key = { .time = victim.deadline,
+ .topic_hash = victim.topic_hash,
+ .transfer_id = victim.transfer_id };
+ (void)cavl2_find_or_insert(
+ &tx_sac.index_deadline, &deadline_key, tx_cavl_compare_deadline, &victim.index_deadline, cavl2_trivial_factory);
+ (void)cavl2_find_or_insert(
+ &tx_sac.index_transfer,
+ &(tx_transfer_key_t){ .topic_hash = victim.topic_hash, .transfer_id = victim.transfer_id },
+ tx_cavl_compare_transfer,
+ &victim.index_transfer,
+ cavl2_trivial_factory);
+ enlist_head(&tx_sac.agewise, &victim.agewise);
+ TEST_ASSERT_FALSE(tx_ensure_queue_space(&tx_sac, 1));
+ TEST_ASSERT_EQUAL_size_t(1, tx_sac.errors_sacrifice);
+
+ // Transfer allocation OOM.
+ alloc_payload.limit_fragments = 0;
+ tx.errors_capacity = 0;
+ TEST_ASSERT_TRUE(udpard_tx_new(
+ &tx,
+ 3U,
+ 3U,
+ 2U,
+ mem,
+ &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag }));
+ TEST_ASSERT_FALSE(udpard_tx_push(
+ &tx, 0, 1000, iface_bitmap_01, udpard_prio_fast, 12, 2, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL));
+ TEST_ASSERT_EQUAL_size_t(1, tx.errors_oom);
+
+ // Spool OOM inside tx_push.
+ alloc_payload.limit_fragments = 1;
+ TEST_ASSERT_TRUE(udpard_tx_new(
+ &tx,
+ 4U,
+ 4U,
+ 4U,
+ mem,
+ &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag }));
+ TEST_ASSERT_FALSE(udpard_tx_push(
+ &tx, 0, 1000, iface_bitmap_01, udpard_prio_fast, 13, 3, big_payload, NULL, UDPARD_USER_CONTEXT_NULL));
+ TEST_ASSERT_EQUAL_size_t(1, tx.errors_oom);
+
+ // Reliable transfer gets staged.
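+    // (A push that supplies a feedback callback is treated as reliable: besides being queued for
+    // transmission, the transfer lands in index_staged so the scheduler can retransmit it until it
+    // is acknowledged or its deadline passes, which is what the non-NULL index_staged verifies.)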
+ alloc_payload.limit_fragments = SIZE_MAX; + feedback_state_t fstate = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx, + 5U, + 5U, + 4U, + mem, + &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + tx.ack_baseline_timeout = 1; + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 100000, + iface_bitmap_01, + udpard_prio_nominal, + 14, + 4, + make_scattered(NULL, 0), + record_feedback, + make_user_context(&fstate))); + TEST_ASSERT_NOT_NULL(tx.index_staged); + udpard_tx_free(&tx); + instrumented_allocator_reset(&alloc_payload); } -static void testMakeChainCRCSpillFull(void) +static void test_tx_ack_and_scheduler(void) { - InstrumentedAllocator alloc; - instrumentedAllocatorNew(&alloc); - const struct UdpardTxMemoryResources mem = { - .fragment = instrumentedAllocatorMakeMemoryResource(&alloc), - .payload = instrumentedAllocatorMakeMemoryResource(&alloc), - }; - char user_transfer_referent = '\0'; - const TransferMetadata meta = { - .priority = UdpardPriorityNominal, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL, - }; - const size_t mtu = InterstellarWarSize; - const TxChain chain = txMakeChain(mem, - (byte_t[]) {11, 22, 33, 44, 55, 66, 77, 88}, - mtu, - 223574680, - meta, - (UdpardUDPIPEndpoint) {.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardPayload) {.size = InterstellarWarSize, .data = InterstellarWar}, - &user_transfer_referent); - TEST_ASSERT_EQUAL(2 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(2 * (sizeof(struct UdpardTxItem) + HEADER_SIZE_BYTES) + InterstellarWarSize + 4U, - alloc.allocated_bytes); - TEST_ASSERT_EQUAL(2, chain.count); - TEST_ASSERT_NOT_EQUAL(chain.head, chain.tail); - TEST_ASSERT_EQUAL((UdpardTxItem*) chain.tail, chain.head->next_in_transfer); - TEST_ASSERT_EQUAL(NULL, chain.tail->next_in_transfer); - - // FIRST FRAME -- contains the payload only. - TEST_ASSERT_EQUAL(223574680, chain.head->deadline_usec); - TEST_ASSERT_EQUAL(55, chain.head->dscp); - TEST_ASSERT_EQUAL(0xBABADEDAU, chain.head->destination.ip_address); - TEST_ASSERT_EQUAL(0xD0ED, chain.head->destination.udp_port); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + mtu, chain.head->datagram_payload.size); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + InterstellarWarSize, chain.head->datagram_payload.size); - TEST_ASSERT_EQUAL(0, memcmp(makeHeader(meta, 0, false).data, chain.head->datagram_payload.data, HEADER_SIZE_BYTES)); - TEST_ASSERT_EQUAL(0, - memcmp(InterstellarWar, - (byte_t*) (chain.head->datagram_payload.data) + HEADER_SIZE_BYTES, - InterstellarWarSize)); - TEST_ASSERT_EQUAL(&user_transfer_referent, chain.head->user_transfer_reference); - - // SECOND FRAME -- contains the last byte of the CRC. - TEST_ASSERT_EQUAL(223574680, chain.tail->deadline_usec); - TEST_ASSERT_EQUAL(55, chain.tail->dscp); - TEST_ASSERT_EQUAL(0xBABADEDAU, chain.tail->destination.ip_address); - TEST_ASSERT_EQUAL(0xD0ED, chain.tail->destination.udp_port); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + 4U, chain.tail->datagram_payload.size); - TEST_ASSERT_EQUAL(0, memcmp(makeHeader(meta, 1, true).data, chain.tail->datagram_payload.data, HEADER_SIZE_BYTES)); - TEST_ASSERT_EQUAL(0, - memcmp(InterstellarWarCRC, - (byte_t*) (chain.tail->datagram_payload.data) + HEADER_SIZE_BYTES, - 4U)); - TEST_ASSERT_EQUAL(&user_transfer_referent, chain.tail->user_transfer_reference); - - // Clean up. 
- udpardTxFree(mem, chain.head); - udpardTxFree(mem, chain.tail); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); + instrumented_allocator_t alloc = { 0 }; + instrumented_allocator_new(&alloc); + udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc) }; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + mem.payload[i] = instrumented_allocator_make_resource(&alloc); + } + const uint16_t iface_bitmap_01 = (1U << 0U); + + // Ack reception triggers feedback. + feedback_state_t fstate = { 0 }; + udpard_tx_t tx1 = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx1, + 10U, + 1U, + 8U, + mem, + &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + TEST_ASSERT_TRUE(udpard_tx_push(&tx1, + 0, + 1000, + iface_bitmap_01, + udpard_prio_fast, + 21, + 42, + make_scattered(NULL, 0), + record_feedback, + make_user_context(&fstate))); + TEST_ASSERT_EQUAL_UINT32(1U << 0U, udpard_tx_pending_ifaces(&tx1)); + udpard_rx_t rx = { .tx = &tx1 }; + tx_receive_ack(&rx, 21, 42); + TEST_ASSERT_EQUAL_size_t(1, fstate.count); + TEST_ASSERT_EQUAL_UINT32(0U, udpard_tx_pending_ifaces(&tx1)); + // Ignore ACKs when RX has no TX. + rx.tx = NULL; + tx_receive_ack(&rx, 21, 42); + udpard_tx_free(&tx1); + + // Best-effort transfers ignore ACKs. + udpard_tx_t tx_be = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx_be, + 10U, + 1U, + 8U, + mem, + &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + TEST_ASSERT_TRUE(udpard_tx_push(&tx_be, + 0, + 1000, + iface_bitmap_01, + udpard_prio_fast, + 22, + 43, + make_scattered(NULL, 0), + NULL, + UDPARD_USER_CONTEXT_NULL)); + udpard_rx_t rx_be = { .tx = &tx_be }; + tx_receive_ack(&rx_be, 22, 43); + TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx_be, 22, 43)); + udpard_tx_free(&tx_be); + + // Ack suppressed when coverage not improved. + udpard_tx_t tx2 = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx2, + 11U, + 2U, + 4U, + mem, + &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + tx_transfer_t prior; + mem_zero(sizeof(prior), &prior); + prior.p2p_destination[0] = make_ep(3); + prior.iface_bitmap = 1U; // matches p2p_destination[0] being valid + prior.remote_topic_hash = 7; + prior.remote_transfer_id = 8; + cavl2_find_or_insert(&tx2.index_transfer_ack, + &(tx_transfer_key_t){ .topic_hash = 7, .transfer_id = 8 }, + tx_cavl_compare_transfer_remote, + &prior.index_transfer_ack, + cavl2_trivial_factory); + rx.errors_ack_tx = 0; + rx.tx = &tx2; + tx_send_ack(&rx, 0, udpard_prio_fast, 7, 8, (udpard_remote_t){ .uid = 9, .endpoints = { make_ep(3) } }); + TEST_ASSERT_EQUAL_UINT64(0, rx.errors_ack_tx); + TEST_ASSERT_EQUAL_UINT32(0U, udpard_tx_pending_ifaces(&tx2)); + udpard_tx_free(&tx2); + + // Ack replaced with broader coverage. + udpard_tx_t tx3 = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx3, + 12U, + 3U, + 4U, + mem, + &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + rx.tx = &tx3; + tx_send_ack(&rx, 0, udpard_prio_fast, 9, 9, (udpard_remote_t){ .uid = 11, .endpoints = { make_ep(4) } }); + tx_send_ack( + &rx, 0, udpard_prio_fast, 9, 9, (udpard_remote_t){ .uid = 11, .endpoints = { make_ep(4), make_ep(5) } }); + TEST_ASSERT_NOT_EQUAL(0U, udpard_tx_pending_ifaces(&tx3)); + udpard_tx_free(&tx3); + + // Ack push failure with TX present. 
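+    // (Every memory resource below uses a no-op allocator, so the internal ack transmission cannot
+    // allocate; the failure must be surfaced through rx.errors_ack_tx even though a TX pipeline is
+    // attached.)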
+ udpard_tx_mem_resources_t fail_mem = { .transfer = { .vtable = &mem_vtable_noop_alloc, .context = NULL } }; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + fail_mem.payload[i] = fail_mem.transfer; + } + udpard_tx_t tx6 = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx6, + 15U, + 6U, + 1U, + fail_mem, + &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + rx.errors_ack_tx = 0; + rx.tx = &tx6; + tx_send_ack(&rx, 0, udpard_prio_fast, 2, 2, (udpard_remote_t){ .uid = 1, .endpoints = { make_ep(6) } }); + TEST_ASSERT_GREATER_THAN_UINT64(0, rx.errors_ack_tx); + udpard_tx_free(&tx6); + + // Ack push failure increments error. + udpard_rx_t rx_fail = { .tx = NULL }; + tx_send_ack(&rx_fail, 0, udpard_prio_fast, 1, 1, (udpard_remote_t){ 0 }); + TEST_ASSERT_GREATER_THAN_UINT64(0, rx_fail.errors_ack_tx); + + // Expired transfer purge with feedback. + udpard_tx_t tx4 = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx4, + 13U, + 4U, + 4U, + mem, + &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + tx4.errors_expiration = 0; + tx_transfer_t* exp = mem_alloc(mem.transfer, sizeof(tx_transfer_t)); + mem_zero(sizeof(*exp), exp); + exp->deadline = 1; + exp->priority = udpard_prio_slow; + exp->topic_hash = 55; + exp->transfer_id = 66; + exp->user = make_user_context(&fstate); + exp->reliable = true; + exp->feedback = record_feedback; + // Insert into deadline index with stable key. + const tx_time_key_t tx4_deadline_key = { .time = exp->deadline, + .topic_hash = exp->topic_hash, + .transfer_id = exp->transfer_id }; + (void)cavl2_find_or_insert( + &tx4.index_deadline, &tx4_deadline_key, tx_cavl_compare_deadline, &exp->index_deadline, cavl2_trivial_factory); + (void)cavl2_find_or_insert(&tx4.index_transfer, + &(tx_transfer_key_t){ .topic_hash = 55, .transfer_id = 66 }, + tx_cavl_compare_transfer, + &exp->index_transfer, + cavl2_trivial_factory); + tx_purge_expired_transfers(&tx4, 2); + TEST_ASSERT_GREATER_THAN_UINT64(0, tx4.errors_expiration); + udpard_tx_free(&tx4); + + // Staged promotion re-enqueues transfer. + udpard_tx_t tx5 = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx5, + 14U, + 5U, + 4U, + mem, + &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + tx_transfer_t staged; + mem_zero(sizeof(staged), &staged); + staged.staged_until = 0; + staged.deadline = 100; + staged.priority = udpard_prio_fast; + staged.iface_bitmap = (1U << 0U); + staged.p2p_destination[0] = make_ep(7); + tx_frame_t dummy_frame = { 0 }; + staged.head[0] = staged.cursor[0] = &dummy_frame; + // Insert into staged index with stable key. + const tx_time_key_t tx5_staged_key = { .time = staged.staged_until, + .topic_hash = staged.topic_hash, + .transfer_id = staged.transfer_id }; + cavl2_find_or_insert( + &tx5.index_staged, &tx5_staged_key, tx_cavl_compare_staged, &staged.index_staged, cavl2_trivial_factory); + tx5.ack_baseline_timeout = 1; + tx_promote_staged_transfers(&tx5, 1); + TEST_ASSERT_NOT_NULL(tx5.queue[0][staged.priority].head); + TEST_ASSERT_EQUAL_UINT32(1U << 0U, udpard_tx_pending_ifaces(&tx5)); + // Already-listed transfers stay in the queue. + tx_promote_staged_transfers(&tx5, 1000); + TEST_ASSERT_EQUAL_PTR(&staged.queue[0], tx5.queue[0][staged.priority].head); + + // Ejection stops when NIC refuses. 
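+    // (The callback below reports allow = false, emulating a driver whose queue is full; ejection
+    // is expected to stop at the first refusal and leave the frame queued, hence exactly one
+    // invocation is counted.)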
+ staged.cursor[0] = staged.head[0]; + staged.queue[0].next = NULL; + staged.queue[0].prev = NULL; + tx5.queue[0][staged.priority].head = &staged.queue[0]; + tx5.queue[0][staged.priority].tail = &staged.queue[0]; + eject_state_t eject_flag = { .count = 0, .allow = false }; + tx5.vtable = &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag }; + tx5.user = &eject_flag; + tx_eject_pending_frames(&tx5, 5, 0); + TEST_ASSERT_EQUAL_size_t(1, eject_flag.count); + udpard_tx_free(&tx5); + + instrumented_allocator_reset(&alloc); } -static void testPushPeekPopFree(void) +static void test_tx_stage_if(void) { - InstrumentedAllocator alloc; - instrumentedAllocatorNew(&alloc); - const struct UdpardTxMemoryResources mem = { - .fragment = instrumentedAllocatorMakeMemoryResource(&alloc), - .payload = instrumentedAllocatorMakeMemoryResource(&alloc), - }; - const UdpardNodeID node_id = 1234; - // - UdpardTx tx = { - .local_node_id = &node_id, - .queue_capacity = 3, - .mtu = (EtherealStrengthSize + 4U + 3U) / 3U, - .dscp_value_per_priority = {0, 1, 2, 3, 4, 5, 6, 7}, - .memory = mem, - .queue_size = 0, - .root = NULL, - }; - const TransferMetadata meta = { - .priority = UdpardPriorityNominal, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL, - }; - char user_transfer_referent = '\0'; - TEST_ASSERT_EQUAL(3, - txPush(&tx, - 1234567890U, - meta, - (UdpardUDPIPEndpoint) {.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardPayload) {.size = EtherealStrengthSize, .data = EtherealStrength}, - &user_transfer_referent)); - TEST_ASSERT_EQUAL(3 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(3 * (sizeof(struct UdpardTxItem) + HEADER_SIZE_BYTES) + EtherealStrengthSize + 4U, - alloc.allocated_bytes); - TEST_ASSERT_EQUAL(3, tx.queue_size); - - UdpardTxItem* frame = udpardTxPeek(&tx); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_NOT_EQUAL(NULL, frame->next_in_transfer); - TEST_ASSERT_EQUAL(1234567890U, frame->deadline_usec); - TEST_ASSERT_EQUAL(4, frame->dscp); - TEST_ASSERT_EQUAL(0xBABADEDAU, frame->destination.ip_address); - TEST_ASSERT_EQUAL(0xD0ED, frame->destination.udp_port); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + tx.mtu, frame->datagram_payload.size); - TEST_ASSERT_EQUAL(0, memcmp(makeHeader(meta, 0, false).data, frame->datagram_payload.data, HEADER_SIZE_BYTES)); - udpardTxFree(tx.memory, udpardTxPop(&tx, frame)); - - TEST_ASSERT_EQUAL(2 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(2, tx.queue_size); - - frame = udpardTxPeek(&tx); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_NOT_EQUAL(NULL, frame->next_in_transfer); - TEST_ASSERT_EQUAL(1234567890U, frame->deadline_usec); - TEST_ASSERT_EQUAL(4, frame->dscp); - TEST_ASSERT_EQUAL(0xBABADEDAU, frame->destination.ip_address); - TEST_ASSERT_EQUAL(0xD0ED, frame->destination.udp_port); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + tx.mtu, frame->datagram_payload.size); - TEST_ASSERT_EQUAL(0, memcmp(makeHeader(meta, 1, false).data, frame->datagram_payload.data, HEADER_SIZE_BYTES)); - udpardTxFree(tx.memory, udpardTxPop(&tx, frame)); - - TEST_ASSERT_EQUAL(1 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(1, tx.queue_size); - - frame = udpardTxPeek(&tx); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_EQUAL(NULL, frame->next_in_transfer); - TEST_ASSERT_EQUAL(1234567890U, frame->deadline_usec); - TEST_ASSERT_EQUAL(4, frame->dscp); - TEST_ASSERT_EQUAL(0xBABADEDAU, frame->destination.ip_address); - TEST_ASSERT_EQUAL(0xD0ED, 
frame->destination.udp_port); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + EtherealStrengthSize - 2 * tx.mtu + 4U, frame->datagram_payload.size); - TEST_ASSERT_EQUAL(0, memcmp(makeHeader(meta, 2, true).data, frame->datagram_payload.data, HEADER_SIZE_BYTES)); - udpardTxFree(tx.memory, udpardTxPop(&tx, frame)); - - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(0, tx.queue_size); - TEST_ASSERT_EQUAL(NULL, udpardTxPeek(&tx)); + // Exercises retransmission gating near deadline. + udpard_tx_t tx = { 0 }; + tx.ack_baseline_timeout = 10; + + tx_transfer_t tr; + mem_zero(sizeof(tr), &tr); + tr.priority = udpard_prio_nominal; + tr.deadline = 1000; + tr.staged_until = 100; + + udpard_us_t expected = tr.staged_until; + + tx_stage_if(&tx, &tr); + expected += tx_ack_timeout(tx.ack_baseline_timeout, tr.priority, 0); + TEST_ASSERT_EQUAL_UINT8(1, tr.epoch); + TEST_ASSERT_EQUAL(expected, tr.staged_until); + TEST_ASSERT_NOT_NULL(tx.index_staged); + cavl2_remove(&tx.index_staged, &tr.index_staged); + + tx_stage_if(&tx, &tr); + expected += tx_ack_timeout(tx.ack_baseline_timeout, tr.priority, 1); + TEST_ASSERT_EQUAL_UINT8(2, tr.epoch); + TEST_ASSERT_EQUAL(expected, tr.staged_until); + TEST_ASSERT_NOT_NULL(tx.index_staged); + cavl2_remove(&tx.index_staged, &tr.index_staged); + + tx_stage_if(&tx, &tr); + expected += tx_ack_timeout(tx.ack_baseline_timeout, tr.priority, 2); + TEST_ASSERT_EQUAL_UINT8(3, tr.epoch); + TEST_ASSERT_EQUAL(expected, tr.staged_until); + TEST_ASSERT_NULL(tx.index_staged); } -static void testPushPrioritization(void) +static void test_tx_stage_if_via_tx_push(void) { - InstrumentedAllocator alloc; - instrumentedAllocatorNew(&alloc); - const struct UdpardTxMemoryResources mem = { - .fragment = instrumentedAllocatorMakeMemoryResource(&alloc), - .payload = instrumentedAllocatorMakeMemoryResource(&alloc), - }; - const UdpardNodeID node_id = 1234; - // - UdpardTx tx = { - .local_node_id = &node_id, - .queue_capacity = 7, - .mtu = 140, // This is chosen to match the test data. - .dscp_value_per_priority = {0, 1, 2, 3, 4, 5, 6, 7}, - .memory = mem, - .queue_size = 0, - .root = NULL, - }; - // A -- Push the first multi-frame transfer at nominal priority level. - const TransferMetadata meta_a = { - .priority = UdpardPriorityNominal, - .src_node_id = 100, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 200, - .transfer_id = 5000, - }; - TEST_ASSERT_EQUAL(3, - txPush(&tx, - 0, - meta_a, - (UdpardUDPIPEndpoint) {.ip_address = 0xAAAAAAAA, .udp_port = 0xAAAA}, - (UdpardPayload) {.size = EtherealStrengthSize, .data = EtherealStrength}, - NULL)); - TEST_ASSERT_EQUAL(3 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(3, tx.queue_size); - UdpardTxItem* frame = udpardTxPeek(&tx); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_EQUAL(0xAAAAAAAA, frame->destination.ip_address); - - // B -- Next, push a higher-priority transfer and ensure it takes precedence. 
- TEST_ASSERT_EQUAL(1, - txPush(&tx, - 0, - (TransferMetadata) { - .priority = UdpardPriorityHigh, - .src_node_id = 100, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 200, - .transfer_id = 100000, - }, - (UdpardUDPIPEndpoint) {.ip_address = 0xBBBBBBBB, .udp_port = 0xBBBB}, - (UdpardPayload) {.size = DetailOfTheCosmosSize, .data = DetailOfTheCosmos}, - NULL)); - TEST_ASSERT_EQUAL(4 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(4, tx.queue_size); - frame = udpardTxPeek(&tx); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_EQUAL(0xBBBBBBBB, frame->destination.ip_address); - - // C -- Next, push a lower-priority transfer and ensure it goes towards the back. - TEST_ASSERT_EQUAL(1, - txPush(&tx, - 1002, - (TransferMetadata) { - .priority = UdpardPriorityLow, - .src_node_id = 100, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 200, - .transfer_id = 10000, - }, - (UdpardUDPIPEndpoint) {.ip_address = 0xCCCCCCCC, .udp_port = 0xCCCC}, - (UdpardPayload) {.size = InterstellarWarSize, .data = InterstellarWar}, - NULL)); - TEST_ASSERT_EQUAL(5 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(5, tx.queue_size); - frame = udpardTxPeek(&tx); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_EQUAL(0xBBBBBBBB, frame->destination.ip_address); - - // D -- Add another transfer like the previous one and ensure it goes in the back. - TEST_ASSERT_EQUAL(1, - txPush(&tx, - 1003, - (TransferMetadata) { - .priority = UdpardPriorityLow, - .src_node_id = 100, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 200, - .transfer_id = 10001, - }, - (UdpardUDPIPEndpoint) {.ip_address = 0xDDDDDDDD, .udp_port = 0xDDDD}, - (UdpardPayload) {.size = InterstellarWarSize, .data = InterstellarWar}, - NULL)); - TEST_ASSERT_EQUAL(6 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(6, tx.queue_size); - frame = udpardTxPeek(&tx); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_EQUAL(0xBBBBBBBB, frame->destination.ip_address); - - // E -- Add an even higher priority transfer. - TEST_ASSERT_EQUAL(1, - txPush(&tx, - 1003, - (TransferMetadata) { - .priority = UdpardPriorityFast, - .src_node_id = 100, - .dst_node_id = UDPARD_NODE_ID_UNSET, - .data_specifier = 200, - .transfer_id = 1000, - }, - (UdpardUDPIPEndpoint) {.ip_address = 0xEEEEEEEE, .udp_port = 0xEEEE}, - (UdpardPayload) {.size = InterstellarWarSize, .data = InterstellarWar}, - NULL)); - TEST_ASSERT_EQUAL(7 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(7, tx.queue_size); - frame = udpardTxPeek(&tx); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_EQUAL(0xEEEEEEEE, frame->destination.ip_address); - - // Now, unwind the queue and ensure the frames are popped in the right order. - // E - udpardTxFree(tx.memory, udpardTxPop(&tx, frame)); - TEST_ASSERT_EQUAL(6 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(6, tx.queue_size); - // B - frame = udpardTxPeek(&tx); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_EQUAL(0xBBBBBBBB, frame->destination.ip_address); - udpardTxFree(tx.memory, udpardTxPop(&tx, frame)); - TEST_ASSERT_EQUAL(5 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(5, tx.queue_size); - // A1, three frames. 
- frame = udpardTxPeek(&tx); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_EQUAL(0xAAAAAAAA, frame->destination.ip_address); - TEST_ASSERT_EQUAL(0, memcmp(makeHeader(meta_a, 0, false).data, frame->datagram_payload.data, HEADER_SIZE_BYTES)); - udpardTxFree(tx.memory, udpardTxPop(&tx, frame)); - TEST_ASSERT_EQUAL(4 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(4, tx.queue_size); - // A2 - frame = udpardTxPeek(&tx); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_EQUAL(0xAAAAAAAA, frame->destination.ip_address); - TEST_ASSERT_EQUAL(0, memcmp(makeHeader(meta_a, 1, false).data, frame->datagram_payload.data, HEADER_SIZE_BYTES)); - udpardTxFree(tx.memory, udpardTxPop(&tx, frame)); - TEST_ASSERT_EQUAL(3 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(3, tx.queue_size); - // A3 - frame = udpardTxPeek(&tx); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_EQUAL(0xAAAAAAAA, frame->destination.ip_address); - TEST_ASSERT_EQUAL(0, memcmp(makeHeader(meta_a, 2, true).data, frame->datagram_payload.data, HEADER_SIZE_BYTES)); - udpardTxFree(tx.memory, udpardTxPop(&tx, frame)); - TEST_ASSERT_EQUAL(2 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(2, tx.queue_size); - // C - frame = udpardTxPeek(&tx); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_EQUAL(0xCCCCCCCC, frame->destination.ip_address); - udpardTxFree(tx.memory, udpardTxPop(&tx, frame)); - TEST_ASSERT_EQUAL(1 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(1, tx.queue_size); - // D - frame = udpardTxPeek(&tx); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_EQUAL(0xDDDDDDDD, frame->destination.ip_address); - udpardTxFree(tx.memory, udpardTxPop(&tx, frame)); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(0, tx.queue_size); - - TEST_ASSERT_EQUAL(NULL, udpardTxPeek(&tx)); + // Tracks retransmission times via the scheduler. 
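+    // (Expected timeline, inferred from the assertions below: both redundant interfaces eject the
+    // initial frame at t = 0, each retransmits once when the first ack timeout expires at t = 160,
+    // and staging then stops because another round could not complete before the 500 us deadline.)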
+ instrumented_allocator_t alloc = { 0 }; + instrumented_allocator_new(&alloc); + udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc) }; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + mem.payload[i] = instrumented_allocator_make_resource(&alloc); + } + + udpard_tx_t tx = { 0 }; + eject_log_t log = { 0 }; + feedback_state_t fb = { 0 }; + udpard_tx_vtable_t vt = { .eject_subject = eject_subject_with_log, .eject_p2p = eject_p2p_with_log }; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 30U, 1U, 4U, mem, &vt)); + tx.user = &log; + tx.ack_baseline_timeout = 10; + const uint16_t iface_bitmap_12 = (1U << 0U) | (1U << 1U); + + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 500, + iface_bitmap_12, + udpard_prio_nominal, + 77, + 1, + make_scattered(NULL, 0), + record_feedback, + make_user_context(&fb))); + TEST_ASSERT_EQUAL_UINT32(iface_bitmap_12, udpard_tx_pending_ifaces(&tx)); + + udpard_tx_poll(&tx, 0, UDPARD_IFACE_BITMAP_ALL); + udpard_tx_poll(&tx, 160, UDPARD_IFACE_BITMAP_ALL); + udpard_tx_poll(&tx, 400, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_EQUAL_UINT32(0U, udpard_tx_pending_ifaces(&tx)); + + TEST_ASSERT_EQUAL_size_t(4, log.count); + TEST_ASSERT_EQUAL(0, log.when[0]); + TEST_ASSERT_EQUAL(0, log.when[1]); + TEST_ASSERT_EQUAL(160, log.when[2]); + TEST_ASSERT_EQUAL(160, log.when[3]); + TEST_ASSERT_NULL(tx.index_staged); + udpard_tx_free(&tx); + instrumented_allocator_reset(&alloc); } -static void testPushCapacityLimit(void) +static void test_tx_stage_if_short_deadline(void) { - InstrumentedAllocator alloc; - instrumentedAllocatorNew(&alloc); - const struct UdpardTxMemoryResources mem = { - .fragment = instrumentedAllocatorMakeMemoryResource(&alloc), - .payload = instrumentedAllocatorMakeMemoryResource(&alloc), - }; - const UdpardNodeID node_id = 1234; - // - UdpardTx tx = { - .local_node_id = &node_id, - .queue_capacity = 2, - .mtu = 10U, - .dscp_value_per_priority = {0, 1, 2, 3, 4, 5, 6, 7}, - .memory = mem, - .queue_size = 0, - .root = NULL, - }; - const TransferMetadata meta = { - .priority = UdpardPriorityNominal, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL, - }; - TEST_ASSERT_EQUAL(-UDPARD_ERROR_CAPACITY, - txPush(&tx, - 1234567890U, - meta, - (UdpardUDPIPEndpoint) {.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardPayload) {.size = EtherealStrengthSize, .data = EtherealStrength}, - NULL)); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc.allocated_bytes); - TEST_ASSERT_EQUAL(0, tx.queue_size); + // Ensures retransmission is skipped when deadline is too close. 
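+    // (With a 50 us deadline there is no room for a full ack-timeout round after the initial
+    // ejection, so only the single ejection at t = 0 is expected even though the transfer is
+    // reliable.)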
+ instrumented_allocator_t alloc = { 0 }; + instrumented_allocator_new(&alloc); + udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc) }; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + mem.payload[i] = instrumented_allocator_make_resource(&alloc); + } + + udpard_tx_t tx = { 0 }; + eject_log_t log = { 0 }; + feedback_state_t fb = { 0 }; + udpard_tx_vtable_t vt = { .eject_subject = eject_subject_with_log, .eject_p2p = eject_p2p_with_log }; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 31U, 1U, 4U, mem, &vt)); + tx.user = &log; + tx.ack_baseline_timeout = 10; + const uint16_t iface_bitmap_1 = (1U << 0U); + + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 50, + iface_bitmap_1, + udpard_prio_nominal, + 78, + 1, + make_scattered(NULL, 0), + record_feedback, + make_user_context(&fb))); + + udpard_tx_poll(&tx, 0, UDPARD_IFACE_BITMAP_ALL); + udpard_tx_poll(&tx, 30, UDPARD_IFACE_BITMAP_ALL); + udpard_tx_poll(&tx, 60, UDPARD_IFACE_BITMAP_ALL); + + TEST_ASSERT_EQUAL_size_t(1, log.count); + TEST_ASSERT_EQUAL(0, log.when[0]); + udpard_tx_free(&tx); + instrumented_allocator_reset(&alloc); } -static void testPushOOM(void) +// Cancels transfers and reports outcome. +static void test_tx_cancel(void) { - InstrumentedAllocator alloc; - instrumentedAllocatorNew(&alloc); - const struct UdpardTxMemoryResources mem = { - .fragment = instrumentedAllocatorMakeMemoryResource(&alloc), - .payload = instrumentedAllocatorMakeMemoryResource(&alloc), - }; - const UdpardNodeID node_id = 1234; - // - UdpardTx tx = { - .local_node_id = &node_id, - .queue_capacity = 10000U, - .mtu = (EtherealStrengthSize + 4U + 3U) / 3U, - .dscp_value_per_priority = {0, 1, 2, 3, 4, 5, 6, 7}, - .memory = mem, - .queue_size = 0, - .root = NULL, - }; - const TransferMetadata meta = { - .priority = UdpardPriorityNominal, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL, - }; - alloc.limit_bytes = EtherealStrengthSize; // No memory for the overheads. - TEST_ASSERT_EQUAL(-UDPARD_ERROR_MEMORY, - txPush(&tx, - 1234567890U, - meta, - (UdpardUDPIPEndpoint) {.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardPayload) {.size = EtherealStrengthSize, .data = EtherealStrength}, - NULL)); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc.allocated_bytes); - TEST_ASSERT_EQUAL(0, tx.queue_size); + TEST_ASSERT_FALSE(udpard_tx_cancel(NULL, 0, 0)); + + instrumented_allocator_t alloc = { 0 }; + instrumented_allocator_new(&alloc); + udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc) }; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + mem.payload[i] = instrumented_allocator_make_resource(&alloc); + } + + udpard_tx_t tx = { 0 }; + feedback_state_t fstate = { 0 }; + const uint16_t iface_bitmap_1 = (1U << 0U); + udpard_tx_vtable_t vt = { .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag }; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 20U, 1U, 8U, mem, &vt)); + + // Reliable transfer cancels with failure feedback. 
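+    // (Cancelling a reliable transfer still fires its feedback callback with zero
+    // acknowledgements, letting the caller distinguish an abandoned transfer from a confirmed one;
+    // a second cancel of the same transfer must report that nothing was found.)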
+ TEST_ASSERT_GREATER_THAN_UINT32(0, + udpard_tx_push(&tx, + 0, + 100, + iface_bitmap_1, + udpard_prio_fast, + 200, + 1, + make_scattered(NULL, 0), + record_feedback, + make_user_context(&fstate))); + TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 200, 1)); + TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, 200, 1)); + TEST_ASSERT_NULL(tx_transfer_find(&tx, 200, 1)); + TEST_ASSERT_EQUAL_size_t(1, fstate.count); + TEST_ASSERT_EQUAL_UINT32(0, fstate.last.acknowledgements); + TEST_ASSERT_EQUAL_size_t(0, tx.enqueued_frames_count); + TEST_ASSERT_FALSE(udpard_tx_cancel(&tx, 200, 1)); + + // Best-effort transfer cancels quietly. + TEST_ASSERT_GREATER_THAN_UINT32(0, + udpard_tx_push(&tx, + 0, + 100, + iface_bitmap_1, + udpard_prio_fast, + 201, + 2, + make_scattered(NULL, 0), + NULL, + UDPARD_USER_CONTEXT_NULL)); + TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, 201, 2)); + TEST_ASSERT_EQUAL_size_t(0, tx.enqueued_frames_count); + + udpard_tx_free(&tx); + instrumented_allocator_reset(&alloc); } -static void testPushPayloadOOM(void) +// Cancels all transfers matching a topic hash. +static void test_tx_cancel_all(void) { - InstrumentedAllocator alloc; - instrumentedAllocatorNew(&alloc); - const struct UdpardTxMemoryResources mem = { - .fragment = instrumentedAllocatorMakeMemoryResource(&alloc), - .payload = instrumentedAllocatorMakeMemoryResource(&alloc), - }; - const UdpardNodeID node_id = 1234; - // - UdpardTx tx = { - .local_node_id = &node_id, - .queue_capacity = 10000U, - .mtu = EtherealStrengthSize + HEADER_CRC_SIZE_BYTES, - .dscp_value_per_priority = {0, 1, 2, 3, 4, 5, 6, 7}, - .memory = mem, - .queue_size = 0, - .root = NULL, - }; - const TransferMetadata meta = { - .priority = UdpardPriorityNominal, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL, - }; - // There is memory of the item, but 1 byte short for payload. - alloc.limit_bytes = sizeof(UdpardTxItem) + (HEADER_SIZE_BYTES + EtherealStrengthSize + HEADER_CRC_SIZE_BYTES - 1); - TEST_ASSERT_EQUAL(-UDPARD_ERROR_MEMORY, - txPush(&tx, - 1234567890U, - meta, - (UdpardUDPIPEndpoint) {.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardPayload) {.size = EtherealStrengthSize, .data = EtherealStrength}, - NULL)); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc.allocated_bytes); - TEST_ASSERT_EQUAL(0, tx.queue_size); + // NULL self returns zero. + TEST_ASSERT_EQUAL_size_t(0, udpard_tx_cancel_all(NULL, 0)); + + instrumented_allocator_t alloc = { 0 }; + instrumented_allocator_new(&alloc); + udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc) }; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + mem.payload[i] = instrumented_allocator_make_resource(&alloc); + } + + udpard_tx_t tx = { 0 }; + feedback_state_t fstate = { 0 }; + eject_state_t eject = { .count = 0, .allow = false }; // Block ejection to retain frames. + const uint16_t iface_bitmap_1 = (1U << 0U); + udpard_tx_vtable_t vt = { .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag }; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 40U, 1U, 16U, mem, &vt)); + tx.user = &eject; + + // Cancel with no matching transfers returns zero. + TEST_ASSERT_EQUAL_size_t(0, udpard_tx_cancel_all(&tx, 999)); + + // Push multiple transfers with different topic hashes. 
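+    // (Three reliable transfers on topic 100, two best-effort ones on topic 200, and one reliable
+    // on topic 300 give cancel_all distinct match counts and exercise both the feedback and the
+    // silent removal paths.)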
+ // Topic 100: transfers 1, 2, 3 (reliable) + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 1000, + iface_bitmap_1, + udpard_prio_fast, + 100, + 1, + make_scattered(NULL, 0), + record_feedback, + make_user_context(&fstate))); + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 1000, + iface_bitmap_1, + udpard_prio_fast, + 100, + 2, + make_scattered(NULL, 0), + record_feedback, + make_user_context(&fstate))); + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 1000, + iface_bitmap_1, + udpard_prio_fast, + 100, + 3, + make_scattered(NULL, 0), + record_feedback, + make_user_context(&fstate))); + // Topic 200: transfers 1, 2 (best-effort) + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 1000, + iface_bitmap_1, + udpard_prio_nominal, + 200, + 1, + make_scattered(NULL, 0), + NULL, + UDPARD_USER_CONTEXT_NULL)); + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 1000, + iface_bitmap_1, + udpard_prio_nominal, + 200, + 2, + make_scattered(NULL, 0), + NULL, + UDPARD_USER_CONTEXT_NULL)); + // Topic 300: transfer 1 (reliable) + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 1000, + iface_bitmap_1, + udpard_prio_low, + 300, + 1, + make_scattered(NULL, 0), + record_feedback, + make_user_context(&fstate))); + + TEST_ASSERT_EQUAL_size_t(6, tx.enqueued_frames_count); + TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 100, 1)); + TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 100, 2)); + TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 100, 3)); + TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 200, 1)); + TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 200, 2)); + TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 300, 1)); + + // Cancel all topic 100 transfers; feedback invoked for each reliable transfer. + fstate.count = 0; + TEST_ASSERT_EQUAL_size_t(3, udpard_tx_cancel_all(&tx, 100)); + TEST_ASSERT_EQUAL_size_t(3, fstate.count); + TEST_ASSERT_EQUAL_UINT32(0, fstate.last.acknowledgements); + TEST_ASSERT_NULL(tx_transfer_find(&tx, 100, 1)); + TEST_ASSERT_NULL(tx_transfer_find(&tx, 100, 2)); + TEST_ASSERT_NULL(tx_transfer_find(&tx, 100, 3)); + // Other topics remain. + TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 200, 1)); + TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 200, 2)); + TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 300, 1)); + TEST_ASSERT_EQUAL_size_t(3, tx.enqueued_frames_count); + + // Cancel topic 200 (best-effort, no feedback). + fstate.count = 0; + TEST_ASSERT_EQUAL_size_t(2, udpard_tx_cancel_all(&tx, 200)); + TEST_ASSERT_EQUAL_size_t(0, fstate.count); + TEST_ASSERT_NULL(tx_transfer_find(&tx, 200, 1)); + TEST_ASSERT_NULL(tx_transfer_find(&tx, 200, 2)); + TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 300, 1)); + TEST_ASSERT_EQUAL_size_t(1, tx.enqueued_frames_count); + + // Cancel already-cancelled topic returns zero. + TEST_ASSERT_EQUAL_size_t(0, udpard_tx_cancel_all(&tx, 100)); + + // Cancel last remaining topic. 
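+    // (Only topic 300 remains at this point; cancelling it should drain the queue completely and
+    // fire the one outstanding reliable-feedback callback.)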
+ fstate.count = 0; + TEST_ASSERT_EQUAL_size_t(1, udpard_tx_cancel_all(&tx, 300)); + TEST_ASSERT_EQUAL_size_t(1, fstate.count); + TEST_ASSERT_NULL(tx_transfer_find(&tx, 300, 1)); + TEST_ASSERT_EQUAL_size_t(0, tx.enqueued_frames_count); + + udpard_tx_free(&tx); + instrumented_allocator_reset(&alloc); } -static void testPushAnonymousMultiFrame(void) +static void test_tx_spool_deduplication(void) { - InstrumentedAllocator alloc; - instrumentedAllocatorNew(&alloc); - const struct UdpardTxMemoryResources mem = { - .fragment = instrumentedAllocatorMakeMemoryResource(&alloc), - .payload = instrumentedAllocatorMakeMemoryResource(&alloc), - }; - const UdpardNodeID node_id = 0xFFFFU; - // - UdpardTx tx = { - .local_node_id = &node_id, - .queue_capacity = 10000U, - .mtu = (EtherealStrengthSize + 4U + 3U) / 3U, - .dscp_value_per_priority = {0, 1, 2, 3, 4, 5, 6, 7}, - .memory = mem, - .queue_size = 0, - .root = NULL, - }; - const TransferMetadata meta = { - .priority = UdpardPriorityNominal, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL, - }; - TEST_ASSERT_EQUAL(-UDPARD_ERROR_ANONYMOUS, - txPush(&tx, - 1234567890U, - meta, - (UdpardUDPIPEndpoint) {.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardPayload) {.size = EtherealStrengthSize, .data = EtherealStrength}, - NULL)); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc.allocated_bytes); - TEST_ASSERT_EQUAL(0, tx.queue_size); + instrumented_allocator_t alloc_a = { 0 }; + instrumented_allocator_t alloc_b = { 0 }; + instrumented_allocator_new(&alloc_a); + instrumented_allocator_new(&alloc_b); + udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc_a) }; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + mem.payload[i] = instrumented_allocator_make_resource(&alloc_a); + } + + // Dedup when MTU and allocator match (multi-frame). + udpard_tx_t tx = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx, + 99U, + 1U, + 16U, + mem, + &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + tx.mtu[0] = 600; + tx.mtu[1] = 600; + const uint16_t iface_bitmap_12 = (1U << 0U) | (1U << 1U); + byte_t payload_big[1300] = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 1000, + iface_bitmap_12, + udpard_prio_nominal, + 1, + 1, + make_scattered(payload_big, sizeof(payload_big)), + NULL, + UDPARD_USER_CONTEXT_NULL)); + tx_transfer_t* tr = latest_transfer(&tx); + TEST_ASSERT_EQUAL_size_t(frames_for(tx.mtu[0], sizeof(payload_big)), tx.enqueued_frames_count); + TEST_ASSERT_EQUAL_PTR(tr->head[0], tr->head[1]); + for (tx_frame_t* f = tr->head[0]; f != NULL; f = f->next) { + TEST_ASSERT_EQUAL_size_t(2, f->refcount); + } + udpard_tx_free(&tx); + + // Dedup when payload fits both MTU despite mismatch. 
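+    // (The MTUs differ, 500 versus 900, but a 300-byte payload fits either one in a single frame,
+    // so the same physical frame can still be shared by both interfaces via reference counting.)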
+ TEST_ASSERT_TRUE(udpard_tx_new( + &tx, + 99U, + 1U, + 8U, + mem, + &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + tx.mtu[0] = 500; + tx.mtu[1] = 900; + byte_t payload_small[300] = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 1000, + iface_bitmap_12, + udpard_prio_nominal, + 2, + 2, + make_scattered(payload_small, sizeof(payload_small)), + NULL, + UDPARD_USER_CONTEXT_NULL)); + tr = latest_transfer(&tx); + TEST_ASSERT_EQUAL_size_t(1, tx.enqueued_frames_count); + TEST_ASSERT_EQUAL_PTR(tr->head[0], tr->head[1]); + TEST_ASSERT_EQUAL_size_t(2, tr->head[0]->refcount); + udpard_tx_free(&tx); + + // No dedup when MTU differs and payload exceeds the smaller MTU. + TEST_ASSERT_TRUE(udpard_tx_new( + &tx, + 99U, + 1U, + 8U, + mem, + &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + tx.mtu[0] = 500; + tx.mtu[1] = 900; + byte_t payload_split[800] = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 1000, + iface_bitmap_12, + udpard_prio_nominal, + 3, + 3, + make_scattered(payload_split, sizeof(payload_split)), + NULL, + UDPARD_USER_CONTEXT_NULL)); + tr = latest_transfer(&tx); + TEST_ASSERT_EQUAL_size_t(frames_for(tx.mtu[0], sizeof(payload_split)) + + frames_for(tx.mtu[1], sizeof(payload_split)), + tx.enqueued_frames_count); + TEST_ASSERT_TRUE(tr->head[0] != tr->head[1]); + TEST_ASSERT_EQUAL_size_t(1, tr->head[0]->refcount); + TEST_ASSERT_EQUAL_size_t(1, tr->head[1]->refcount); + udpard_tx_free(&tx); + + // No dedup when allocators differ even with matching MTU and single frame. + udpard_tx_mem_resources_t mem_split = { .transfer = instrumented_allocator_make_resource(&alloc_a) }; + mem_split.payload[0] = instrumented_allocator_make_resource(&alloc_a); + mem_split.payload[1] = instrumented_allocator_make_resource(&alloc_b); + mem_split.payload[2] = mem_split.payload[0]; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx, + 99U, + 1U, + 8U, + mem_split, + &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + tx.mtu[0] = 600; + tx.mtu[1] = 600; + byte_t payload_one[400] = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 1000, + iface_bitmap_12, + udpard_prio_nominal, + 4, + 4, + make_scattered(payload_one, sizeof(payload_one)), + NULL, + UDPARD_USER_CONTEXT_NULL)); + tr = latest_transfer(&tx); + TEST_ASSERT_EQUAL_size_t(2, tx.enqueued_frames_count); + TEST_ASSERT_TRUE(tr->head[0] != tr->head[1]); + udpard_tx_free(&tx); + + TEST_ASSERT_EQUAL_size_t(0, alloc_a.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_b.allocated_fragments); } -static void testPushAnonymousService(void) +// Verifies that eject callbacks are ONLY invoked from udpard_tx_poll(), never from push functions. +static void test_tx_eject_only_from_poll(void) { - InstrumentedAllocator alloc; - instrumentedAllocatorNew(&alloc); - const struct UdpardTxMemoryResources mem = { - .fragment = instrumentedAllocatorMakeMemoryResource(&alloc), - .payload = instrumentedAllocatorMakeMemoryResource(&alloc), - }; - const UdpardNodeID node_id = 0xFFFFU; - // - UdpardTx tx = { - .local_node_id = &node_id, - .queue_capacity = 10000, - .mtu = 1500, - .dscp_value_per_priority = {0, 1, 2, 3, 4, 5, 6, 7}, - .memory = mem, - .queue_size = 0, - .root = NULL, - }; - const TransferMetadata meta = { - .priority = UdpardPriorityNominal, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 0x8099U, // Service response. 
- .transfer_id = 0x0123456789ABCDEFULL, - }; - TEST_ASSERT_EQUAL(-UDPARD_ERROR_ANONYMOUS, - txPush(&tx, - 1234567890U, - meta, - (UdpardUDPIPEndpoint) {.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardPayload) {.size = EtherealStrengthSize, .data = EtherealStrength}, - NULL)); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc.allocated_bytes); - TEST_ASSERT_EQUAL(0, tx.queue_size); + instrumented_allocator_t alloc = { 0 }; + instrumented_allocator_new(&alloc); + udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc) }; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + mem.payload[i] = instrumented_allocator_make_resource(&alloc); + } + + udpard_tx_t tx = { 0 }; + eject_state_t eject = { .count = 0, .allow = true }; + udpard_tx_vtable_t vt = { .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag }; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 60U, 1U, 16U, mem, &vt)); + tx.user = &eject; + + const uint16_t iface_bitmap_1 = (1U << 0U); + + // Push a subject transfer; eject must NOT be called. + eject.count = 0; + TEST_ASSERT_TRUE(udpard_tx_push( + &tx, 0, 1000, iface_bitmap_1, udpard_prio_fast, 100, 1, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL)); + TEST_ASSERT_EQUAL_size_t(0, eject.count); // eject NOT called from push + + // Push a P2P transfer; eject must NOT be called. + const udpard_remote_t remote = { .uid = 999, .endpoints = { make_ep(10) } }; + TEST_ASSERT_TRUE(udpard_tx_push_p2p( + &tx, 0, 1000, udpard_prio_fast, remote, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL, NULL)); + TEST_ASSERT_EQUAL_size_t(0, eject.count); // eject NOT called from push_p2p + + // Now poll; eject MUST be called. + udpard_tx_poll(&tx, 0, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_GREATER_THAN_size_t(0, eject.count); // eject called from poll + + // Push more transfers while frames are pending; eject still must NOT be called. + const size_t eject_count_before = eject.count; + eject.allow = false; // block ejection to keep frames pending + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 1000, + iface_bitmap_1, + udpard_prio_nominal, + 200, + 2, + make_scattered(NULL, 0), + NULL, + UDPARD_USER_CONTEXT_NULL)); + TEST_ASSERT_EQUAL_size_t(eject_count_before, eject.count); // eject NOT called from push + + TEST_ASSERT_TRUE(udpard_tx_push_p2p( + &tx, 0, 1000, udpard_prio_nominal, remote, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL, NULL)); + TEST_ASSERT_EQUAL_size_t(eject_count_before, eject.count); // eject NOT called from push_p2p + + // Poll again; eject called again (but rejected by callback). 
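+    // (eject.allow is still false, so the frames stay queued; the count rising past its pre-push
+    // value is what proves the new invocations came from the poll rather than from the pushes.)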
+ udpard_tx_poll(&tx, 0, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_GREATER_THAN_size_t(eject_count_before, eject.count); // eject called from poll + + udpard_tx_free(&tx); + instrumented_allocator_reset(&alloc); } void setUp(void) {} @@ -1061,21 +1208,18 @@ void tearDown(void) {} int main(void) { UNITY_BEGIN(); - RUN_TEST(testTxSerializeHeader); - RUN_TEST(testMakeChainEmpty); - RUN_TEST(testMakeChainSingleMaxMTU); - RUN_TEST(testMakeChainSingleFrameDefaultMTU); - RUN_TEST(testMakeChainThreeFrames); - RUN_TEST(testMakeChainCRCSpill1); - RUN_TEST(testMakeChainCRCSpill2); - RUN_TEST(testMakeChainCRCSpill3); - RUN_TEST(testMakeChainCRCSpillFull); - RUN_TEST(testPushPeekPopFree); - RUN_TEST(testPushPrioritization); - RUN_TEST(testPushCapacityLimit); - RUN_TEST(testPushOOM); - RUN_TEST(testPushPayloadOOM); - RUN_TEST(testPushAnonymousMultiFrame); - RUN_TEST(testPushAnonymousService); + RUN_TEST(test_bytes_scattered_read); + RUN_TEST(test_tx_serialize_header); + RUN_TEST(test_tx_validation_and_free); + RUN_TEST(test_tx_comparators_and_feedback); + RUN_TEST(test_tx_spool_and_queue_errors); + RUN_TEST(test_tx_stage_if); + RUN_TEST(test_tx_stage_if_via_tx_push); + RUN_TEST(test_tx_stage_if_short_deadline); + RUN_TEST(test_tx_cancel); + RUN_TEST(test_tx_cancel_all); + RUN_TEST(test_tx_spool_deduplication); + RUN_TEST(test_tx_eject_only_from_poll); + RUN_TEST(test_tx_ack_and_scheduler); return UNITY_END(); } diff --git a/tests/src/test_misc.cpp b/tests/src/test_misc.cpp deleted file mode 100644 index 1830a16..0000000 --- a/tests/src/test_misc.cpp +++ /dev/null @@ -1,83 +0,0 @@ -/// This software is distributed under the terms of the MIT License. -/// Copyright (C) OpenCyphal Development Team -/// Copyright Amazon.com Inc. or its affiliates. -/// SPDX-License-Identifier: MIT - -#include -#include "helpers.h" -#include "hexdump.hpp" -#include -#include -#include -#include - -namespace -{ -void testGather() -{ - const std::string_view payload = - "It's very simple. The attacker must first transform themselves into life forms that can survive in a " - "low-dimensional universe. For instance, a four-dimensional species can transform itself into " - "three-dimensional creatures, or a three-dimensional species can transform itself into two-dimensional life. " - "After the entire civilization has entered a lower dimension, they can initiate a dimensional strike against " - "the enemy without concern for the consequences."; - - std::array frags{{}}; - frags.at(0).next = &frags.at(1); - frags.at(1).next = &frags.at(2); - frags.at(2).next = &frags.at(3); - frags.at(3).next = nullptr; - - frags.at(0).view.data = payload.data(); - frags.at(0).view.size = 100; - - frags.at(1).view.data = payload.data() + frags.at(0).view.size; - frags.at(1).view.size = 100; - - frags.at(2).view.data = payload.data() + frags.at(1).view.size + frags.at(0).view.size; - frags.at(2).view.size = 0; // Edge case. - - frags.at(3).view.data = payload.data() + frags.at(2).view.size + frags.at(1).view.size + frags.at(0).view.size; - frags.at(3).view.size = payload.size() - frags.at(2).view.size - frags.at(1).view.size - frags.at(0).view.size; - - std::array mono{}; - - // Copy full size payload. - std::generate(mono.begin(), mono.end(), [] { return std::rand() % 256; }); - TEST_ASSERT_EQUAL(payload.size(), udpardGather(frags.at(0), mono.size(), mono.data())); - TEST_ASSERT_EQUAL_MEMORY(payload.data(), mono.data(), payload.size()); - - // Truncation mid-fragment. 
- std::generate(mono.begin(), mono.end(), [] { return std::rand() % 256; }); - TEST_ASSERT_EQUAL(150, udpardGather(frags.at(0), 150, mono.data())); - TEST_ASSERT_EQUAL_MEMORY(payload.data(), mono.data(), 150); - - // Truncation at the fragment boundary. - std::generate(mono.begin(), mono.end(), [] { return std::rand() % 256; }); - TEST_ASSERT_EQUAL(200, udpardGather(frags.at(0), 200, mono.data())); - TEST_ASSERT_EQUAL_MEMORY(payload.data(), mono.data(), 200); - - // Empty destination. - mono.fill(0xA5); - TEST_ASSERT_EQUAL(0, udpardGather(frags.at(0), 0, mono.data())); - TEST_ASSERT_EQUAL(0, std::count_if(mono.begin(), mono.end(), [](const auto x) { return x != 0xA5; })); - - // Edge cases. - TEST_ASSERT_EQUAL(0, udpardGather(frags.at(0), 0, nullptr)); - TEST_ASSERT_EQUAL(0, udpardGather(frags.at(0), 100, nullptr)); -} -} // namespace - -void setUp() -{ - seedRandomNumberGenerator(); // Re-seed the RNG for each test to avoid coupling. -} - -void tearDown() {} - -int main() -{ - UNITY_BEGIN(); - RUN_TEST(testGather); - return UNITY_END(); -} diff --git a/tests/src/test_rx.cpp b/tests/src/test_rx.cpp deleted file mode 100644 index e94fd85..0000000 --- a/tests/src/test_rx.cpp +++ /dev/null @@ -1,511 +0,0 @@ -/// This software is distributed under the terms of the MIT License. -/// Copyright (C) OpenCyphal Development Team -/// Copyright Amazon.com Inc. or its affiliates. -/// SPDX-License-Identifier: MIT - -#include -#include "helpers.h" -#include "hexdump.hpp" -#include -#include -#include - -namespace -{ -void testRxSubscriptionInit() -{ - InstrumentedAllocator mem_session{}; - InstrumentedAllocator mem_fragment{}; - InstrumentedAllocator mem_payload{}; - instrumentedAllocatorNew(&mem_session); - instrumentedAllocatorNew(&mem_fragment); - instrumentedAllocatorNew(&mem_payload); - UdpardRxSubscription sub{}; - TEST_ASSERT_EQUAL(0, - udpardRxSubscriptionInit(&sub, - 0x1234, - 1000, - { - .session = instrumentedAllocatorMakeMemoryResource(&mem_session), - .fragment = instrumentedAllocatorMakeMemoryResource(&mem_fragment), - .payload = instrumentedAllocatorMakeMemoryDeleter(&mem_payload), - })); - TEST_ASSERT_EQUAL(&instrumentedAllocatorAllocate, sub.memory.session.allocate); - TEST_ASSERT_EQUAL(&instrumentedAllocatorDeallocate, sub.memory.session.deallocate); - TEST_ASSERT_EQUAL(1000, sub.port.extent); - TEST_ASSERT_EQUAL(UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, sub.port.transfer_id_timeout_usec); - TEST_ASSERT_EQUAL(nullptr, sub.port.sessions); - TEST_ASSERT_EQUAL(0xEF001234UL, sub.udp_ip_endpoint.ip_address); - TEST_ASSERT_EQUAL(9382, sub.udp_ip_endpoint.udp_port); - TEST_ASSERT_EQUAL(0, mem_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - udpardRxSubscriptionFree(&sub); - TEST_ASSERT_EQUAL(0, mem_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - udpardRxSubscriptionFree(nullptr); // No-op. - // Invalid arguments. 
- TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, - udpardRxSubscriptionInit(nullptr, - 0xFFFF, - 1000, - { - .session = instrumentedAllocatorMakeMemoryResource(&mem_session), - .fragment = instrumentedAllocatorMakeMemoryResource(&mem_fragment), - .payload = instrumentedAllocatorMakeMemoryDeleter(&mem_payload), - })); - TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, - udpardRxSubscriptionInit(&sub, - 0xFFFF, - 1000, - { - .session = instrumentedAllocatorMakeMemoryResource(&mem_session), - .fragment = instrumentedAllocatorMakeMemoryResource(&mem_fragment), - .payload = instrumentedAllocatorMakeMemoryDeleter(&mem_payload), - })); - TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardRxSubscriptionInit(&sub, 1234, 1000, {})); -} - -void testRxSubscriptionReceive() -{ - InstrumentedAllocator mem_session{}; - InstrumentedAllocator mem_fragment{}; - InstrumentedAllocator mem_payload{}; - instrumentedAllocatorNew(&mem_session); - instrumentedAllocatorNew(&mem_fragment); - instrumentedAllocatorNew(&mem_payload); - UdpardRxSubscription sub{}; - TEST_ASSERT_EQUAL(0, - udpardRxSubscriptionInit(&sub, - 0x1234, - 1000, - { - .session = instrumentedAllocatorMakeMemoryResource(&mem_session), - .fragment = instrumentedAllocatorMakeMemoryResource(&mem_fragment), - .payload = instrumentedAllocatorMakeMemoryDeleter(&mem_payload), - })); - TEST_ASSERT_EQUAL(1000, sub.port.extent); - TEST_ASSERT_EQUAL(UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, sub.port.transfer_id_timeout_usec); - TEST_ASSERT_EQUAL(nullptr, sub.port.sessions); - TEST_ASSERT_EQUAL(0xEF001234UL, sub.udp_ip_endpoint.ip_address); - TEST_ASSERT_EQUAL(9382, sub.udp_ip_endpoint.udp_port); - TEST_ASSERT_EQUAL(0, mem_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); - UdpardRxTransfer transfer{}; - // Feed a single-frame transfer. Remember that in Cyphal/UDP, the payload CRC is part of the payload itself. - // - //>>> from pycyphal.transport.commons.crc import CRC32C - //>>> CRC32C.new(b"Hello!").value_as_bytes - // - // >>> from pycyphal.transport.udp import UDPFrame - // >>> from pycyphal.transport import Priority, MessageDataSpecifier, ServiceDataSpecifier - // >>> frame = UDPFrame(priority=Priority.FAST, transfer_id=0xbadc0ffee0ddf00d, index=0, end_of_transfer=True, - // payload=memoryview(b'Hello!\xd6\xeb\xfd\t'), source_node_id=2345, destination_node_id=0xFFFF, - // data_specifier=MessageDataSpecifier(0x1234), user_data=0) - // >>> list(frame.compile_header_and_payload()[0]) - // >>> list(frame.compile_header_and_payload()[1]) - { - const std::array data{{1, 2, 41, 9, 255, 255, 52, 18, 13, 240, 221, 224, - 254, 15, 220, 186, 0, 0, 0, 128, 0, 0, 246, 129, // - 72, 101, 108, 108, 111, 33, 214, 235, 253, 9}}; - const UdpardMutablePayload datagram{ - .size = sizeof(data), - .data = instrumentedAllocatorAllocate(&mem_payload, sizeof(data)), - }; - TEST_ASSERT_NOT_NULL(datagram.data); - std::memcpy(datagram.data, data.data(), data.size()); - TEST_ASSERT_EQUAL(1, udpardRxSubscriptionReceive(&sub, 10'000'000, datagram, 0, &transfer)); - } - TEST_ASSERT_EQUAL(1, mem_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); // Head optimization in effect. 
-    TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments);
-    TEST_ASSERT_EQUAL(10'000'000, transfer.timestamp_usec);
-    TEST_ASSERT_EQUAL(UdpardPriorityFast, transfer.priority);
-    TEST_ASSERT_EQUAL(2345, transfer.source_node_id);
-    TEST_ASSERT_EQUAL(0xBADC0FFEE0DDF00DUL, transfer.transfer_id);
-    TEST_ASSERT_EQUAL(6, transfer.payload_size);
-    TEST_ASSERT_EQUAL(6, transfer.payload.view.size);
-    TEST_ASSERT_EQUAL_MEMORY("Hello!", transfer.payload.view.data, 6);
-    TEST_ASSERT_NULL(transfer.payload.next);
-    // Free the subscription, ensure the payload is not affected because its ownership has been transferred to us.
-    udpardRxSubscriptionFree(&sub);
-    udpardRxSubscriptionFree(&sub); // The API does not guarantee anything but this is for extra safety.
-    TEST_ASSERT_EQUAL(0, mem_session.allocated_fragments); // Session gone. Bye bye.
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); // Stayin' alive.
-    // Free the payload as well.
-    udpardRxFragmentFree(transfer.payload,
-                         instrumentedAllocatorMakeMemoryResource(&mem_fragment),
-                         instrumentedAllocatorMakeMemoryDeleter(&mem_payload));
-    TEST_ASSERT_EQUAL(0, mem_session.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); // Yeah.
-}
-
-void testRxSubscriptionReceiveInvalidArgument()
-{
-    InstrumentedAllocator mem_session{};
-    InstrumentedAllocator mem_fragment{};
-    InstrumentedAllocator mem_payload{};
-    instrumentedAllocatorNew(&mem_session);
-    instrumentedAllocatorNew(&mem_fragment);
-    instrumentedAllocatorNew(&mem_payload);
-    UdpardRxSubscription sub{};
-    TEST_ASSERT_EQUAL(0,
-                      udpardRxSubscriptionInit(&sub,
-                                               0x1234,
-                                               1000,
-                                               {
-                                                   .session  = instrumentedAllocatorMakeMemoryResource(&mem_session),
-                                                   .fragment = instrumentedAllocatorMakeMemoryResource(&mem_fragment),
-                                                   .payload  = instrumentedAllocatorMakeMemoryDeleter(&mem_payload),
-                                               }));
-    TEST_ASSERT_EQUAL(1000, sub.port.extent);
-    TEST_ASSERT_EQUAL(UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, sub.port.transfer_id_timeout_usec);
-    TEST_ASSERT_EQUAL(nullptr, sub.port.sessions);
-    TEST_ASSERT_EQUAL(0xEF001234UL, sub.udp_ip_endpoint.ip_address);
-    TEST_ASSERT_EQUAL(9382, sub.udp_ip_endpoint.udp_port);
-    TEST_ASSERT_EQUAL(0, mem_session.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments);
-    // Pass invalid arguments with a valid instance; the memory will be freed anyway to avoid leaks.
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                      udpardRxSubscriptionReceive(&sub,
-                                                  0xFFFF'FFFF'FFFF'FFFFUL,
-                                                  UdpardMutablePayload{.size = 100,
-                                                                       .data = instrumentedAllocatorAllocate(&mem_payload, 100)},
-                                                  0xFF,
-                                                  nullptr));
-    TEST_ASSERT_EQUAL(0, mem_session.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); // Memory freed on exit despite the error.
-    // Calls with an invalid self pointer also result in the invalid argument error but the memory won't be freed.
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                      udpardRxSubscriptionReceive(nullptr,
-                                                  0xFFFF'FFFF'FFFF'FFFFUL,
-                                                  UdpardMutablePayload{},
-                                                  0xFF,
-                                                  nullptr));
-    // Free the subscription.
-    udpardRxSubscriptionFree(&sub);
-    udpardRxSubscriptionFree(&sub); // The API does not guarantee anything but this is for extra safety.
-    TEST_ASSERT_EQUAL(0, mem_session.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments);
-}
-
-void testRxRPCDispatcher()
-{
-    InstrumentedAllocator mem_session{};
-    InstrumentedAllocator mem_fragment{};
-    InstrumentedAllocator mem_payload{};
-    instrumentedAllocatorNew(&mem_session);
-    instrumentedAllocatorNew(&mem_fragment);
-    instrumentedAllocatorNew(&mem_payload);
-
-    // Initialize the RPC dispatcher.
-    UdpardRxRPCDispatcher self{};
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                      udpardRxRPCDispatcherInit(&self,
-                                                {
-                                                    .session  = {nullptr, nullptr, nullptr},
-                                                    .fragment = instrumentedAllocatorMakeMemoryResource(&mem_fragment),
-                                                    .payload  = instrumentedAllocatorMakeMemoryDeleter(&mem_payload),
-                                                }));
-    TEST_ASSERT_EQUAL(0,
-                      udpardRxRPCDispatcherInit(&self,
-                                                {
-                                                    .session  = instrumentedAllocatorMakeMemoryResource(&mem_session),
-                                                    .fragment = instrumentedAllocatorMakeMemoryResource(&mem_fragment),
-                                                    .payload  = instrumentedAllocatorMakeMemoryDeleter(&mem_payload),
-                                                }));
-    TEST_ASSERT_EQUAL(&instrumentedAllocatorAllocate, self.memory.session.allocate);
-    TEST_ASSERT_EQUAL(&instrumentedAllocatorDeallocate, self.memory.session.deallocate);
-    TEST_ASSERT_NULL(self.request_ports);
-    TEST_ASSERT_NULL(self.response_ports);
-    TEST_ASSERT_EQUAL(0xFFFF, self.local_node_id);
-    TEST_ASSERT_EQUAL(0, mem_session.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments);
-
-    // Start the dispatcher by setting the local node ID.
-    UdpardUDPIPEndpoint udp_ip_endpoint{};
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardRxRPCDispatcherStart(&self, 0xFFFF, &udp_ip_endpoint));
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardRxRPCDispatcherStart(&self, 0x1042, nullptr));
-    TEST_ASSERT_EQUAL(0, udpardRxRPCDispatcherStart(&self, 0x1042, &udp_ip_endpoint));
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardRxRPCDispatcherStart(&self, 0x1042, &udp_ip_endpoint));
-    TEST_ASSERT_EQUAL(0x1042, self.local_node_id);
-    TEST_ASSERT_EQUAL(0xEF011042UL, udp_ip_endpoint.ip_address);
-    TEST_ASSERT_EQUAL(9382, udp_ip_endpoint.udp_port);
-
-    // Add a request port.
-    UdpardRxRPCPort port_request_foo{};
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardRxRPCDispatcherListen(&self, nullptr, 511, true, 100));
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardRxRPCDispatcherListen(&self, &port_request_foo, 0xFFFF, true, 100));
-    TEST_ASSERT_EQUAL(1, udpardRxRPCDispatcherListen(&self, &port_request_foo, 511, true, 0)); // Added successfully.
-    TEST_ASSERT_EQUAL(0, udpardRxRPCDispatcherListen(&self, &port_request_foo, 511, true, 0)); // Re-added.
-    TEST_ASSERT_EQUAL(511, port_request_foo.service_id);
-    TEST_ASSERT_EQUAL(UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, port_request_foo.port.transfer_id_timeout_usec);
-    TEST_ASSERT_EQUAL(0, port_request_foo.port.extent);
-    TEST_ASSERT_NULL(port_request_foo.port.sessions);
-    TEST_ASSERT_NULL(port_request_foo.user_reference);
-    TEST_ASSERT_NOT_NULL(self.request_ports);
-    TEST_ASSERT_NULL(self.response_ports);
-
-    // Add a response port.
-    UdpardRxRPCPort port_response_bar{};
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardRxRPCDispatcherListen(&self, nullptr, 0, false, 0));
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardRxRPCDispatcherListen(&self, &port_response_bar, 0xFFFF, false, 0));
-    TEST_ASSERT_EQUAL(1, udpardRxRPCDispatcherListen(&self, &port_response_bar, 0, false, 100)); // Added successfully.
-    TEST_ASSERT_EQUAL(0, udpardRxRPCDispatcherListen(&self, &port_response_bar, 0, false, 100)); // Re-added.
-    TEST_ASSERT_EQUAL(0, port_response_bar.service_id);
-    TEST_ASSERT_EQUAL(UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, port_response_bar.port.transfer_id_timeout_usec);
-    TEST_ASSERT_EQUAL(100, port_response_bar.port.extent);
-    TEST_ASSERT_NULL(port_response_bar.port.sessions);
-    TEST_ASSERT_NULL(port_response_bar.user_reference);
-    TEST_ASSERT_NOT_NULL(self.request_ports);
-    TEST_ASSERT_NOT_NULL(self.response_ports);
-
-    // Add another response port.
-    UdpardRxRPCPort port_response_baz{};
-    TEST_ASSERT_EQUAL(1, udpardRxRPCDispatcherListen(&self, &port_response_baz, 9, false, 50)); // Added successfully.
-    TEST_ASSERT_EQUAL(0, udpardRxRPCDispatcherListen(&self, &port_response_baz, 9, false, 50)); // Re-added.
-    TEST_ASSERT_EQUAL(9, port_response_baz.service_id);
-    TEST_ASSERT_EQUAL(UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, port_response_baz.port.transfer_id_timeout_usec);
-    TEST_ASSERT_EQUAL(50, port_response_baz.port.extent);
-    TEST_ASSERT_NULL(port_response_baz.port.sessions);
-    TEST_ASSERT_NULL(port_response_baz.user_reference);
-    TEST_ASSERT_NOT_NULL(self.request_ports);
-    TEST_ASSERT_NOT_NULL(self.response_ports);
-
-    // Check the global states.
-    TEST_ASSERT_EQUAL(0, mem_session.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments);
-
-    // Feed a valid request for the existing port we created above.
-    //
-    // >>> from pycyphal.transport.commons.crc import CRC32C
-    // >>> CRC32C.new(b"Hello!").value_as_bytes
-    //
-    // >>> from pycyphal.transport.udp import UDPFrame
-    // >>> from pycyphal.transport import Priority, MessageDataSpecifier, ServiceDataSpecifier
-    // >>> frame = UDPFrame(priority=Priority.SLOW, transfer_id=0xbadc0ffee0ddf00d, index=0, end_of_transfer=True,
-    //                      payload=memoryview(b'Hello!\xd6\xeb\xfd\t'), source_node_id=2345, destination_node_id=0x1042,
-    //                      data_specifier=ServiceDataSpecifier(511, ServiceDataSpecifier.Role.REQUEST), user_data=0)
-    // >>> list(frame.compile_header_and_payload()[0])
-    // >>> list(frame.compile_header_and_payload()[1])
-    UdpardRxRPCPort*    out_port = nullptr;
-    UdpardRxRPCTransfer transfer{};
-    {
-        const std::array<std::uint8_t, 24 + 10> data{{1, 6, 41, 9, 66, 16, 255, 193, 13, 240, 221, 224,
-                                                      254, 15, 220, 186, 0, 0, 0, 128, 0, 0, 111, 105, //
-                                                      72, 101, 108, 108, 111, 33, 214, 235, 253, 9}};
-        const UdpardMutablePayload datagram{
-            .size = sizeof(data),
-            .data = instrumentedAllocatorAllocate(&mem_payload, sizeof(data)),
-        };
-        TEST_ASSERT_NOT_NULL(datagram.data);
-        std::memcpy(datagram.data, data.data(), data.size());
-        TEST_ASSERT_EQUAL(1, udpardRxRPCDispatcherReceive(&self, 10'000'000, datagram, 2, &out_port, &transfer));
-    }
-    TEST_ASSERT_EQUAL(&port_request_foo, out_port); // Points to the correct port.
-    TEST_ASSERT_EQUAL(511, transfer.service_id);
-    TEST_ASSERT_EQUAL(true, transfer.is_request);
-    TEST_ASSERT_EQUAL(10'000'000, transfer.base.timestamp_usec);
-    TEST_ASSERT_EQUAL(UdpardPrioritySlow, transfer.base.priority);
-    TEST_ASSERT_EQUAL(2345, transfer.base.source_node_id);
-    TEST_ASSERT_EQUAL(0xBADC0FFEE0DDF00D, transfer.base.transfer_id);
-    TEST_ASSERT_EQUAL(0, transfer.base.payload_size); // Truncated away because extent zero.
-    TEST_ASSERT_EQUAL(0, transfer.base.payload.view.size);
-    TEST_ASSERT_NULL(transfer.base.payload.next);
-    // Check the global states.
-    TEST_ASSERT_EQUAL(1, mem_session.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); // Because the payload was truncated away.
-    udpardRxFragmentFree(transfer.base.payload, self.memory.fragment, self.memory.payload); // No-op.
-    TEST_ASSERT_EQUAL(1, mem_session.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments);
-
-    // Feed the same transfer as before through another interface. It will be rejected as it is a duplicate.
-    {
-        const std::array<std::uint8_t, 24 + 10> data{{1, 6, 41, 9, 66, 16, 255, 193, 13, 240, 221, 224,
-                                                      254, 15, 220, 186, 0, 0, 0, 128, 0, 0, 111, 105, //
-                                                      72, 101, 108, 108, 111, 33, 214, 235, 253, 9}};
-        const UdpardMutablePayload datagram{
-            .size = sizeof(data),
-            .data = instrumentedAllocatorAllocate(&mem_payload, sizeof(data)),
-        };
-        TEST_ASSERT_NOT_NULL(datagram.data);
-        std::memcpy(datagram.data, data.data(), data.size());
-        TEST_ASSERT_EQUAL(0, udpardRxRPCDispatcherReceive(&self, 10'001'000, datagram, 0, nullptr, &transfer));
-    }
-    // Check the global states.
-    TEST_ASSERT_EQUAL(1, mem_session.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments);
-
-    // Feed a valid response for the existing port we created above.
-    //
-    // >>> frame = UDPFrame(priority=Priority.OPTIONAL, transfer_id=0x123456789ABCDEF, index=0, end_of_transfer=True,
-    //                      payload=memoryview(b'Hello!\xd6\xeb\xfd\t'), source_node_id=5432, destination_node_id=0x1042,
-    //                      data_specifier=ServiceDataSpecifier(0, ServiceDataSpecifier.Role.RESPONSE), user_data=0)
-    {
-        const std::array<std::uint8_t, 24 + 10> data{{1, 7, 56, 21, 66, 16, 0, 128, 239, 205, 171, 137,
-                                                      103, 69, 35, 1, 0, 0, 0, 128, 0, 0, 164, 48, //
-                                                      72, 101, 108, 108, 111, 33, 214, 235, 253, 9}};
-        const UdpardMutablePayload datagram{
-            .size = sizeof(data),
-            .data = instrumentedAllocatorAllocate(&mem_payload, sizeof(data)),
-        };
-        TEST_ASSERT_NOT_NULL(datagram.data);
-        std::memcpy(datagram.data, data.data(), data.size());
-        TEST_ASSERT_EQUAL(1, udpardRxRPCDispatcherReceive(&self, 10'002'000, datagram, 1, &out_port, &transfer));
-    }
-    TEST_ASSERT_EQUAL(&port_response_bar, out_port); // Points to the correct port.
-    TEST_ASSERT_EQUAL(0, transfer.service_id);
-    TEST_ASSERT_EQUAL(false, transfer.is_request);
-    TEST_ASSERT_EQUAL(10'002'000, transfer.base.timestamp_usec);
-    TEST_ASSERT_EQUAL(UdpardPriorityOptional, transfer.base.priority);
-    TEST_ASSERT_EQUAL(5432, transfer.base.source_node_id);
-    TEST_ASSERT_EQUAL(0x123456789ABCDEF, transfer.base.transfer_id);
-    TEST_ASSERT_EQUAL(6, transfer.base.payload_size);
-    TEST_ASSERT_EQUAL(6, transfer.base.payload.view.size);
-    TEST_ASSERT_EQUAL_MEMORY("Hello!", transfer.base.payload.view.data, 6);
-    TEST_ASSERT_NULL(transfer.base.payload.next);
-    // Check the global states.
-    TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments);
-    udpardRxFragmentFree(transfer.base.payload, self.memory.fragment, self.memory.payload);
-    TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments);
-
-    // Feed another valid transfer for which there is no port. It will be ignored.
-    // >>> frame = UDPFrame(priority=Priority.OPTIONAL, transfer_id=0x123456789ABCDEF, index=0, end_of_transfer=True,
-    //                      payload=memoryview(b'Hello!\xd6\xeb\xfd\t'), source_node_id=5432, destination_node_id=0x1042,
-    //                      data_specifier=ServiceDataSpecifier(123, ServiceDataSpecifier.Role.RESPONSE), user_data=0)
-    {
-        const std::array<std::uint8_t, 24 + 10> data{{1, 7, 56, 21, 66, 16, 123, 128, 239, 205, 171, 137,
-                                                      103, 69, 35, 1, 0, 0, 0, 128, 0, 0, 180, 206, //
-                                                      72, 101, 108, 108, 111, 33, 214, 235, 253, 9}};
-        const UdpardMutablePayload datagram{
-            .size = sizeof(data),
-            .data = instrumentedAllocatorAllocate(&mem_payload, sizeof(data)),
-        };
-        TEST_ASSERT_NOT_NULL(datagram.data);
-        std::memcpy(datagram.data, data.data(), data.size());
-        TEST_ASSERT_EQUAL(0, udpardRxRPCDispatcherReceive(&self, 10'003'000, datagram, 1, &out_port, &transfer));
-    }
-    TEST_ASSERT_NULL(out_port); // No port.
-    TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments);
-
-    // Feed another valid transfer on the correct port but addressed to the wrong node.
-    // >>> frame = UDPFrame(priority=Priority.OPTIONAL, transfer_id=0x123456789ABCDEF, index=0, end_of_transfer=True,
-    //                      payload=memoryview(b'Hello!\xd6\xeb\xfd\t'), source_node_id=5432, destination_node_id=1234,
-    //                      data_specifier=ServiceDataSpecifier(0, ServiceDataSpecifier.Role.RESPONSE), user_data=0)
-    {
-        const std::array<std::uint8_t, 24 + 10> data{{1, 7, 56, 21, 210, 4, 0, 128, 239, 205, 171, 137,
-                                                      103, 69, 35, 1, 0, 0, 0, 128, 0, 0, 236, 89, //
-                                                      72, 101, 108, 108, 111, 33, 214, 235, 253, 9}};
-        const UdpardMutablePayload datagram{
-            .size = sizeof(data),
-            .data = instrumentedAllocatorAllocate(&mem_payload, sizeof(data)),
-        };
-        TEST_ASSERT_NOT_NULL(datagram.data);
-        std::memcpy(datagram.data, data.data(), data.size());
-        TEST_ASSERT_EQUAL(0, udpardRxRPCDispatcherReceive(&self, 10'004'000, datagram, 1, nullptr, &transfer));
-    }
-    TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments);
-
-    // Feed an invalid frame. Ensure it is freed regardless.
-    TEST_ASSERT_EQUAL(0,
-                      udpardRxRPCDispatcherReceive(&self,
-                                                   10'005'000,
-                                                   UdpardMutablePayload{
-                                                       .size = 100,
-                                                       .data = instrumentedAllocatorAllocate(&mem_payload, 100),
-                                                   },
-                                                   1,
-                                                   nullptr,
-                                                   &transfer));
-    TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); // Freed.
-
-    // Invalid arguments. The memory is freed as long as self is valid.
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                      udpardRxRPCDispatcherReceive(&self,
-                                                   0,
-                                                   UdpardMutablePayload{
-                                                       .size = 100,
-                                                       .data = instrumentedAllocatorAllocate(&mem_payload, 100),
-                                                   },
-                                                   1,
-                                                   nullptr,
-                                                   nullptr));
-    TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); // Freed.
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                      udpardRxRPCDispatcherReceive(nullptr, 0, UdpardMutablePayload{}, 1, nullptr, nullptr));
-
-    // Remove the ports.
-    TEST_ASSERT_EQUAL(0, udpardRxRPCDispatcherCancel(&self, 511, false)); // No such port.
-    TEST_ASSERT_EQUAL(0, udpardRxRPCDispatcherCancel(&self, 0, true));    // No such port.
-    TEST_ASSERT_EQUAL(0, udpardRxRPCDispatcherCancel(&self, 9, true));    // No such port.
-
-    TEST_ASSERT_EQUAL(1, udpardRxRPCDispatcherCancel(&self, 511, true)); // Removed.
-    TEST_ASSERT_NULL(self.request_ports);
-    TEST_ASSERT_NOT_NULL(self.response_ports);
-    TEST_ASSERT_EQUAL(1, mem_session.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments);
-
-    TEST_ASSERT_EQUAL(1, udpardRxRPCDispatcherCancel(&self, 0, false)); // Removed.
-    TEST_ASSERT_NULL(self.request_ports);
-    TEST_ASSERT_NOT_NULL(self.response_ports);
-    TEST_ASSERT_EQUAL(0, mem_session.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments);
-
-    TEST_ASSERT_EQUAL(1, udpardRxRPCDispatcherCancel(&self, 9, false)); // Removed.
-    TEST_ASSERT_NULL(self.request_ports);
-    TEST_ASSERT_NULL(self.response_ports);
-    TEST_ASSERT_EQUAL(0, mem_session.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments);
-    TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments);
-
-    TEST_ASSERT_EQUAL(0, udpardRxRPCDispatcherCancel(&self, 511, true)); // Idempotency.
-    TEST_ASSERT_EQUAL(0, udpardRxRPCDispatcherCancel(&self, 0, false));  // Idempotency.
-    TEST_ASSERT_EQUAL(0, udpardRxRPCDispatcherCancel(&self, 9, false));  // Idempotency.
-
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardRxRPCDispatcherCancel(&self, 0xFFFF, true));
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardRxRPCDispatcherCancel(nullptr, 123, false));
-}
-
-} // namespace
-
-void setUp() {}
-
-void tearDown() {}
-
-int main()
-{
-    UNITY_BEGIN();
-    RUN_TEST(testRxSubscriptionInit);
-    RUN_TEST(testRxSubscriptionReceive);
-    RUN_TEST(testRxSubscriptionReceiveInvalidArgument);
-    RUN_TEST(testRxRPCDispatcher);
-    return UNITY_END();
-}
diff --git a/tests/src/test_tx.cpp b/tests/src/test_tx.cpp
deleted file mode 100644
index 5f5a025..0000000
--- a/tests/src/test_tx.cpp
+++ /dev/null
@@ -1,546 +0,0 @@
-/// This software is distributed under the terms of the MIT License.
-/// Copyright (C) OpenCyphal Development Team
-/// Copyright Amazon.com Inc. or its affiliates.
-/// SPDX-License-Identifier: MIT
-
-#include <udpard.h>
-#include "helpers.h"
-#include "hexdump.hpp"
-#include <unity.h>
-#include <array>
-#include <cstring>
-#include <iostream>
-#include <string_view>
-#include <variant>
-
-namespace
-{
-constexpr std::string_view FleetingEvents =
-    "What was the human world like in the eyes of the mountains? Perhaps just something they saw on a leisurely "
-    "afternoon. First, a few small living beings appeared on the plain. After a while, they multiplied, and after "
-    "another while they erected structures like anthills that quickly filled the region. The structures shone from the "
-    "inside, and some of them let off smoke. After another while, the lights and smoke disappeared, and the small "
-    "things vanished as well, and then their structures toppled and were buried in the sand. That was all. Among the "
-    "countless things the mountains had witnessed, these fleeting events were not necessarily the most interesting.";
-constexpr std::array<std::uint8_t, 4> FleetingEventsCRC{{26, 198, 18, 137}};
-
-void testInit()
-{
-    std::monostate     user_referent;
-    const UdpardNodeID node_id = 0;
-    {
-        const UdpardMemoryResource mr{
-            .user_reference = &user_referent,
-            .deallocate     = &dummyAllocatorDeallocate,
-            .allocate       = &dummyAllocatorAllocate,
-        };
-        const UdpardTxMemoryResources memory = {.fragment = mr, .payload = mr};
-        TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardTxInit(nullptr, &node_id, 0, memory));
-    }
-    {
-        UdpardTx                   tx{};
-        const UdpardMemoryResource mr{
-            .user_reference = &user_referent,
-            .deallocate     = &dummyAllocatorDeallocate,
-            .allocate       = &dummyAllocatorAllocate,
-        };
-        const UdpardTxMemoryResources memory = {.fragment = mr, .payload = mr};
-        TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardTxInit(&tx, nullptr, 0, memory));
-    }
-    {
-        UdpardTx                   tx{};
-        const UdpardMemoryResource mr{
-            .user_reference = &user_referent,
-            .deallocate     = &dummyAllocatorDeallocate,
-            .allocate       = nullptr,
-        };
-        const UdpardTxMemoryResources memory = {.fragment = mr, .payload = mr};
-        TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardTxInit(&tx, &node_id, 0, memory));
-    }
-    {
-        UdpardTx                   tx{};
-        const UdpardMemoryResource mr{
-            .user_reference = &user_referent,
-            .deallocate     = nullptr,
-            .allocate       = &dummyAllocatorAllocate,
-        };
-        const UdpardTxMemoryResources memory = {.fragment = mr, .payload = mr};
-        TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardTxInit(&tx, &node_id, 0, memory));
-    }
-    {
-        UdpardTx                   tx{};
-        const UdpardMemoryResource mr{
-            .user_reference = &user_referent,
-            .deallocate     = &dummyAllocatorDeallocate,
-            .allocate       = &dummyAllocatorAllocate,
-        };
-        const UdpardTxMemoryResources memory = {.fragment = mr, .payload = mr};
-        TEST_ASSERT_EQUAL(0, udpardTxInit(&tx, &node_id, 0, memory));
-        TEST_ASSERT_EQUAL(&user_referent, tx.memory.fragment.user_reference);
-        TEST_ASSERT_EQUAL(&user_referent, tx.memory.payload.user_reference);
-        TEST_ASSERT_EQUAL(UDPARD_MTU_DEFAULT, tx.mtu);
-    }
-}
-
-void testPublish()
-{
-    InstrumentedAllocator alloc;
-    instrumentedAllocatorNew(&alloc);
-    const UdpardTxMemoryResources mem = {
-        .fragment = instrumentedAllocatorMakeMemoryResource(&alloc),
-        .payload  = instrumentedAllocatorMakeMemoryResource(&alloc),
-    };
-    const UdpardNodeID node_id = 1234;
-    //
-    UdpardTx tx{
-        .local_node_id           = &node_id,
-        .queue_capacity          = 1U,
-        .mtu                     = UDPARD_MTU_DEFAULT,
-        .dscp_value_per_priority = {0, 1, 2, 3, 4, 5, 6, 7},
-        .memory                  = mem,
-        .queue_size              = 0,
-        .root                    = nullptr,
-    };
-    std::monostate   user_transfer_referent;
-    UdpardTransferID transfer_id = 0;
-    TEST_ASSERT_EQUAL(1,
-                      udpardTxPublish(&tx,
-                                      1234567890,
-                                      UdpardPriorityNominal,
-                                      0x1432,
-                                      transfer_id++,
-                                      {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                      &user_transfer_referent));
-    TEST_ASSERT_EQUAL(1 * 2ULL, alloc.allocated_fragments);
-    TEST_ASSERT_EQUAL(1, tx.queue_size);
-    auto* frame = udpardTxPeek(&tx);
-    std::cout << hexdump::hexdump(frame->datagram_payload.data, frame->datagram_payload.size) << "\n\n";
-    TEST_ASSERT_NOT_EQUAL(nullptr, frame);
-    TEST_ASSERT_EQUAL(nullptr, frame->next_in_transfer);
-    TEST_ASSERT_EQUAL(1234567890, frame->deadline_usec);
-    TEST_ASSERT_EQUAL(4, frame->dscp);
-    TEST_ASSERT_EQUAL(0xEF00'1432UL, frame->destination.ip_address);
-    TEST_ASSERT_EQUAL(9382, frame->destination.udp_port);
-    TEST_ASSERT_EQUAL(&user_transfer_referent, frame->user_transfer_reference);
-    TEST_ASSERT_EQUAL(24 + FleetingEvents.size() + 4, frame->datagram_payload.size);
-    TEST_ASSERT_EQUAL(0,
-                      std::memcmp(static_cast<const std::uint8_t*>(frame->datagram_payload.data) + 24,
-                                  FleetingEvents.data(),
-                                  FleetingEvents.size()));
-    TEST_ASSERT_EQUAL(0,
-                      std::memcmp(static_cast<const std::uint8_t*>(frame->datagram_payload.data) + 24 +
-                                      FleetingEvents.size(),
-                                  FleetingEventsCRC.data(),
-                                  FleetingEventsCRC.size()));
-    udpardTxFree(tx.memory, udpardTxPop(&tx, frame));
-    udpardTxFree(tx.memory, udpardTxPop(&tx, nullptr)); // No-op.
-
-    // Out of queue; transfer-ID not incremented.
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_CAPACITY,
-                      udpardTxPublish(&tx,
-                                      1234567890,
-                                      UdpardPriorityNominal,
-                                      0x1432,
-                                      transfer_id,
-                                      {.size = tx.mtu * 2, .data = FleetingEvents.data()},
-                                      nullptr));
-
-    // Attempt to publish a multi-frame transfer with an anonymous local node.
-    {
-        auto               tx_bad            = tx;
-        const UdpardNodeID anonymous_node_id = 0xFFFFU;
-        tx_bad.queue_size    = 1000;
-        tx_bad.mtu           = 10; // Force multi-frame.
-        tx_bad.local_node_id = &anonymous_node_id;
-        TEST_ASSERT_EQUAL(-UDPARD_ERROR_ANONYMOUS,
-                          udpardTxPublish(&tx_bad,
-                                          1234567890,
-                                          UdpardPriorityNominal,
-                                          0x1432,
-                                          transfer_id,
-                                          {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                          nullptr));
-    }
-
-    // Invalid Tx.
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                      udpardTxPublish(nullptr,
-                                      1234567890,
-                                      UdpardPriorityNominal,
-                                      0x1432,
-                                      transfer_id,
-                                      {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                      nullptr));
-    // Invalid local node-ID.
-    {
-        auto tx_bad          = tx;
-        tx_bad.local_node_id = nullptr;
-        TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                          udpardTxPublish(&tx_bad,
-                                          1234567890,
-                                          UdpardPriorityNominal,
-                                          0x1432,
-                                          transfer_id,
-                                          {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                          nullptr));
-    }
-    // Invalid priority.
-    {
-        auto bad_priority = UdpardPriorityOptional;
-        TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                          udpardTxPublish(&tx,
-                                          1234567890,
-                                          (UdpardPriority) (bad_priority + 1),
-                                          0x1432,
-                                          transfer_id,
-                                          {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                          nullptr));
-    }
-    // Invalid subject.
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                      udpardTxPublish(&tx,
-                                      1234567890,
-                                      UdpardPriorityNominal,
-                                      0xFFFFU,
-                                      transfer_id,
-                                      {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                      nullptr));
-    // Invalid payload pointer.
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                      udpardTxPublish(&tx,
-                                      1234567890,
-                                      UdpardPriorityNominal,
-                                      0x1432,
-                                      transfer_id,
-                                      {.size = FleetingEvents.size(), .data = nullptr},
-                                      nullptr));
-    TEST_ASSERT_EQUAL(1, transfer_id);
-}
-
-void testRequest()
-{
-    InstrumentedAllocator alloc;
-    instrumentedAllocatorNew(&alloc);
-    const UdpardTxMemoryResources mem = {
-        .fragment = instrumentedAllocatorMakeMemoryResource(&alloc),
-        .payload  = instrumentedAllocatorMakeMemoryResource(&alloc),
-    };
-    const UdpardNodeID node_id = 1234;
-    //
-    UdpardTx tx{
-        .local_node_id           = &node_id,
-        .queue_capacity          = 1U,
-        .mtu                     = UDPARD_MTU_DEFAULT,
-        .dscp_value_per_priority = {0, 1, 2, 3, 4, 5, 6, 7},
-        .memory                  = mem,
-        .queue_size              = 0,
-        .root                    = nullptr,
-    };
-    std::monostate   user_transfer_referent;
-    UdpardTransferID transfer_id = 0;
-    TEST_ASSERT_EQUAL(1,
-                      udpardTxRequest(&tx,
-                                      1234567890,
-                                      UdpardPriorityNominal,
-                                      0x123,
-                                      0x1538,
-                                      transfer_id++,
-                                      {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                      &user_transfer_referent));
-    TEST_ASSERT_EQUAL(1 * 2ULL, alloc.allocated_fragments);
-    TEST_ASSERT_EQUAL(1, tx.queue_size);
-    auto* frame = udpardTxPeek(&tx);
-    std::cout << hexdump::hexdump(frame->datagram_payload.data, frame->datagram_payload.size) << "\n\n";
-    TEST_ASSERT_NOT_EQUAL(nullptr, frame);
-    TEST_ASSERT_EQUAL(nullptr, frame->next_in_transfer);
-    TEST_ASSERT_EQUAL(1234567890, frame->deadline_usec);
-    TEST_ASSERT_EQUAL(4, frame->dscp);
-    TEST_ASSERT_EQUAL(0xEF01'1538UL, frame->destination.ip_address);
-    TEST_ASSERT_EQUAL(9382, frame->destination.udp_port);
-    TEST_ASSERT_EQUAL(&user_transfer_referent, frame->user_transfer_reference);
-    TEST_ASSERT_EQUAL(24 + FleetingEvents.size() + 4, frame->datagram_payload.size);
-    TEST_ASSERT_EQUAL(0,
-                      std::memcmp(static_cast<const std::uint8_t*>(frame->datagram_payload.data) + 24,
-                                  FleetingEvents.data(),
-                                  FleetingEvents.size()));
-    TEST_ASSERT_EQUAL(0,
-                      std::memcmp(static_cast<const std::uint8_t*>(frame->datagram_payload.data) + 24 +
-                                      FleetingEvents.size(),
-                                  FleetingEventsCRC.data(),
-                                  FleetingEventsCRC.size()));
-    udpardTxFree(tx.memory, udpardTxPop(&tx, frame));
-    udpardTxFree(tx.memory, udpardTxPop(&tx, nullptr)); // No-op.
-
-    // Out of queue; transfer-ID not incremented.
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_CAPACITY,
-                      udpardTxRequest(&tx,
-                                      1234567890,
-                                      UdpardPriorityNominal,
-                                      0x123,
-                                      0x1538,
-                                      transfer_id,
-                                      {.size = tx.mtu * 2, .data = FleetingEvents.data()},
-                                      nullptr));
-
-    // Attempt to send a service transfer from an anonymous node.
-    {
-        auto               tx_bad            = tx;
-        const UdpardNodeID anonymous_node_id = 0xFFFFU;
-        tx_bad.queue_size    = 1000;
-        tx_bad.mtu           = 10; // Force multi-frame.
-        tx_bad.local_node_id = &anonymous_node_id;
-        TEST_ASSERT_EQUAL(-UDPARD_ERROR_ANONYMOUS,
-                          udpardTxRequest(&tx_bad,
-                                          1234567890,
-                                          UdpardPriorityNominal,
-                                          0x123,
-                                          0x1538,
-                                          transfer_id,
-                                          {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                          nullptr));
-    }
-
-    // Invalid Tx.
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                      udpardTxRequest(nullptr,
-                                      1234567890,
-                                      UdpardPriorityNominal,
-                                      0x123,
-                                      0x1538,
-                                      transfer_id,
-                                      {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                      nullptr));
-    // Invalid local node-ID.
-    {
-        auto tx_bad          = tx;
-        tx_bad.local_node_id = nullptr;
-        TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                          udpardTxRequest(&tx_bad,
-                                          1234567890,
-                                          UdpardPriorityNominal,
-                                          0x123,
-                                          0x1538,
-                                          transfer_id,
-                                          {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                          nullptr));
-    }
-    // Invalid priority.
-    {
-        auto bad_priority = UdpardPriorityOptional;
-        TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                          udpardTxRequest(&tx,
-                                          1234567890,
-                                          (UdpardPriority) (bad_priority + 1),
-                                          0x123,
-                                          0x1538,
-                                          transfer_id,
-                                          {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                          nullptr));
-    }
-    // Invalid remote node-ID.
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                      udpardTxRequest(&tx,
-                                      1234567890,
-                                      UdpardPriorityNominal,
-                                      0x123,
-                                      0xFFFF,
-                                      transfer_id,
-                                      {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                      nullptr));
-    // Invalid service-ID.
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                      udpardTxRequest(&tx,
-                                      1234567890,
-                                      UdpardPriorityNominal,
-                                      0xFFFFU,
-                                      0x1538,
-                                      transfer_id,
-                                      {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                      nullptr));
-    // Invalid payload pointer.
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                      udpardTxRequest(&tx,
-                                      1234567890,
-                                      UdpardPriorityNominal,
-                                      0x123,
-                                      0x1538,
-                                      transfer_id,
-                                      {.size = FleetingEvents.size(), .data = nullptr},
-                                      nullptr));
-    TEST_ASSERT_EQUAL(1, transfer_id);
-}
-
-void testRespond()
-{
-    InstrumentedAllocator alloc;
-    instrumentedAllocatorNew(&alloc);
-    const UdpardTxMemoryResources mem = {
-        .fragment = instrumentedAllocatorMakeMemoryResource(&alloc),
-        .payload  = instrumentedAllocatorMakeMemoryResource(&alloc),
-    };
-    const UdpardNodeID node_id = 1234;
-    //
-    UdpardTx tx{
-        .local_node_id           = &node_id,
-        .queue_capacity          = 1U,
-        .mtu                     = UDPARD_MTU_DEFAULT,
-        .dscp_value_per_priority = {0, 1, 2, 3, 4, 5, 6, 7},
-        .memory                  = mem,
-        .queue_size              = 0,
-        .root                    = nullptr,
-    };
-    std::monostate user_transfer_referent;
-    TEST_ASSERT_EQUAL(1,
-                      udpardTxRespond(&tx,
-                                      1234567890,
-                                      UdpardPriorityNominal,
-                                      0x123,
-                                      0x1538,
-                                      9876543210,
-                                      {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                      &user_transfer_referent));
-    TEST_ASSERT_EQUAL(1 * 2ULL, alloc.allocated_fragments);
-    TEST_ASSERT_EQUAL(1, tx.queue_size);
-    auto* frame = udpardTxPeek(&tx);
-    std::cout << hexdump::hexdump(frame->datagram_payload.data, frame->datagram_payload.size) << "\n\n";
-    TEST_ASSERT_NOT_EQUAL(nullptr, frame);
-    TEST_ASSERT_EQUAL(nullptr, frame->next_in_transfer);
-    TEST_ASSERT_EQUAL(1234567890, frame->deadline_usec);
-    TEST_ASSERT_EQUAL(4, frame->dscp);
-    TEST_ASSERT_EQUAL(0xEF01'1538UL, frame->destination.ip_address);
-    TEST_ASSERT_EQUAL(9382, frame->destination.udp_port);
-    TEST_ASSERT_EQUAL(&user_transfer_referent, frame->user_transfer_reference);
-    TEST_ASSERT_EQUAL(24 + FleetingEvents.size() + 4, frame->datagram_payload.size);
-    TEST_ASSERT_EQUAL(0,
-                      std::memcmp(static_cast<const std::uint8_t*>(frame->datagram_payload.data) + 24,
-                                  FleetingEvents.data(),
-                                  FleetingEvents.size()));
-    TEST_ASSERT_EQUAL(0,
-                      std::memcmp(static_cast<const std::uint8_t*>(frame->datagram_payload.data) + 24 +
-                                      FleetingEvents.size(),
-                                  FleetingEventsCRC.data(),
-                                  FleetingEventsCRC.size()));
-    udpardTxFree(tx.memory, udpardTxPop(&tx, frame));
-    udpardTxFree(tx.memory, udpardTxPop(&tx, nullptr)); // No-op.
-
-    // Out of queue; transfer-ID not incremented.
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_CAPACITY,
-                      udpardTxRespond(&tx,
-                                      1234567890,
-                                      UdpardPriorityNominal,
-                                      0x123,
-                                      0x1538,
-                                      0,
-                                      {.size = tx.mtu * 2, .data = FleetingEvents.data()},
-                                      nullptr));
-
-    // Attempt to send a service transfer from an anonymous node.
-    {
-        auto               tx_bad            = tx;
-        const UdpardNodeID anonymous_node_id = 0xFFFFU;
-        tx_bad.queue_size = 1000;
-        tx_bad.mtu        = 10; // Force multi-frame.
-        tx_bad.local_node_id = &anonymous_node_id;
-        TEST_ASSERT_EQUAL(-UDPARD_ERROR_ANONYMOUS,
-                          udpardTxRespond(&tx_bad,
-                                          1234567890,
-                                          UdpardPriorityNominal,
-                                          0x123,
-                                          0x1538,
-                                          0,
-                                          {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                          nullptr));
-    }
-
-    // Invalid Tx.
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                      udpardTxRespond(nullptr,
-                                      1234567890,
-                                      UdpardPriorityNominal,
-                                      0x123,
-                                      0x1538,
-                                      0,
-                                      {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                      nullptr));
-    // Invalid local node-ID.
-    {
-        auto tx_bad          = tx;
-        tx_bad.local_node_id = nullptr;
-        TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                          udpardTxRespond(&tx_bad,
-                                          1234567890,
-                                          UdpardPriorityNominal,
-                                          0x123,
-                                          0x1538,
-                                          0,
-                                          {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                          nullptr));
-    }
-    // Invalid priority.
-    {
-        auto bad_priority = UdpardPriorityOptional;
-        TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                          udpardTxRespond(&tx,
-                                          1234567890,
-                                          (UdpardPriority) (bad_priority + 1),
-                                          0x123,
-                                          0x1538,
-                                          0,
-                                          {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                          nullptr));
-    }
-    // Invalid remote node-ID.
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                      udpardTxRespond(&tx,
-                                      1234567890,
-                                      UdpardPriorityNominal,
-                                      0x123,
-                                      0xFFFF,
-                                      0,
-                                      {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                      nullptr));
-    // Invalid service-ID.
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                      udpardTxRespond(&tx,
-                                      1234567890,
-                                      UdpardPriorityNominal,
-                                      0xFFFFU,
-                                      0x1538,
-                                      0,
-                                      {.size = FleetingEvents.size(), .data = FleetingEvents.data()},
-                                      nullptr));
-    // Invalid payload pointer.
-    TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT,
-                      udpardTxRespond(&tx,
-                                      1234567890,
-                                      UdpardPriorityNominal,
-                                      0x123,
-                                      0x1538,
-                                      0,
-                                      {.size = FleetingEvents.size(), .data = nullptr},
-                                      nullptr));
-}
-
-void testPeekPopFreeNULL() // Just make sure we don't crash.
-{
-    TEST_ASSERT_EQUAL(nullptr, udpardTxPeek(nullptr));
-    TEST_ASSERT_EQUAL(nullptr, udpardTxPop(nullptr, nullptr));
-    udpardTxFree({}, nullptr);
-}
-
-} // namespace
-
-void setUp() {}
-
-void tearDown() {}
-
-int main()
-{
-    UNITY_BEGIN();
-    RUN_TEST(testInit);
-    RUN_TEST(testPublish);
-    RUN_TEST(testRequest);
-    RUN_TEST(testRespond);
-    RUN_TEST(testPeekPopFreeNULL);
-    return UNITY_END();
-}
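
Reviewer note: the deleted TX tests above exercise the full enqueue/drain cycle of the transmission pipeline. For reference, below is a minimal standalone sketch of that same cycle, restricted to the udpardTx* calls and struct fields visible in the deleted code. It is illustrative only: heapAllocate/heapDeallocate are hypothetical malloc-backed callbacks, and their signatures are assumptions inferred from how the tests build UdpardMemoryResource (user_reference/deallocate/allocate), not part of this changeset; the node-ID, subject-ID, and deadline values are likewise arbitrary.

// Minimal sketch of the TX pipeline; assumptions flagged above and in the comments below.
#include <udpard.h>
#include <cstdio>
#include <cstdlib>

namespace
{
// Hypothetical heap-backed allocator; the callback signatures are assumed from the tests' helpers.
void* heapAllocate(void* const /*user_reference*/, const size_t size)
{
    return std::malloc(size);
}
void heapDeallocate(void* const /*user_reference*/, const size_t /*size*/, void* const pointer)
{
    std::free(pointer);
}
} // namespace

int main()
{
    const UdpardMemoryResource mr{.user_reference = nullptr, .deallocate = &heapDeallocate, .allocate = &heapAllocate};
    const UdpardNodeID         node_id = 1234;
    UdpardTx                   tx{};
    if (udpardTxInit(&tx, &node_id, 2, {.fragment = mr, .payload = mr}) < 0)
    {
        return 1;
    }
    // Enqueue one message transfer on subject 0x1432; the library frames it with the
    // 24-byte header and the trailing payload CRC, as the deleted tests assert.
    const char msg[] = "Hello!";
    if (udpardTxPublish(&tx, 1234567890, UdpardPriorityNominal, 0x1432, 0, {.size = sizeof(msg) - 1, .data = msg}, nullptr) < 0)
    {
        return 1;
    }
    // Drain the queue: peek, hand the datagram to the socket layer (stubbed here), then pop and free.
    for (auto* item = udpardTxPeek(&tx); item != nullptr; item = udpardTxPeek(&tx))
    {
        std::printf("%zu bytes to 0x%08lx:%u\n",
                    item->datagram_payload.size,
                    (unsigned long) item->destination.ip_address,
                    (unsigned) item->destination.udp_port);
        udpardTxFree(tx.memory, udpardTxPop(&tx, item));
    }
    return 0;
}

The key design point the tests assert repeatedly is visible here: every datagram handed out by udpardTxPeek() must eventually be returned through udpardTxPop()/udpardTxFree(), which is exactly what the instrumented allocators verify by checking that allocated_fragments returns to zero.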