From a90daf2d6c4d947355de7f276d3d9d2469f5837e Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Mon, 2 Feb 2026 21:12:08 +0200 Subject: [PATCH 01/13] Remove topic hash --- .idea/dictionaries/project.xml | 1 + cyphal_udp_header.dsdl | 40 +++ libudpard/udpard.c | 611 ++++++++++++++------------------- libudpard/udpard.h | 70 ++-- 4 files changed, 328 insertions(+), 394 deletions(-) create mode 100644 cyphal_udp_header.dsdl diff --git a/.idea/dictionaries/project.xml b/.idea/dictionaries/project.xml index 8d9a0c0..73c7425 100644 --- a/.idea/dictionaries/project.xml +++ b/.idea/dictionaries/project.xml @@ -9,6 +9,7 @@ efgh fghij fstate + incompat klmno klmnopqrst lmnopqrst diff --git a/cyphal_udp_header.dsdl b/cyphal_udp_header.dsdl new file mode 100644 index 0000000..c4ce87a --- /dev/null +++ b/cyphal_udp_header.dsdl @@ -0,0 +1,40 @@ +# All Cyphal/UDP traffic is sent to port 9382. +# The subject multicast group address is composed as 239.0.0.0 (=0xEF000000) + subject_id (23 bits). +# All frames of a transfer must share the same field values unless otherwise noted. +# Frames may arrive out-of-order, possibly interleaved with neighboring transfers; implementations must cope. + +uint5 version #=2 in this version. +uint3 priority # 0=highest, 7=lowest. + +uint2 KIND_MSG_BEST_EFFORT = 0 # No ack must be sent. +uint2 KIND_MSG_RELIABLE = 1 # Remote must acknowledge reception by sending an ACK frame back. +uint2 KIND_ACK = 2 # Sent P2P; the transfer_id is of the acknowledged frame. Payload empty/ignored. +uint2 kind +uint6 reserved_incompat # Discard frame if any incompatibility flags are set that are not understood. + +void16 # Reserved for compatibility flags and fields (transmit zero, ignore on reception). + +# Payload reassembly information. +# We provide both the frame index and the frame payload offset to allow various reassembly strategies depending on the +# preferences of the implementation. The provided information is sufficient for zero-copy out-of-order reassembly. +# Offset 4 bytes. + +uint24 frame_index # Zero-based index of the payload fragment carried by this frame. +void8 +uint32 frame_payload_offset # The offset of the frame payload relative to the start of the transfer payload. +uint32 transfer_payload_size # Total for all frames. + +# Transfer identification information. +# The transfer-ID is a single field that segregates transfers by topic hash and epoch (publisher sequence restarts). +# Offset 16 bytes. + +uint64 transfer_id # For multi-frame reassembly and dedup. ACK specifies the acked tfer here. +uint64 sender_uid # Origin identifier ensures invariance to the source IP address for reassembly. + +# Integrity checking information. +# Offset 32 bytes. + +uint32 prefix_crc32c # crc32c(payload[0:(frame_payload_offset+payload_size)]) +uint32 header_crc32c # Covers all fields above. Same as the transfer payload CRC. + +# End of header at 40 bytes. Payload follows. diff --git a/libudpard/udpard.c b/libudpard/udpard.c index b6a685b..b7f500a 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -81,13 +81,6 @@ static_assert((UDPARD_IPv4_SUBJECT_ID_MAX & (UDPARD_IPv4_SUBJECT_ID_MAX + 1)) == /// Pending ack transfers expire after this long if not transmitted. #define ACK_TX_DEADLINE MEGA -/// The ACK message payload is structured as follows, in DSDL notation: -/// -/// uint64 topic_hash # Topic hash of the original message being acknowledged. -/// uint64 transfer_id # Transfer-ID of the original message being acknowledged. 
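The subject multicast group composition documented at the top of cyphal_udp_header.dsdl is easy to get wrong, so here is a minimal sketch of it in C. The helper name and the host-byte-order representation are illustrative assumptions, not part of this patch:

    #include <stdint.h>

    #define CYPHAL_UDP_PORT        9382U         /* All Cyphal/UDP traffic is sent to this port. */
    #define CYPHAL_MCAST_PREFIX    0xEF000000UL  /* 239.0.0.0 */
    #define CYPHAL_SUBJECT_ID_MASK 0x7FFFFFUL    /* 23 bits. */

    /* Hypothetical helper: composes the IPv4 multicast group address for the given subject-ID. */
    static uint32_t subject_group_address(const uint32_t subject_id)
    {
        return (uint32_t)(CYPHAL_MCAST_PREFIX | (subject_id & CYPHAL_SUBJECT_ID_MASK));
    }

For example, subject-ID 42 yields 0xEF00002A, i.e. group 239.0.0.42, port 9382.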
-/// # If there is any additional data not defined by the format, it must be ignored. -#define ACK_SIZE_BYTES 16U - static size_t smaller(const size_t a, const size_t b) { return (a < b) ? a : b; } static size_t larger(const size_t a, const size_t b) { return (a > b) ? a : b; } static int64_t min_i64(const int64_t a, const int64_t b) { return (a < b) ? a : b; } @@ -456,23 +449,28 @@ static void* ptr_unbias(const void* const ptr, const size_t offset) // --------------------------------------------- HEADER --------------------------------------------- // --------------------------------------------------------------------------------------------------------------------- -#define HEADER_SIZE_BYTES 48U -#define HEADER_VERSION 2U -#define HEADER_FLAG_RELIABLE 0x01U -#define HEADER_FLAG_ACKNOWLEDGEMENT 0x02U -#define HEADER_FRAME_INDEX_MAX 0xFFFFFFU /// 4 GiB with 256-byte MTU; 21.6 GiB with 1384-byte MTU +/// See cyphal_udp_header.dsdl for the layout. +#define HEADER_SIZE_BYTES 40U +#define HEADER_VERSION 2U +#define HEADER_FRAME_INDEX_MAX 0xFFFFFFU /// 4 GiB with 256-byte MTU; 21.6 GiB with 1384-byte MTU +typedef enum frame_kind_t +{ + frame_msg_best, + frame_msg_reliable, + frame_ack, +} frame_kind_t; + +/// The transfer-ID is designed to be unique per pending transfer. The uniquness is achieved by randomization. +/// For extra entropy, P2P transfers have their transfer-ID computed as (base_counter++)+destination_uid; +/// the base counter is seeded with a random value. typedef struct { udpard_prio_t priority; - - bool flag_reliable; - bool flag_acknowledgement; - - uint32_t transfer_payload_size; - uint64_t transfer_id; - uint64_t sender_uid; - uint64_t topic_hash; + frame_kind_t kind; + uint32_t transfer_payload_size; + uint64_t transfer_id; + uint64_t sender_uid; } meta_t; static byte_t* header_serialize(byte_t* const buffer, @@ -481,26 +479,19 @@ static byte_t* header_serialize(byte_t* const buffer, const uint32_t frame_payload_offset, const uint32_t prefix_crc) { - byte_t* ptr = buffer; - byte_t flags = 0; - if (meta.flag_reliable) { - flags |= HEADER_FLAG_RELIABLE; - } - if (meta.flag_acknowledgement) { - flags |= HEADER_FLAG_ACKNOWLEDGEMENT; - } - *ptr++ = (byte_t)(HEADER_VERSION | (meta.priority << 5U)); - *ptr++ = flags; - *ptr++ = 0; - *ptr++ = 0; - ptr = serialize_u32(ptr, frame_index & HEADER_FRAME_INDEX_MAX); - ptr = serialize_u32(ptr, frame_payload_offset); - ptr = serialize_u32(ptr, meta.transfer_payload_size); - ptr = serialize_u64(ptr, meta.transfer_id); - ptr = serialize_u64(ptr, meta.sender_uid); - ptr = serialize_u64(ptr, meta.topic_hash); - ptr = serialize_u32(ptr, prefix_crc); - ptr = serialize_u32(ptr, crc_full(HEADER_SIZE_BYTES - CRC_SIZE_BYTES, buffer)); + UDPARD_ASSERT((meta.kind == frame_msg_best) || (meta.kind == frame_msg_reliable) || (meta.kind == frame_ack)); + byte_t* ptr = buffer; + *ptr++ = (byte_t)(HEADER_VERSION | (meta.priority << 5U)); + *ptr++ = (byte_t)meta.kind; + *ptr++ = 0; + *ptr++ = 0; + ptr = serialize_u32(ptr, frame_index & HEADER_FRAME_INDEX_MAX); + ptr = serialize_u32(ptr, frame_payload_offset); + ptr = serialize_u32(ptr, meta.transfer_payload_size); + ptr = serialize_u64(ptr, meta.transfer_id); + ptr = serialize_u64(ptr, meta.sender_uid); + ptr = serialize_u32(ptr, prefix_crc); + ptr = serialize_u32(ptr, crc_full(HEADER_SIZE_BYTES - CRC_SIZE_BYTES, buffer)); UDPARD_ASSERT((size_t)(ptr - buffer) == HEADER_SIZE_BYTES); return ptr; } @@ -520,18 +511,14 @@ static bool header_deserialize(const udpard_bytes_mut_t dgram_payload, const byte_t head = 
*ptr++; const byte_t version = head & 0x1FU; if (version == HEADER_VERSION) { - out_meta->priority = (udpard_prio_t)((byte_t)(head >> 5U) & 0x07U); - const byte_t flags = *ptr++; - out_meta->flag_reliable = (flags & HEADER_FLAG_RELIABLE) != 0U; - out_meta->flag_acknowledgement = (flags & HEADER_FLAG_ACKNOWLEDGEMENT) != 0U; - const byte_t incompatibility = (byte_t)(flags & ~(HEADER_FLAG_RELIABLE | HEADER_FLAG_ACKNOWLEDGEMENT)); + out_meta->priority = (udpard_prio_t)((byte_t)(head >> 5U) & 0x07U); + out_meta->kind = (frame_kind_t)*ptr++; ptr += 2U; ptr = deserialize_u32(ptr, frame_index); ptr = deserialize_u32(ptr, frame_payload_offset); ptr = deserialize_u32(ptr, &out_meta->transfer_payload_size); ptr = deserialize_u64(ptr, &out_meta->transfer_id); ptr = deserialize_u64(ptr, &out_meta->sender_uid); - ptr = deserialize_u64(ptr, &out_meta->topic_hash); ptr = deserialize_u32(ptr, prefix_crc); (void)ptr; // Set up the output payload view. @@ -540,16 +527,15 @@ static bool header_deserialize(const udpard_bytes_mut_t dgram_payload, // Finalize the fields. *frame_index = HEADER_FRAME_INDEX_MAX & *frame_index; // Validate the fields. - ok = ok && (incompatibility == 0U); + ok = ok && ((out_meta->kind == frame_msg_best) || (out_meta->kind == frame_msg_reliable) || + (out_meta->kind == frame_ack)); ok = ok && (((uint64_t)*frame_payload_offset + (uint64_t)out_payload->size) <= (uint64_t)out_meta->transfer_payload_size); ok = ok && ((0 == *frame_index) == (0 == *frame_payload_offset)); // The prefix-CRC of the first frame of a transfer equals the CRC of its payload. ok = ok && ((0 < *frame_payload_offset) || (crc_full(out_payload->size, out_payload->data) == *prefix_crc)); - // ACK frame requires zero offset. - ok = ok && ((!out_meta->flag_acknowledgement) || (*frame_payload_offset == 0U)); - // Detect impossible flag combinations. - ok = ok && (!(out_meta->flag_reliable && out_meta->flag_acknowledgement)); + // ACK frame requires zero offset, single-frame transfer. + ok = ok && ((out_meta->kind != frame_ack) || (*frame_payload_offset == 0U)); } else { ok = false; } @@ -597,56 +583,41 @@ static tx_frame_t* tx_frame_new(udpard_tx_t* const tx, const udpard_mem_t mem, c return frame; } -/// The ordering is by topic hash first, then by transfer-ID. -/// Therefore, it orders all transfers by topic hash, allowing quick lookup by topic with an arbitrary transfer-ID. -typedef struct -{ - uint64_t topic_hash; - uint64_t transfer_id; -} tx_transfer_key_t; - /// The transmission scheduler maintains several indexes for the transfers in the pipeline. /// The segregated priority queue only contains transfers that are ready for transmission. /// The staged index contains transfers ordered by readiness for retransmission; /// transfers that will no longer be transmitted but are retained waiting for the ack are in neither of these. /// The deadline index contains ALL transfers, ordered by their deadlines, used for purging expired transfers. -/// The transfer index contains ALL transfers, used for lookup by (topic_hash, transfer_id). typedef struct tx_transfer_t { - udpard_tree_t index_staged; ///< Soonest to be ready on the left. Key: staged_until + transfer identity - udpard_tree_t index_deadline; ///< Soonest to expire on the left. Key: deadline + transfer identity - udpard_tree_t index_transfer; ///< Specific transfer lookup for ack management. Key: tx_transfer_key_t udpard_listed_t queue[UDPARD_IFACE_COUNT_MAX]; ///< Listed when ready for transmission. 
- udpard_listed_t agewise; ///< Listed when created; oldest at the tail. - udpard_tree_t index_transfer_ack; ///< Only for acks. Key: tx_transfer_key_t but referencing remote_*. + udpard_tree_t index_transfer_id; ///< ALL transfers by transfer_id, then by seq_no. + udpard_tree_t index_deadline; ///< Soonest to expire on the left. Key: deadline + transfer identity + udpard_tree_t index_staged; ///< Soonest to be ready on the left. Key: staged_until + transfer identity + udpard_listed_t agewise; ///< Listed when created; oldest at the tail. + /// Mutable transmission state. All other fields, except for the index handles, are immutable. /// We always keep a pointer to the head, plus a cursor that scans the frames during transmission. /// Both are NULL if the payload is destroyed. + /// The transmission iface set is indicated by which head[] entries are non-NULL. /// The head points to the first frame unless it is known that no (further) retransmissions are needed, /// in which case the old head is deleted and the head points to the next frame to transmit. - tx_frame_t* head[UDPARD_IFACE_COUNT_MAX]; - - /// Mutable transmission state. All other fields, except for the index handles, are immutable. + tx_frame_t* head[UDPARD_IFACE_COUNT_MAX]; tx_frame_t* cursor[UDPARD_IFACE_COUNT_MAX]; uint_fast8_t epoch; ///< Does not overflow due to exponential backoff; e.g. 1us with epoch=48 => 9 years. udpard_us_t staged_until; /// Constant transfer properties supplied by the client. - /// The remote_* fields are identical to the local ones except in the case of ack transfers, where they contain the - /// values encoded in the ack message. This is needed to find pending acks (to minimize duplicates); - /// in the future we may even remove them and accept potential ack duplication, since they are idempotent and cheap. - /// By default, upon construction, the remote_* fields equal the local ones, which is valid for ordinary messages. - uint64_t topic_hash; - uint64_t transfer_id; - uint64_t remote_topic_hash; - uint64_t remote_transfer_id; - udpard_us_t deadline; - bool reliable; - udpard_prio_t priority; - uint16_t iface_bitmap; ///< Guaranteed to have at least one bit set within UDPARD_IFACE_COUNT_MAX. - udpard_udpip_ep_t p2p_destination[UDPARD_IFACE_COUNT_MAX]; ///< Only for P2P transfers. + uint64_t seq_no; ///< Tie breaker; greater in later transfers; agewise orders by this. + uint64_t transfer_id; + udpard_us_t deadline; + frame_kind_t kind; + udpard_prio_t priority; + bool is_p2p; + udpard_remote_t p2p_remote; ///< Only valid if is_p2p. + + /// Application closure. udpard_user_context_t user; - void (*feedback)(udpard_tx_t*, udpard_tx_feedback_t); } tx_transfer_t; @@ -697,11 +668,11 @@ static void tx_transfer_free_payload(tx_transfer_t* const tr) /// where pub/sub associations are established and removed automatically, transparently to the application. static void tx_transfer_retire(udpard_tx_t* const tx, tx_transfer_t* const tr, const bool success) { - // Construct the feedback object first before the transfer is destroyed. + // Save the feedback state first before the transfer is destroyed. const udpard_tx_feedback_t fb = { .user = tr->user, .acknowledgements = success ? 1 : 0 }; - UDPARD_ASSERT(tr->reliable == (tr->feedback != NULL)); - // save the feedback pointer void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t) = tr->feedback; + UDPARD_ASSERT((feedback == NULL) ? 
((tr->kind == frame_msg_best) || (tr->kind == frame_ack)) + : (tr->kind == frame_msg_reliable)); // Remove from all indexes and lists. for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { @@ -709,9 +680,10 @@ static void tx_transfer_retire(udpard_tx_t* const tx, tx_transfer_t* const tr, c } delist(&tx->agewise, &tr->agewise); (void)cavl2_remove_if(&tx->index_staged, &tr->index_staged); + UDPARD_ASSERT(cavl2_is_inserted(tx->index_deadline, &tr->index_deadline)); + UDPARD_ASSERT(cavl2_is_inserted(tx->index_transfer_id, &tr->index_transfer_id)); cavl2_remove(&tx->index_deadline, &tr->index_deadline); - cavl2_remove(&tx->index_transfer, &tr->index_transfer); - (void)cavl2_remove_if(&tx->index_transfer_ack, &tr->index_transfer_ack); + cavl2_remove(&tx->index_transfer_id, &tr->index_transfer_id); // Free the memory. The payload memory may already be empty depending on where we were invoked from. tx_transfer_free_payload(tr); @@ -747,68 +719,47 @@ static bool tx_ensure_queue_space(udpard_tx_t* const tx, const size_t total_fram return total_frames_needed <= (tx->enqueued_frames_limit - tx->enqueued_frames_count); } -// Key for time-ordered TX indices with stable tiebreaking. -typedef struct -{ - udpard_us_t time; - uint64_t topic_hash; - uint64_t transfer_id; -} tx_time_key_t; - -// Compare staged transfers by time then by transfer identity. static int32_t tx_cavl_compare_staged(const void* const user, const udpard_tree_t* const node) { - const tx_time_key_t* const key = (const tx_time_key_t*)user; - const tx_transfer_t* const tr = CAVL2_TO_OWNER(node, tx_transfer_t, index_staged); // clang-format off - if (key->time < tr->staged_until) { return -1; } - if (key->time > tr->staged_until) { return +1; } - if (key->topic_hash < tr->topic_hash) { return -1; } - if (key->topic_hash > tr->topic_hash) { return +1; } - if (key->transfer_id < tr->transfer_id) { return -1; } - if (key->transfer_id > tr->transfer_id) { return +1; } + const tx_transfer_t* const outer = (const tx_transfer_t*)user; + const tx_transfer_t* const inner = CAVL2_TO_OWNER(node, tx_transfer_t, index_staged); // clang-format off + if (outer->staged_until < inner->staged_until) { return -1; } + if (outer->staged_until > inner->staged_until) { return +1; } + if (outer->seq_no < inner->seq_no) { return -1; } + if (outer->seq_no > inner->seq_no) { return +1; } return 0; // clang-format on } -// Compare deadlines by time then by transfer identity. 
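Note from tx_transfer_retire() above: the feedback callback of a reliable transfer is invoked exactly once, with acknowledgements set to 1 on acknowledged delivery and 0 when the transfer expires or is cancelled. A minimal application-side sketch; the callback name and the reactions are illustrative assumptions:

    /* Passed as the 'feedback' argument of udpard_tx_push() for reliable transfers. */
    static void on_tx_feedback(udpard_tx_t* const tx, const udpard_tx_feedback_t fb)
    {
        (void)tx;
        if (fb.acknowledgements > 0) {
            /* Delivery confirmed by at least one remote node. */
        } else {
            /* The deadline elapsed (or the transfer was cancelled) without an ack;
               the application may re-publish under a fresh transfer-ID. */
        }
        /* fb.user carries the udpard_user_context_t supplied at push time. */
    }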
static int32_t tx_cavl_compare_deadline(const void* const user, const udpard_tree_t* const node) { - const tx_time_key_t* const key = (const tx_time_key_t*)user; - const tx_transfer_t* const tr = CAVL2_TO_OWNER(node, tx_transfer_t, index_deadline); // clang-format off - if (key->time < tr->deadline) { return -1; } - if (key->time > tr->deadline) { return +1; } - if (key->topic_hash < tr->topic_hash) { return -1; } - if (key->topic_hash > tr->topic_hash) { return +1; } - if (key->transfer_id < tr->transfer_id) { return -1; } - if (key->transfer_id > tr->transfer_id) { return +1; } + const tx_transfer_t* const outer = (const tx_transfer_t*)user; + const tx_transfer_t* const inner = CAVL2_TO_OWNER(node, tx_transfer_t, index_deadline); // clang-format off + if (outer->deadline < inner->deadline) { return -1; } + if (outer->deadline > inner->deadline) { return +1; } + if (outer->seq_no < inner->seq_no) { return -1; } + if (outer->seq_no > inner->seq_no) { return +1; } return 0; // clang-format on } -static int32_t tx_cavl_compare_transfer(const void* const user, const udpard_tree_t* const node) + +/// Shall a transfer-ID collision occur due to PRNG faults, we want to handle it correctly, which is to allow +/// non-unique transfer-IDs in the reliable index such that they are co-located, then use lower bound search. +/// Lookups will then disambiguate ad-hoc. +typedef struct tx_key_transfer_id_t { - const tx_transfer_key_t* const key = (const tx_transfer_key_t*)user; - const tx_transfer_t* const tr = CAVL2_TO_OWNER(node, tx_transfer_t, index_transfer); // clang-format off - if (key->topic_hash < tr->topic_hash) { return -1; } - if (key->topic_hash > tr->topic_hash) { return +1; } + uint64_t transfer_id; + uint64_t seq_no; +} tx_key_transfer_id_t; + +static int32_t tx_cavl_compare_transfer_id(const void* const user, const udpard_tree_t* const node) +{ + const tx_key_transfer_id_t* const key = (const tx_key_transfer_id_t*)user; + const tx_transfer_t* const tr = CAVL2_TO_OWNER(node, tx_transfer_t, index_transfer_id); // clang-format off if (key->transfer_id < tr->transfer_id) { return -1; } if (key->transfer_id > tr->transfer_id) { return +1; } + if (key->seq_no < tr->seq_no) { return -1; } + if (key->seq_no > tr->seq_no) { return +1; } return 0; // clang-format on } -static int32_t tx_cavl_compare_transfer_remote(const void* const user, const udpard_tree_t* const node) -{ - const tx_transfer_key_t* const key = (const tx_transfer_key_t*)user; - const tx_transfer_t* const tr = CAVL2_TO_OWNER(node, tx_transfer_t, index_transfer_ack); // clang-format off - if (key->topic_hash < tr->remote_topic_hash) { return -1; } - if (key->topic_hash > tr->remote_topic_hash) { return +1; } - if (key->transfer_id < tr->remote_transfer_id) { return -1; } - if (key->transfer_id > tr->remote_transfer_id) { return +1; } - return 0; // clang-format on -} - -static tx_transfer_t* tx_transfer_find(udpard_tx_t* const tx, const uint64_t topic_hash, const uint64_t transfer_id) -{ - const tx_transfer_key_t key = { .topic_hash = topic_hash, .transfer_id = transfer_id }; - return CAVL2_TO_OWNER( - cavl2_find(tx->index_transfer, &key, &tx_cavl_compare_transfer), tx_transfer_t, index_transfer); -} /// True iff listed in at least one interface queue. 
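The collision-probability figure cited in tx_receive_ack(), tx_send_ack(), and udpard_tx_cancel() below follows from the birthday bound: with n pending transfers drawing 64-bit IDs uniformly at random, P(collision) ~= n*(n-1)/2 / 2^64. For n = 10^4 that is ~5.0e7 / 1.8e19 ~= 2.7e-12, on the order of one in a few hundred billion, which is why the lower-bound scan over co-located keys terminates after a single step in practice.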
static bool tx_is_pending(const udpard_tx_t* const tx, const tx_transfer_t* const tr) @@ -891,20 +842,13 @@ static udpard_us_t tx_ack_timeout(const udpard_us_t baseline, const udpard_prio_ static void tx_stage_if(udpard_tx_t* const tx, tx_transfer_t* const tr) { UDPARD_ASSERT(!cavl2_is_inserted(tx->index_staged, &tr->index_staged)); + UDPARD_ASSERT(tr->kind == frame_msg_reliable); const uint_fast8_t epoch = tr->epoch++; const udpard_us_t timeout = tx_ack_timeout(tx->ack_baseline_timeout, tr->priority, epoch); tr->staged_until += timeout; if ((tr->deadline - timeout) >= tr->staged_until) { - // Insert into staged index with deterministic tie-breaking. - const tx_time_key_t key = { .time = tr->staged_until, - .topic_hash = tr->topic_hash, - .transfer_id = tr->transfer_id }; - // Ensure we didn't collide with another entry that should be unique. - const udpard_tree_t* const tree_staged = cavl2_find_or_insert(&tx->index_staged, // - &key, - tx_cavl_compare_staged, - &tr->index_staged, - cavl2_trivial_factory); + const udpard_tree_t* const tree_staged = + cavl2_find_or_insert(&tx->index_staged, tr, tx_cavl_compare_staged, &tr->index_staged, cavl2_trivial_factory); UDPARD_ASSERT(tree_staged == &tr->index_staged); (void)tree_staged; } @@ -933,8 +877,7 @@ static void tx_promote_staged_transfers(udpard_tx_t* const self, const udpard_us tx_stage_if(self, tr); // Enqueue for transmission unless it's been there since the last attempt (stalled interface?) for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - if (((tr->iface_bitmap & (1U << i)) != 0) && !is_listed(&self->queue[i][tr->priority], &tr->queue[i])) { - UDPARD_ASSERT(tr->head[i] != NULL); // cannot stage without payload, doesn't make sense + if ((tr->head[i] != NULL) && !is_listed(&self->queue[i][tr->priority], &tr->queue[i])) { UDPARD_ASSERT(tr->cursor[i] == tr->head[i]); // must have been rewound after last attempt enlist_head(&self->queue[i][tr->priority], &tr->queue[i]); } @@ -989,11 +932,10 @@ static bool tx_push(udpard_tx_t* const tx, const udpard_us_t deadline, const meta_t meta, const uint16_t iface_bitmap, - const udpard_udpip_ep_t p2p_destination[UDPARD_IFACE_COUNT_MAX], + const udpard_remote_t* const p2p_remote, // only for P2P transfers const udpard_bytes_scattered_t payload, void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), - const udpard_user_context_t user, - tx_transfer_t** const out_transfer) + const udpard_user_context_t user) { UDPARD_ASSERT(now <= deadline); UDPARD_ASSERT(tx != NULL); @@ -1014,20 +956,18 @@ static bool tx_push(udpard_tx_t* const tx, return false; } mem_zero(sizeof(*tr), tr); - tr->epoch = 0; - tr->staged_until = now; - tr->topic_hash = meta.topic_hash; - tr->transfer_id = meta.transfer_id; - tr->remote_topic_hash = meta.topic_hash; - tr->remote_transfer_id = meta.transfer_id; - tr->deadline = deadline; - tr->reliable = meta.flag_reliable; - tr->priority = meta.priority; - tr->iface_bitmap = iface_bitmap; - tr->user = user; - tr->feedback = feedback; + tr->epoch = 0; + tr->staged_until = now; + tr->seq_no = tx->next_seq_no++; + tr->transfer_id = meta.transfer_id; + tr->deadline = deadline; + tr->kind = meta.kind; + tr->priority = meta.priority; + tr->user = user; + tr->feedback = feedback; + tr->is_p2p = (p2p_remote != NULL); + tr->p2p_remote = (p2p_remote != NULL) ? 
*p2p_remote : (udpard_remote_t){ 0 }; for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - tr->p2p_destination[i] = p2p_destination[i]; tr->head[i] = tr->cursor[i] = NULL; } @@ -1048,7 +988,7 @@ static bool tx_push(udpard_tx_t* const tx, const size_t enqueued_frames_before = tx->enqueued_frames_count; bool oom = false; for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - if ((tr->iface_bitmap & (1U << i)) != 0) { + if ((iface_bitmap & (1U << i)) != 0) { if (tr->head[i] == NULL) { tr->head[i] = tx_spool(tx, tx->memory.payload[i], tx->mtu[i], meta, payload); tr->cursor[i] = tr->head[i]; @@ -1058,11 +998,11 @@ static bool tx_push(udpard_tx_t* const tx, } // Detect which interfaces can use the same spool to conserve memory. for (size_t j = i + 1; j < UDPARD_IFACE_COUNT_MAX; j++) { - if (((tr->iface_bitmap & (1U << j)) != 0) && tx_spool_shareable(tx->mtu[i], - tx->memory.payload[i], - tx->mtu[j], - tx->memory.payload[j], - meta.transfer_payload_size)) { + if (((iface_bitmap & (1U << j)) != 0) && tx_spool_shareable(tx->mtu[i], + tx->memory.payload[i], + tx->mtu[j], + tx->memory.payload[j], + meta.transfer_payload_size)) { tr->head[j] = tr->head[i]; tr->cursor[j] = tr->cursor[i]; tx_frame_t* frame = tr->head[j]; @@ -1087,47 +1027,60 @@ static bool tx_push(udpard_tx_t* const tx, // Enqueue for transmission immediately. for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - if ((tr->iface_bitmap & (1U << i)) != 0) { + if ((iface_bitmap & (1U << i)) != 0) { + UDPARD_ASSERT(tr->head[i] != NULL); enlist_head(&tx->queue[i][tr->priority], &tr->queue[i]); + } else { + UDPARD_ASSERT(tr->head[i] == NULL); } } + // Add to the staged index so that it is repeatedly re-enqueued later until acknowledged or expired. - if (meta.flag_reliable) { + if (meta.kind == frame_msg_reliable) { tx_stage_if(tx, tr); } + // Add to the deadline index for expiration management. - // Insert into deadline index with deterministic tie-breaking. - const tx_time_key_t deadline_key = { .time = tr->deadline, - .topic_hash = tr->topic_hash, - .transfer_id = tr->transfer_id }; - // Ensure we didn't collide with another entry that should be unique. const udpard_tree_t* const tree_deadline = cavl2_find_or_insert( - &tx->index_deadline, &deadline_key, tx_cavl_compare_deadline, &tr->index_deadline, cavl2_trivial_factory); + &tx->index_deadline, tr, tx_cavl_compare_deadline, &tr->index_deadline, cavl2_trivial_factory); UDPARD_ASSERT(tree_deadline == &tr->index_deadline); (void)tree_deadline; - // Add to the transfer index for incoming ack management. - const tx_transfer_key_t transfer_key = { .topic_hash = tr->topic_hash, .transfer_id = tr->transfer_id }; - const udpard_tree_t* const tree_transfer = cavl2_find_or_insert( - &tx->index_transfer, &transfer_key, tx_cavl_compare_transfer, &tr->index_transfer, cavl2_trivial_factory); - UDPARD_ASSERT(tree_transfer == &tr->index_transfer); // ensure no duplicates; checked at the API level - (void)tree_transfer; + + // Add to the transfer index for incoming ack management and cancellation. + const tx_key_transfer_id_t key_id = { .transfer_id = tr->transfer_id, .seq_no = tr->seq_no }; + const udpard_tree_t* const tree_id = cavl2_find_or_insert( + &tx->index_transfer_id, &key_id, tx_cavl_compare_transfer_id, &tr->index_transfer_id, cavl2_trivial_factory); + UDPARD_ASSERT(tree_id == &tr->index_transfer_id); + (void)tree_id; + // Add to the agewise list for sacrifice management on queue exhaustion. enlist_head(&tx->agewise, &tr->agewise); - // Finalize. 
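The no-overflow claim on the epoch counter in tx_transfer_t checks out arithmetically. Assuming tx_ack_timeout() doubles the retransmission interval per epoch (the function body is outside this hunk, so doubling is an inference from the "exponential backoff" comment), a 1 us baseline reaches 2^48 us ~= 2.8e14 us ~= 8.9 years by epoch 48, far beyond any practical deadline, so the transfer is retired by the deadline index long before the uint_fast8_t counter can wrap.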
- if (out_transfer != NULL) { - *out_transfer = tr; - } return true; } /// Handle an ACK received from a remote node. -static void tx_receive_ack(udpard_rx_t* const rx, const uint64_t topic_hash, const uint64_t transfer_id) +static void tx_receive_ack(udpard_rx_t* const rx, const uint64_t sender_uid, const uint64_t transfer_id) { if (rx->tx != NULL) { - tx_transfer_t* const tr = tx_transfer_find(rx->tx, topic_hash, transfer_id); - if ((tr != NULL) && tr->reliable) { - tx_transfer_retire(rx->tx, tr, true); + // A transfer-ID collision is astronomically unlikely: given 10k simultaneously pending reliable transfers, + // which is outside typical usage, the probability of a collision is about 1 in 500 billion. However, we + // take into account that the PRNG used to seed the transfer-ID may be imperfect, so we add explicit collision + // handling. In all practical scenarios at most a single iteration will be needed. + const tx_key_transfer_id_t key = { .transfer_id = transfer_id, .seq_no = 0 }; + tx_transfer_t* tr = + CAVL2_TO_OWNER(cavl2_lower_bound(rx->tx->index_transfer_id, &key, &tx_cavl_compare_transfer_id), + tx_transfer_t, + index_transfer_id); + while ((tr != NULL) && (tr->transfer_id == transfer_id)) { + // Outgoing reliable P2P transfers only accept acks from the intended recipient. + // Non-P2P accept acks from any sender. + const bool destination_match = !tr->is_p2p || (tr->p2p_remote.uid == sender_uid); + if ((tr->kind == frame_msg_reliable) && destination_match) { + tx_transfer_retire(rx->tx, tr, true); + break; + } + tr = CAVL2_TO_OWNER(cavl2_next_greater(&tr->index_transfer_id), tx_transfer_t, index_transfer_id); } } } @@ -1137,22 +1090,37 @@ static void tx_receive_ack(udpard_rx_t* const rx, const uint64_t topic_hash, con static void tx_send_ack(udpard_rx_t* const rx, const udpard_us_t now, const udpard_prio_t priority, - const uint64_t topic_hash, const uint64_t transfer_id, const udpard_remote_t remote) { udpard_tx_t* const tx = rx->tx; if (tx != NULL) { // Check if an ack for this transfer is already enqueued. - const tx_transfer_key_t key = { .topic_hash = topic_hash, .transfer_id = transfer_id }; - tx_transfer_t* const prior = - CAVL2_TO_OWNER(cavl2_find(tx->index_transfer_ack, &key, &tx_cavl_compare_transfer_remote), + // A transfer-ID collision is astronomically unlikely: given 10k simultaneously pending reliable transfers, + // which is outside typical usage, the probability of a collision is about 1 in 500 billion. However, we + // take into account that the PRNG used to seed the transfer-ID may be imperfect, so we add explicit collision + // handling. In all practical scenarios at most a single iteration will be needed. + const tx_key_transfer_id_t key = { .transfer_id = transfer_id, .seq_no = 0 }; + tx_transfer_t* prior = + CAVL2_TO_OWNER(cavl2_lower_bound(rx->tx->index_transfer_id, &key, &tx_cavl_compare_transfer_id), tx_transfer_t, - index_transfer_ack); - const uint16_t prior_ep_bitmap = (prior != NULL) ? valid_ep_bitmap(prior->p2p_destination) : 0U; - UDPARD_ASSERT((prior == NULL) || (prior_ep_bitmap == prior->iface_bitmap)); - const uint16_t new_ep_bitmap = valid_ep_bitmap(remote.endpoints); - const bool new_better = (new_ep_bitmap & (uint16_t)(~prior_ep_bitmap)) != 0U; + index_transfer_id); + // Scan all matches (there will be at most 1 barring extremely unlikely hash collisions) to find the ack. 
+ while (prior != NULL) { + if (prior->transfer_id != transfer_id) { + prior = NULL; + break; // scanned all contenders, no match + } + if ((prior->kind == frame_ack) && (prior->p2p_remote.uid == remote.uid)) { + break; // match found + } + prior = CAVL2_TO_OWNER(cavl2_next_greater(&prior->index_transfer_id), tx_transfer_t, index_transfer_id); + } + + // Determine if the new ack has better return path discovery than the prior one (if any). + const uint16_t prior_ep_bitmap = (prior != NULL) ? valid_ep_bitmap(prior->p2p_remote.endpoints) : 0U; + const uint16_t new_ep_bitmap = valid_ep_bitmap(remote.endpoints); + const bool new_better = (new_ep_bitmap & (uint16_t)(~prior_ep_bitmap)) != 0U; if (!new_better) { return; // Can we get an ack? We have ack at home! } @@ -1163,47 +1131,23 @@ static void tx_send_ack(udpard_rx_t* const rx, // Even if the new, better ack fails to enqueue for some reason, it's no big deal -- we will send the next one. // The only reason it might fail is an OOM but we just freed a slot so it should be fine. - // Serialize the ACK payload. - byte_t message[ACK_SIZE_BYTES]; - byte_t* ptr = message; - ptr = serialize_u64(ptr, topic_hash); - ptr = serialize_u64(ptr, transfer_id); - UDPARD_ASSERT((ptr - message) == ACK_SIZE_BYTES); - (void)ptr; - // Enqueue the transfer. - const udpard_bytes_t payload = { .size = ACK_SIZE_BYTES, .data = message }; - const meta_t meta = { - .priority = priority, - .flag_reliable = false, - .flag_acknowledgement = true, - .transfer_payload_size = (uint32_t)payload.size, - .transfer_id = tx->p2p_transfer_id++, - .sender_uid = tx->local_uid, - .topic_hash = remote.uid, - }; - tx_transfer_t* tr = NULL; + const meta_t meta = { .priority = priority, + .kind = frame_ack, + .transfer_payload_size = 0, + .transfer_id = transfer_id, + .sender_uid = tx->local_uid }; const uint32_t count = tx_push(tx, now, now + ACK_TX_DEADLINE, meta, new_ep_bitmap, - remote.endpoints, - (udpard_bytes_scattered_t){ .bytes = payload, .next = NULL }, + &remote, + (udpard_bytes_scattered_t){ .bytes = { .size = 0, .data = "" }, .next = NULL }, NULL, - UDPARD_USER_CONTEXT_NULL, - &tr); + UDPARD_USER_CONTEXT_NULL); UDPARD_ASSERT(count <= 1); - if (count == 1) { // ack is always a single-frame transfer, so we get either 0 or 1 - UDPARD_ASSERT(tr != NULL); - tr->remote_topic_hash = topic_hash; - tr->remote_transfer_id = transfer_id; - (void)cavl2_find_or_insert(&tx->index_transfer_ack, - &key, - tx_cavl_compare_transfer_remote, - &tr->index_transfer_ack, - cavl2_trivial_factory); - } else { + if (count != 1) { // ack is always a single-frame transfer rx->errors_ack_tx++; } } else { @@ -1213,7 +1157,7 @@ static void tx_send_ack(udpard_rx_t* const rx, bool udpard_tx_new(udpard_tx_t* const self, const uint64_t local_uid, - const uint64_t p2p_transfer_id_initial, + const uint64_t p2p_transfer_id_seed, const size_t enqueued_frames_limit, const udpard_tx_mem_resources_t memory, const udpard_tx_vtable_t* const vtable) @@ -1224,14 +1168,16 @@ bool udpard_tx_new(udpard_tx_t* const self, mem_zero(sizeof(*self), self); self->vtable = vtable; self->local_uid = local_uid; - self->p2p_transfer_id = p2p_transfer_id_initial; + self->p2p_transfer_id = p2p_transfer_id_seed ^ local_uid; // extra entropy self->ack_baseline_timeout = UDPARD_TX_ACK_BASELINE_TIMEOUT_DEFAULT_us; self->enqueued_frames_limit = enqueued_frames_limit; self->enqueued_frames_count = 0; + self->next_seq_no = 0; self->memory = memory; - self->index_staged = NULL; + self->index_transfer_id = NULL; self->index_deadline = NULL; - 
self->index_transfer = NULL; + self->index_staged = NULL; + self->agewise = (udpard_list_t){ NULL, NULL }; self->user = NULL; for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { self->mtu[i] = UDPARD_MTU_DEFAULT; @@ -1249,7 +1195,6 @@ bool udpard_tx_push(udpard_tx_t* const self, const udpard_us_t deadline, const uint16_t iface_bitmap, const udpard_prio_t priority, - const uint64_t topic_hash, const uint64_t transfer_id, const udpard_bytes_scattered_t payload, void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort. @@ -1257,28 +1202,16 @@ bool udpard_tx_push(udpard_tx_t* const self, { bool ok = (self != NULL) && (deadline >= now) && (now >= 0) && (self->local_uid != 0) && ((iface_bitmap & UDPARD_IFACE_BITMAP_ALL) != 0) && (priority < UDPARD_PRIORITY_COUNT) && - ((payload.bytes.data != NULL) || (payload.bytes.size == 0U)) && - (tx_transfer_find(self, topic_hash, transfer_id) == NULL); + ((payload.bytes.data != NULL) || (payload.bytes.size == 0U)); if (ok) { const meta_t meta = { .priority = priority, - .flag_reliable = feedback != NULL, + .kind = (feedback != NULL) ? frame_msg_reliable : frame_msg_best, .transfer_payload_size = (uint32_t)bytes_scattered_size(payload), .transfer_id = transfer_id, .sender_uid = self->local_uid, - .topic_hash = topic_hash, }; - const udpard_udpip_ep_t blank_ep[UDPARD_IFACE_COUNT_MAX] = { 0 }; - ok = tx_push(self, // -------------------------------------- - now, - deadline, - meta, - iface_bitmap & UDPARD_IFACE_BITMAP_ALL, - blank_ep, - payload, - feedback, - user, - NULL); + ok = tx_push(self, now, deadline, meta, iface_bitmap & UDPARD_IFACE_BITMAP_ALL, NULL, payload, feedback, user); } return ok; } @@ -1299,18 +1232,15 @@ bool udpard_tx_push_p2p(udpard_tx_t* const self, if (ok) { const meta_t meta = { .priority = priority, - .flag_reliable = feedback != NULL, + .kind = (feedback != NULL) ? frame_msg_reliable : frame_msg_best, .transfer_payload_size = (uint32_t)bytes_scattered_size(payload), - .transfer_id = self->p2p_transfer_id++, + .transfer_id = self->p2p_transfer_id++, // Shared for all remotes, hence no ack ambiguity. .sender_uid = self->local_uid, - .topic_hash = remote.uid, }; - tx_transfer_t* tr = NULL; - ok = tx_push(self, now, deadline, meta, iface_bitmap, remote.endpoints, payload, feedback, user, &tr); - UDPARD_ASSERT((!ok) || (tr->transfer_id == meta.transfer_id)); - if (ok && (out_transfer_id != NULL)) { - *out_transfer_id = tr->transfer_id; + if (out_transfer_id != NULL) { + *out_transfer_id = meta.transfer_id; } + ok = tx_push(self, now, deadline, meta, iface_bitmap, &remote, payload, feedback, user); } return ok; } @@ -1347,10 +1277,10 @@ static void tx_eject_pending_frames(udpard_tx_t* const self, const udpard_us_t n .dscp = self->dscp_value_per_priority[tr->priority], .datagram = tx_frame_view(frame), .user = tr->user }; - const bool ep_valid = udpard_is_valid_endpoint(tr->p2p_destination[ifindex]); - UDPARD_ASSERT((!ep_valid) || ((tr->iface_bitmap & (1U << ifindex)) != 0U)); - const bool ejected = ep_valid ? self->vtable->eject_p2p(self, &ejection, tr->p2p_destination[ifindex]) - : self->vtable->eject_subject(self, &ejection); + // + const bool ejected = tr->is_p2p + ? 
self->vtable->eject_p2p(self, &ejection, tr->p2p_remote.endpoints[ifindex]) + : self->vtable->eject_subject(self, &ejection); if (!ejected) { // The easy case -- no progress was made at this time; break; // don't change anything, just try again later as-is } @@ -1369,9 +1299,9 @@ static void tx_eject_pending_frames(udpard_tx_t* const self, const udpard_us_t n tr->cursor[ifindex] = tr->head[ifindex]; delist(&self->queue[ifindex][tr->priority], &tr->queue[ifindex]); // no longer pending for transmission UDPARD_ASSERT(!last_attempt || (tr->head[ifindex] == NULL)); // this iface is done with the payload - if (last_attempt && !tr->reliable && !tx_is_pending(self, tr)) { // remove early once all ifaces are done + if (last_attempt && (tr->kind != frame_msg_reliable) && !tx_is_pending(self, tr)) { UDPARD_ASSERT(tr->feedback == NULL); // non-reliable transfers have no feedback callback - tx_transfer_retire(self, tr, true); + tx_transfer_retire(self, tr, true); // remove early once all ifaces are done } } } @@ -1390,39 +1320,30 @@ void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now, const uint16 } } -bool udpard_tx_cancel(udpard_tx_t* const self, const uint64_t topic_hash, const uint64_t transfer_id) +bool udpard_tx_cancel(udpard_tx_t* const self, const uint64_t transfer_id, const bool reliable) { bool cancelled = false; if (self != NULL) { - tx_transfer_t* const tr = tx_transfer_find(self, topic_hash, transfer_id); - if (tr != NULL) { - tx_transfer_retire(self, tr, false); - cancelled = true; + // A transfer-ID collision is astronomically unlikely: given 10k simultaneously pending reliable transfers, + // which is outside typical usage, the probability of a collision is about 1 in 500 billion. However, we + // take into account that the PRNG used to seed the transfer-ID may be imperfect, so we add explicit collision + // handling. In all practical scenarios at most a single iteration will be needed. + const tx_key_transfer_id_t key = { .transfer_id = transfer_id, .seq_no = 0 }; + tx_transfer_t* tr = + CAVL2_TO_OWNER(cavl2_lower_bound(self->index_transfer_id, &key, &tx_cavl_compare_transfer_id), + tx_transfer_t, + index_transfer_id); + while ((tr != NULL) && (tr->transfer_id == transfer_id)) { + if (tr->kind == (reliable ? frame_msg_reliable : frame_msg_best)) { // Cancel all matching (normally <=1). + tx_transfer_retire(self, tr, false); + cancelled = true; + } + tr = CAVL2_TO_OWNER(cavl2_next_greater(&tr->index_transfer_id), tx_transfer_t, index_transfer_id); } } return cancelled; } -size_t udpard_tx_cancel_all(udpard_tx_t* const self, const uint64_t topic_hash) -{ - size_t count = 0; - if (self != NULL) { - // Find the first transfer with matching topic_hash using transfer_id=0 as lower bound. - const tx_transfer_key_t key = { .topic_hash = topic_hash, .transfer_id = 0 }; - tx_transfer_t* tr = CAVL2_TO_OWNER( - cavl2_lower_bound(self->index_transfer, &key, &tx_cavl_compare_transfer), tx_transfer_t, index_transfer); - // Iterate through all transfers with the same topic_hash. 
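udpard_tx_poll() above pairs with udpard_tx_pending_ifaces() below to drive transmission from the application IO loop. A minimal sketch, assuming now_us() is the application's monotonic microsecond clock and tx is an initialized udpard_tx_t:

    for (;;) {
        const uint16_t pending = udpard_tx_pending_ifaces(&tx);
        if (pending != 0U) {
            udpard_tx_poll(&tx, now_us(), pending);  /* Ejects frames via the vtable callbacks. */
        } else {
            /* Nothing pending; block on the sockets or sleep until the next deadline. */
        }
    }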
- while ((tr != NULL) && (tr->topic_hash == topic_hash)) { - tx_transfer_t* const next = - CAVL2_TO_OWNER(cavl2_next_greater(&tr->index_transfer), tx_transfer_t, index_transfer); - tx_transfer_retire(self, tr, false); - count++; - tr = next; - } - } - return count; -} - uint16_t udpard_tx_pending_ifaces(const udpard_tx_t* const self) { uint16_t bitmap = 0; @@ -1468,9 +1389,8 @@ void udpard_tx_refcount_dec(const udpard_bytes_t tx_payload_view) void udpard_tx_free(udpard_tx_t* const self) { if (self != NULL) { - while (self->index_transfer != NULL) { - tx_transfer_t* tr = CAVL2_TO_OWNER(self->index_transfer, tx_transfer_t, index_transfer); - tx_transfer_retire(self, tr, false); + while (self->agewise.tail != NULL) { + tx_transfer_retire(self, LIST_TAIL(self->agewise, tx_transfer_t, agewise), false); } } } @@ -1866,6 +1786,9 @@ typedef struct rx_session_t bool initialized; ///< Set after the first frame is seen. + // TODO: Static slots are taking too much space; allocate them dynamically instead. + // Each is <=56 bytes so it fits nicely into a 64-byte o1heap block. + // The slot state enum can be replaced with a simple "done" flag. rx_slot_t slots[RX_SLOT_COUNT]; } rx_session_t; @@ -1927,22 +1850,14 @@ static int32_t cavl_compare_rx_session_by_remote_uid(const void* const user, con return 0; // clang-format on } -// Key for reordering deadline ordering with stable tiebreaking. -typedef struct -{ - udpard_us_t deadline; - uint64_t remote_uid; -} rx_reordering_key_t; - -// Compare sessions by reordering deadline then by remote UID. static int32_t cavl_compare_rx_session_by_reordering_deadline(const void* const user, const udpard_tree_t* const node) { - const rx_reordering_key_t* const key = (const rx_reordering_key_t*)user; - const rx_session_t* const ses = CAVL2_TO_OWNER(node, rx_session_t, index_reordering_window); // clang-format off - if (key->deadline < ses->reordering_window_deadline) { return -1; } - if (key->deadline > ses->reordering_window_deadline) { return +1; } - if (key->remote_uid < ses->remote.uid) { return -1; } - if (key->remote_uid > ses->remote.uid) { return +1; } + const rx_session_t* const outer = (const rx_session_t*)user; + const rx_session_t* const inner = CAVL2_TO_OWNER(node, rx_session_t, index_reordering_window); // clang-format off + if (outer->reordering_window_deadline < inner->reordering_window_deadline) { return -1; } + if (outer->reordering_window_deadline > inner->reordering_window_deadline) { return +1; } + if (outer->remote.uid < inner->remote.uid) { return -1; } + if (outer->remote.uid > inner->remote.uid) { return +1; } return 0; // clang-format on } @@ -2060,11 +1975,8 @@ static void rx_session_ordered_scan_slots(rx_session_t* const self, // closure deadline, but we ignore them because the nearest transfer overrides the more distant ones. if (slot != NULL) { self->reordering_window_deadline = slot->ts_min + self->port->reordering_window; - // Insert into reordering index with deterministic tie-breaking. 
- const rx_reordering_key_t key = { .deadline = self->reordering_window_deadline, - .remote_uid = self->remote.uid }; const udpard_tree_t* res = cavl2_find_or_insert(&rx->index_session_by_reordering, //---------------- - &key, + self, &cavl_compare_rx_session_by_reordering_deadline, &self->index_reordering_window, &cavl2_trivial_factory); @@ -2157,7 +2069,6 @@ static void rx_session_update(rx_session_t* const self, const uint_fast8_t ifindex) { UDPARD_ASSERT(self->remote.uid == frame->meta.sender_uid); - UDPARD_ASSERT(frame->meta.topic_hash == self->port->topic_hash); // must be checked by the caller beforehand // Animate the session to prevent it from being retired. enlist_head(&rx->list_session_by_animation, &self->list_by_animation); @@ -2208,9 +2119,9 @@ static void rx_session_update_ordered(rx_session_t* const self, &rx->errors_transfer_malformed); if (slot->state == rx_slot_done) { UDPARD_ASSERT(rx_session_is_transfer_interned(self, slot->transfer_id)); - if (frame->meta.flag_reliable) { + if (frame->meta.kind == frame_msg_reliable) { // Payload view: ((udpard_fragment_t*)cavl2_min(slot->fragments))->view - tx_send_ack(rx, ts, slot->priority, self->port->topic_hash, slot->transfer_id, self->remote); + tx_send_ack(rx, ts, slot->priority, slot->transfer_id, self->remote); } rx_session_ordered_scan_slots(self, rx, ts, false); } @@ -2218,9 +2129,9 @@ static void rx_session_update_ordered(rx_session_t* const self, // Note: transfers that are no longer retained in the history will not solicit an ACK response, // meaning that the sender will not get a confirmation if the retransmitted transfer is too old. // We assume that RX_TRANSFER_HISTORY_COUNT is enough to cover all sensible use cases. - if ((is_interned || is_ejected) && frame->meta.flag_reliable && (frame->base.offset == 0U)) { + if ((is_interned || is_ejected) && (frame->meta.kind == frame_msg_reliable) && (frame->base.offset == 0U)) { // Payload view: frame->base.payload - tx_send_ack(rx, ts, frame->meta.priority, self->port->topic_hash, frame->meta.transfer_id, self->remote); + tx_send_ack(rx, ts, frame->meta.priority, frame->meta.transfer_id, self->remote); } mem_free_payload(payload_deleter, frame->base.origin); } @@ -2252,15 +2163,15 @@ static void rx_session_update_unordered(rx_session_t* const self, &rx->errors_oom, &rx->errors_transfer_malformed); if (slot->state == rx_slot_done) { - if (frame->meta.flag_reliable) { // Payload view: ((udpard_fragment_t*)cavl2_min(slot->fragments))->view - tx_send_ack(rx, ts, slot->priority, self->port->topic_hash, slot->transfer_id, self->remote); + if (frame->meta.kind == frame_msg_reliable) { + tx_send_ack(rx, ts, slot->priority, slot->transfer_id, self->remote); } rx_session_eject(self, rx, slot); } - } else { // retransmit ACK if needed - if (frame->meta.flag_reliable && (frame->base.offset == 0U)) { // Payload view: frame->base.payload + } else { // retransmit ACK if needed + if ((frame->meta.kind == frame_msg_reliable) && (frame->base.offset == 0U)) { UDPARD_ASSERT(rx_session_is_transfer_ejected(self, frame->meta.transfer_id)); - tx_send_ack(rx, ts, frame->meta.priority, self->port->topic_hash, frame->meta.transfer_id, self->remote); + tx_send_ack(rx, ts, frame->meta.priority, frame->meta.transfer_id, self->remote); } mem_free_payload(payload_deleter, frame->base.origin); } @@ -2387,7 +2298,6 @@ void udpard_rx_poll(udpard_rx_t* const self, const udpard_us_t now) } bool udpard_rx_port_new(udpard_rx_port_t* const self, - const uint64_t topic_hash, const size_t extent, const 
udpard_rx_mode_t mode, const udpard_us_t reordering_window, @@ -2398,9 +2308,9 @@ bool udpard_rx_port_new(udpard_rx_port_t* const self, (vtable->on_message != NULL); if (ok) { mem_zero(sizeof(*self), self); - self->topic_hash = topic_hash; self->extent = extent; self->mode = mode; + self->is_p2p = false; self->memory = memory; self->index_session_by_remote_uid = NULL; self->vtable = vtable; @@ -2426,6 +2336,18 @@ bool udpard_rx_port_new(udpard_rx_port_t* const self, return ok; } +bool udpard_rx_port_new_p2p(udpard_rx_port_t* const self, + const size_t extent, + const udpard_rx_mem_resources_t memory, + const udpard_rx_port_vtable_t* const vtable) +{ + if (udpard_rx_port_new(self, extent, udpard_rx_unordered, 0, memory, vtable)) { + self->is_p2p = true; + return true; + } + return false; +} + void udpard_rx_port_free(udpard_rx_t* const rx, udpard_rx_port_t* const port) { if ((rx != NULL) && (port != NULL)) { @@ -2437,17 +2359,6 @@ void udpard_rx_port_free(udpard_rx_t* const rx, udpard_rx_port_t* const port) } } -static void rx_accept_ack(udpard_rx_t* const rx, const udpard_bytes_t message) -{ - if (message.size >= ACK_SIZE_BYTES) { - uint64_t topic_hash = 0; - uint64_t transfer_id = 0; - (void)deserialize_u64(((const byte_t*)message.data) + 0U, &topic_hash); - (void)deserialize_u64(((const byte_t*)message.data) + 8U, &transfer_id); - tx_receive_ack(rx, topic_hash, transfer_id); - } -} - bool udpard_rx_port_push(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_us_t timestamp, @@ -2460,34 +2371,28 @@ bool udpard_rx_port_push(udpard_rx_t* const rx, (datagram_payload.data != NULL) && (iface_index < UDPARD_IFACE_COUNT_MAX) && (payload_deleter.vtable != NULL) && (payload_deleter.vtable->free != NULL); if (ok) { + // Parse and validate the frame. rx_frame_t frame = { 0 }; uint32_t frame_index = 0; uint32_t offset_32 = 0; - const bool frame_valid = header_deserialize( + bool frame_valid = header_deserialize( datagram_payload, &frame.meta, &frame_index, &offset_32, &frame.base.crc, &frame.base.payload); + frame_valid = frame_valid && ((frame.meta.kind != frame_ack) || port->is_p2p); // ACKs only valid in P2P ports. + (void)frame_index; // currently not used by this reassembler implementation. frame.base.offset = (size_t)offset_32; - (void)frame_index; // currently not used by this reassembler implementation. frame.base.origin = datagram_payload; // Take ownership of the payload. + + // Process the parsed frame. if (frame_valid) { - if (frame.meta.topic_hash == port->topic_hash) { - if (!frame.meta.flag_acknowledgement) { - port->vtable_private->accept(rx, port, timestamp, source_ep, &frame, payload_deleter, iface_index); - } else { - UDPARD_ASSERT(frame.base.offset == 0); // checked by the frame parser - rx_accept_ack(rx, frame.base.payload); - mem_free_payload(payload_deleter, frame.base.origin); - } - } else { // Collisions are discovered early so that we don't attempt to allocate sessions for them. 
+ if (frame.meta.kind != frame_ack) { + port->vtable_private->accept(rx, port, timestamp, source_ep, &frame, payload_deleter, iface_index); + } else { + tx_receive_ack(rx, frame.meta.sender_uid, frame.meta.transfer_id); mem_free_payload(payload_deleter, frame.base.origin); - udpard_remote_t remote = { .uid = frame.meta.sender_uid }; - remote.endpoints[iface_index] = source_ep; - if (port->vtable->on_collision != NULL) { - port->vtable->on_collision(rx, port, remote); - } } } else { mem_free_payload(payload_deleter, frame.base.origin); - ++rx->errors_frame_malformed; + rx->errors_frame_malformed++; } } return ok; diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 4e2ce06..6ce6524 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -397,7 +397,8 @@ struct udpard_tx_t /// The globally unique identifier of the local node. Must not change after initialization. uint64_t local_uid; - /// A random-initialized transfer-ID counter for all outgoing P2P transfers. Must not be changed by the application. + /// A random-initialized counter for outgoing P2P transfers. Must not be changed by the application. + /// The shared counter for all P2P transfers ensures uniqueness of the transfer-ID per remote node. uint64_t p2p_transfer_id; /// The maximum number of Cyphal transfer payload bytes per UDP datagram. See UDPARD_MTU_*. @@ -433,6 +434,10 @@ struct udpard_tx_t /// READ-ONLY! size_t enqueued_frames_count; + /// Starts at zero and increments with every enqueued transfer. Do not modify! + /// This is used internally as a tiebreaker in non-unique indexes. + uint64_t next_seq_no; + udpard_tx_mem_resources_t memory; /// Error counters incremented automatically when the corresponding error condition occurs. @@ -444,11 +449,10 @@ struct udpard_tx_t /// Internal use only, do not modify! See tx_transfer_t for details. udpard_list_t queue[UDPARD_IFACE_COUNT_MAX][UDPARD_PRIORITY_COUNT]; ///< Next to transmit at the tail. - udpard_list_t agewise; ///< Oldest at the tail. - udpard_tree_t* index_staged; + udpard_tree_t* index_transfer_id; udpard_tree_t* index_deadline; - udpard_tree_t* index_transfer; - udpard_tree_t* index_transfer_ack; + udpard_tree_t* index_staged; + udpard_list_t agewise; ///< Oldest at the tail. /// Opaque pointer for the application use only. Not accessed by the library. void* user; @@ -471,7 +475,7 @@ struct udpard_tx_t /// True on success, false if any of the arguments are invalid. bool udpard_tx_new(udpard_tx_t* const self, const uint64_t local_uid, - const uint64_t p2p_transfer_id_initial, + const uint64_t p2p_transfer_id_seed, const size_t enqueued_frames_limit, const udpard_tx_mem_resources_t memory, const udpard_tx_vtable_t* const vtable); @@ -483,7 +487,10 @@ bool udpard_tx_new(udpard_tx_t* const self, /// The caller shall increment the transfer-ID counter after each successful invocation of this function per topic. /// There shall be a separate transfer-ID counter per topic. The initial value shall be chosen randomly /// such that it is likely to be distinct per application startup (embedded systems can use noinit memory sections, -/// hash uninitialized SRAM, use timers or ADC noise, etc). +/// hash uninitialized SRAM, use timers or ADC noise, etc); hashing with the topic hash is possible for extra entropy. +/// It is essential to provide a monotonic contiguous counter per topic to allow remotes to recover the original +/// publication order and detect lost messages. +/// The random starting point will ensure global uniqueness across topics. 
/// Related thread on random transfer-ID init: https://forum.opencyphal.org/t/improve-the-transfer-id-timeout/2375 /// /// The user context value is carried through to the callbacks. It must contain enough context to allow subject-ID @@ -497,9 +504,6 @@ bool udpard_tx_new(udpard_tx_t* const self, /// The subject-ID cannot be computed beforehand at the time of enqueuing because the topic->subject consensus protocol /// may find a different subject-ID allocation between the time of enqueuing and the time of (re)transmission. /// -/// An attempt to push a transfer with a (topic hash, transfer-ID) pair that is already enqueued will fail, -/// as that violates the transfer-ID uniqueness requirement stated above. -/// /// The feedback callback is set to NULL for best-effort (non-acknowledged) transfers. Otherwise, the transfer is /// treated as reliable, requesting a delivery acknowledgement from remote subscribers with repeated retransmissions if /// necessary; it is guaranteed that delivery attempts will cease no later than by the specified deadline. @@ -522,21 +526,19 @@ bool udpard_tx_push(udpard_tx_t* const self, const udpard_us_t deadline, const uint16_t iface_bitmap, const udpard_prio_t priority, - const uint64_t topic_hash, const uint64_t transfer_id, const udpard_bytes_scattered_t payload, void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort. const udpard_user_context_t user); /// This is a specialization of the general push function for P2P transfers. -/// P2P transfers treat the topic hash as the destination node's UID. -/// The transfer-ID counter is shared for all P2P outgoing P2P transfers and is managed automatically. +/// The transfer-ID counter is managed automatically. /// If out_transfer_id is not NULL, the assigned internal transfer-ID is stored there for use with udpard_tx_cancel_p2p. bool udpard_tx_push_p2p(udpard_tx_t* const self, const udpard_us_t now, const udpard_us_t deadline, const udpard_prio_t priority, - const udpard_remote_t remote, // Endpoints may be invalid for some ifaces. + const udpard_remote_t remote, // Endpoints may be empty (zero) for some ifaces. const udpard_bytes_scattered_t payload, void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort. const udpard_user_context_t user, @@ -550,20 +552,14 @@ bool udpard_tx_push_p2p(udpard_tx_t* const self, /// The function may deallocate memory. The time complexity is logarithmic in the number of enqueued transfers. void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now, const uint16_t iface_bitmap); -/// Cancel a previously enqueued transfer. -/// To cancel a P2P transfer, pass the destination node's UID as the topic_hash. +/// Cancel a previously enqueued transfer of the specified transfer-ID and QoS. /// If provided, the feedback callback will be invoked with success==false. /// Not safe to call from the eject() callback. /// Returns true if a transfer was found and cancelled, false if no such transfer was found. /// The complexity is O(log t + f), where t is the number of enqueued transfers, /// and f is the number of frames in the transfer. /// The function will free the memory associated with the transfer. -bool udpard_tx_cancel(udpard_tx_t* const self, const uint64_t topic_hash, const uint64_t transfer_id); - -/// Like udpard_tx_cancel(), but cancels all transfers matching the given topic hash. -/// Returns the number of matched transfers. -/// This is important to invoke when destroying a topic to ensure no dangling callbacks remain. 
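The per-topic transfer-ID discipline described above fits in a few lines. This is a sketch under stated assumptions: entropy_seed() stands in for whatever randomness the platform offers (noinit RAM hash, ADC noise, timers), and the one-second deadline is arbitrary:

    typedef struct { uint64_t next_transfer_id; } topic_pub_t;

    static void topic_pub_init(topic_pub_t* const t)
    {
        t->next_transfer_id = entropy_seed();  /* Random start; likely distinct per application startup. */
    }

    /* Best-effort publication (feedback == NULL). The counter is incremented only on success,
       keeping the per-topic transfer-ID stream monotonic and contiguous as required. */
    static bool topic_publish(udpard_tx_t* const tx, topic_pub_t* const t, const udpard_us_t now,
                              const udpard_prio_t priority, const udpard_bytes_scattered_t payload)
    {
        const bool ok = udpard_tx_push(tx, now, now + 1000000, UDPARD_IFACE_BITMAP_ALL, priority,
                                       t->next_transfer_id, payload, NULL, UDPARD_USER_CONTEXT_NULL);
        if (ok) { t->next_transfer_id++; }
        return ok;
    }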
-size_t udpard_tx_cancel_all(udpard_tx_t* const self, const uint64_t topic_hash); +bool udpard_tx_cancel(udpard_tx_t* const self, const uint64_t transfer_id, const bool reliable); /// Returns a bitmap of interfaces that have pending transmissions. This is useful for IO multiplexing loops. /// Zero indicates that there are no pending transmissions. @@ -596,8 +592,7 @@ void udpard_tx_free(udpard_tx_t* const self); /// The application needs to listen to all these sockets simultaneously and pass the received UDP datagrams to the /// corresponding RX port instance as they arrive. /// -/// P2P transfers are handled in a similar way, except that the topic hash is replaced with the destination node's UID, -/// and the UDP/IP endpoints are unicast addresses instead of multicast addresses. +/// P2P transfers are handled in a similar way, except that the UDP/IP endpoints are unicast instead of multicast. /// /// Graphically, the subscription pipeline is arranged per port as shown below. /// Remember that the application with N RX ports would have N such pipelines, one per port. @@ -720,21 +715,11 @@ typedef struct udpard_rx_port_vtable_t { /// A new message is received on a port. The handler takes ownership of the payload; it must free it after use. void (*on_message)(udpard_rx_t*, udpard_rx_port_t*, udpard_rx_transfer_t); - - /// A topic hash collision is detected on a port. - /// On P2P ports, this indicates that the destination UID doesn't match the local UID (misaddressed message); - /// safe to ignore. - /// May be NULL if the application is not interested. - void (*on_collision)(udpard_rx_t*, udpard_rx_port_t*, udpard_remote_t); } udpard_rx_port_vtable_t; /// This type represents an open input port, such as a subscription to a topic. struct udpard_rx_port_t { - /// Mismatch will be filtered out and the collision notification callback invoked. - /// For P2P ports, this is the destination node's UID (i.e., the local node's UID). - uint64_t topic_hash; - /// Transfer payloads exceeding this extent may be truncated. /// The total size of the received payload may still exceed this extent setting by some small margin. size_t extent; @@ -743,6 +728,10 @@ struct udpard_rx_port_t udpard_rx_mode_t mode; udpard_us_t reordering_window; + /// True if this port is used for P2P transfers, false for subject subscriptions. + /// There shall be exactly one P2P port per RX instance. + bool is_p2p; + udpard_rx_mem_resources_t memory; /// Libudpard creates a new session instance per remote UID that emits transfers matching this port. @@ -839,18 +828,12 @@ void udpard_rx_poll(udpard_rx_t* const self, const udpard_us_t now); /// 3. Read data from the sockets continuously and forward each datagram to udpard_rx_port_push(), /// along with the index of the redundant interface the datagram was received on. /// -/// For P2P ports, the procedure is identical, except that the topic hash is set to the local node's UID. -/// There must be exactly one P2P port per node. The P2P port is also used for acks. -/// /// The extent defines the maximum possible size of received objects, considering also possible future data type /// versions with new fields. It is safe to pick larger values. Note well that the extent is not the same thing as /// the maximum size of the object, it is usually larger! Transfers that carry payloads beyond the specified extent /// still keep fragments that start before the extent, so the delivered payload may exceed it; fragments starting past /// the limit are dropped. 
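A minimal subscription-setup sketch against the API as of this first patch (handle_message, mem, and the 1 KiB extent are placeholders; the follow-up patch removing the ORDERED mode may further alter this signature):

    udpard_rx_port_t port;
    const udpard_rx_port_vtable_t my_vtable = { .on_message = &handle_message };
    const bool ok = udpard_rx_port_new(&port,
                                       1024,                 /* Extent: a safe upper bound on payload size. */
                                       udpard_rx_unordered,  /* The suggested default reassembly mode. */
                                       0,                    /* Reordering window; unused in unordered mode. */
                                       mem,                  /* A configured udpard_rx_mem_resources_t. */
                                       &my_vtable);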
/// -/// The topic hash is needed to detect and ignore transfers that use different topics on the same subject-ID. -/// The collision callback is invoked if a topic hash collision is detected. -/// /// If not sure which reassembly mode to choose, consider `udpard_rx_unordered` as the default choice. /// For ordering-sensitive use cases, such as state estimators and control loops, use `udpard_rx_ordered` with a short /// window. @@ -860,13 +843,18 @@ void udpard_rx_poll(udpard_rx_t* const self, const udpard_us_t now); /// The return value is true on success, false if any of the arguments are invalid. /// The time complexity is constant. This function does not invoke the dynamic memory manager. bool udpard_rx_port_new(udpard_rx_port_t* const self, - const uint64_t topic_hash, // For P2P ports, this is the local node's UID. const size_t extent, const udpard_rx_mode_t mode, const udpard_us_t reordering_window, const udpard_rx_mem_resources_t memory, const udpard_rx_port_vtable_t* const vtable); +/// The P2P counterpart. There must be exactly one P2P port per node. +bool udpard_rx_port_new_p2p(udpard_rx_port_t* const self, + const size_t extent, + const udpard_rx_mem_resources_t memory, + const udpard_rx_port_vtable_t* const vtable); + /// Returns all memory allocated for the sessions, slots, fragments, etc of the given port. /// Does not free the port itself since it is allocated by the application rather than the library, /// and does not alter the RX instance aside from unlinking the port from it. From 323be5471f4ae9add2d015a4b4eb014f22d3be26 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Mon, 2 Feb 2026 21:55:33 +0200 Subject: [PATCH 02/13] remove ORDERED mode --- libudpard/udpard.c | 376 ++++++++------------------------------------- libudpard/udpard.h | 104 ++----------- 2 files changed, 82 insertions(+), 398 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index b7f500a..54bf0bc 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -59,14 +59,6 @@ typedef unsigned char byte_t; ///< For compatibility with platforms where byte s /// The list works equally well given a non-contiguous transfer-ID stream, unlike the bitmap, thus more robust. #define RX_TRANSFER_HISTORY_COUNT 32U -/// In the ORDERED reassembly mode, with the most recently received transfer-ID N, the library will reject -/// transfers with transfer-ID less than or equal to N-ORDERING_WINDOW (modulo 2^64) as late. -/// This limit is chosen rather arbitrarily; its value does not affect the resource utilization in any way. -/// One trade-off to keep in mind is that a very large window may somewhat increase the likelihood of choosing a new -/// random transfer-ID that falls within the window, thus being rejected as late by receivers; however, given the -/// 64-bit ID space, this value will have to be extremely large to have any measurable effect on that probability. -#define RX_TRANSFER_ORDERING_WINDOW 8192U - #define UDP_PORT 9382U #define IPv4_MCAST_PREFIX 0xEF000000UL static_assert((UDPARD_IPv4_SUBJECT_ID_MAX & (UDPARD_IPv4_SUBJECT_ID_MAX + 1)) == 0, @@ -1421,9 +1413,7 @@ void udpard_tx_free(udpard_tx_t* const self) // // Each session keeps track of recently received/seen transfers, which is used for ack retransmission // if the remote end attempts to retransmit a transfer that was already fully received, and is also used for duplicate -// rejection. 
In the ORDERED mode, late transfers (those arriving out of order past the reordering window closure) -// are never acked, but they may still be received and acked by some other nodes in the network that were able to -// accept them. +// rejection. // // Acks are transmitted immediately upon successful reception of a transfer. If the remote end retransmits the transfer // (e.g., if the first ack was lost or due to a spurious duplication), repeat acks are only retransmitted @@ -1662,18 +1652,11 @@ static bool rx_fragment_tree_finalize(udpard_tree_t* const root, const uint32_t // --------------------------------------------- SLOT --------------------------------------------- -typedef enum -{ - rx_slot_idle = 0, - rx_slot_busy = 1, - rx_slot_done = 2, -} rx_slot_state_t; - /// Frames from all redundant interfaces are pooled into the same reassembly slot per transfer-ID. /// The redundant interfaces may use distinct MTU, which requires special fragment tree handling. typedef struct { - rx_slot_state_t state; + bool busy; uint64_t transfer_id; ///< Which transfer we're reassembling here. @@ -1695,14 +1678,14 @@ static void rx_slot_reset(rx_slot_t* const slot, const udpard_mem_t fragment_mem { udpard_fragment_free_all((udpard_fragment_t*)slot->fragments, udpard_make_deleter(fragment_memory)); slot->fragments = NULL; - slot->state = rx_slot_idle; + slot->busy = false; slot->covered_prefix = 0U; slot->crc_end = 0U; slot->crc = CRC_INITIAL; } -/// The caller will accept the ownership of the fragments iff the resulting state is done. -static void rx_slot_update(rx_slot_t* const slot, +/// The caller will accept the ownership of the fragments iff the result is true. +static bool rx_slot_update(rx_slot_t* const slot, const udpard_us_t ts, const udpard_mem_t fragment_memory, const udpard_deleter_t payload_deleter, @@ -1711,9 +1694,10 @@ static void rx_slot_update(rx_slot_t* const slot, uint64_t* const errors_oom, uint64_t* const errors_transfer_malformed) { - if (slot->state != rx_slot_busy) { + bool done = false; + if (!slot->busy) { rx_slot_reset(slot, fragment_memory); - slot->state = rx_slot_busy; + slot->busy = true; slot->transfer_id = frame->meta.transfer_id; slot->ts_min = ts; slot->ts_max = ts; @@ -1726,7 +1710,7 @@ static void rx_slot_update(rx_slot_t* const slot, ++*errors_transfer_malformed; mem_free_payload(payload_deleter, frame->base.origin); rx_slot_reset(slot, fragment_memory); - return; + return false; } const rx_fragment_tree_update_result_t tree_res = rx_fragment_tree_update(&slot->fragments, fragment_memory, @@ -1749,19 +1733,18 @@ static void rx_slot_update(rx_slot_t* const slot, } if (tree_res == rx_fragment_tree_done) { if (rx_fragment_tree_finalize(slot->fragments, slot->crc)) { - slot->state = rx_slot_done; // The caller will handle the completed transfer. + slot->busy = false; + done = true; } else { ++*errors_transfer_malformed; rx_slot_reset(slot, fragment_memory); } } + return done; } // --------------------------------------------- SESSION & PORT --------------------------------------------- -/// The number of times `from` must be incremented (modulo 2^64) to reach `to`. -static uint64_t rx_transfer_id_forward_distance(const uint64_t from, const uint64_t to) { return to - from; } - /// Keep in mind that we have a dedicated session object per remote node per port; this means that the states /// kept here are specific per remote node, as it should be. 
typedef struct rx_session_t @@ -1771,10 +1754,6 @@ typedef struct rx_session_t udpard_rx_port_t* port; - /// Sessions interned for the reordering window closure. - udpard_tree_t index_reordering_window; - udpard_us_t reordering_window_deadline; - /// LRU last animated list for automatic retirement of stale sessions. udpard_listed_t list_by_animation; udpard_us_t last_animated_ts; @@ -1803,8 +1782,6 @@ typedef struct udpard_rx_port_vtable_private_t rx_frame_t*, udpard_deleter_t, uint_fast8_t); - /// Takes ownership of the frame payload. - void (*update_session)(rx_session_t*, udpard_rx_t*, udpard_us_t, rx_frame_t*, udpard_deleter_t); } udpard_rx_port_vtable_private_t; /// True iff the given transfer-ID was recently ejected. @@ -1818,29 +1795,6 @@ static bool rx_session_is_transfer_ejected(const rx_session_t* const self, const return false; } -/// True iff the given transfer-ID is shortly before one of the recently ejected ones or equals one. -/// In the ORDERED mode, this indicates that the transfer is late and can no longer be ejected. -static bool rx_session_is_transfer_late_or_ejected(const rx_session_t* const self, const uint64_t transfer_id) -{ - for (size_t i = 0; i < RX_TRANSFER_HISTORY_COUNT; i++) { - if (rx_transfer_id_forward_distance(transfer_id, self->history[i]) < RX_TRANSFER_ORDERING_WINDOW) { - return true; - } - } - return false; -} - -/// True iff the transfer is already received but is not yet ejected to maintain ordering. Only useful for ORDERED mode. -static bool rx_session_is_transfer_interned(const rx_session_t* const self, const uint64_t transfer_id) -{ - for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - if ((self->slots[i].state == rx_slot_done) && (self->slots[i].transfer_id == transfer_id)) { - return true; - } - } - return false; -} - static int32_t cavl_compare_rx_session_by_remote_uid(const void* const user, const udpard_tree_t* const node) { const uint64_t uid_a = *(const uint64_t*)user; @@ -1850,17 +1804,6 @@ static int32_t cavl_compare_rx_session_by_remote_uid(const void* const user, con return 0; // clang-format on } -static int32_t cavl_compare_rx_session_by_reordering_deadline(const void* const user, const udpard_tree_t* const node) -{ - const rx_session_t* const outer = (const rx_session_t*)user; - const rx_session_t* const inner = CAVL2_TO_OWNER(node, rx_session_t, index_reordering_window); // clang-format off - if (outer->reordering_window_deadline < inner->reordering_window_deadline) { return -1; } - if (outer->reordering_window_deadline > inner->reordering_window_deadline) { return +1; } - if (outer->remote.uid < inner->remote.uid) { return -1; } - if (outer->remote.uid > inner->remote.uid) { return +1; } - return 0; // clang-format on -} - typedef struct { udpard_rx_port_t* owner; @@ -1875,10 +1818,8 @@ static udpard_tree_t* cavl_factory_rx_session_by_remote_uid(void* const user) rx_session_t* const out = mem_alloc(args->owner->memory.session, sizeof(rx_session_t)); if (out != NULL) { mem_zero(sizeof(*out), out); - out->index_remote_uid = (udpard_tree_t){ NULL, { NULL, NULL }, 0 }; - out->index_reordering_window = (udpard_tree_t){ NULL, { NULL, NULL }, 0 }; - out->reordering_window_deadline = BIG_BANG; - out->list_by_animation = (udpard_listed_t){ NULL, NULL }; + out->index_remote_uid = (udpard_tree_t){ NULL, { NULL, NULL }, 0 }; + out->list_by_animation = (udpard_listed_t){ NULL, NULL }; for (size_t i = 0; i < RX_SLOT_COUNT; i++) { out->slots[i].fragments = NULL; rx_slot_reset(&out->slots[i], args->owner->memory.fragment); @@ -1894,24 +1835,19 @@ 
static udpard_tree_t* cavl_factory_rx_session_by_remote_uid(void* const user) } /// Removes the instance from all indexes and frees all associated memory. -static void rx_session_free(rx_session_t* const self, - udpard_list_t* const sessions_by_animation, - udpard_tree_t** const sessions_by_reordering) +static void rx_session_free(rx_session_t* const self, udpard_list_t* const sessions_by_animation) { for (size_t i = 0; i < RX_SLOT_COUNT; i++) { rx_slot_reset(&self->slots[i], self->port->memory.fragment); } cavl2_remove(&self->port->index_session_by_remote_uid, &self->index_remote_uid); - (void)cavl2_remove_if(sessions_by_reordering, &self->index_reordering_window); delist(sessions_by_animation, &self->list_by_animation); mem_free(self->port->memory.session, sizeof(rx_session_t), self); } -/// The payload ownership is transferred to the application. The history log and the window will be updated. +/// The payload ownership is transferred to the application. static void rx_session_eject(rx_session_t* const self, udpard_rx_t* const rx, rx_slot_t* const slot) { - UDPARD_ASSERT(slot->state == rx_slot_done); - // Update the history -- overwrite the oldest entry. self->history_current = (self->history_current + 1U) % RX_TRANSFER_HISTORY_COUNT; self->history[self->history_current] = slot->transfer_id; @@ -1933,130 +1869,42 @@ static void rx_session_eject(rx_session_t* const self, udpard_rx_t* const rx, rx rx_slot_reset(slot, self->port->memory.fragment); } -/// In the ORDERED mode, checks which slots can be ejected or interned in the reordering window. -/// This is only useful for the ORDERED mode. This mode is much more complex and CPU-heavy than the UNORDERED mode. -/// Should be invoked whenever a slot MAY or MUST be ejected (i.e., on completion or when an empty slot is required). -/// If the force flag is set, at least one DONE slot will be ejected even if its reordering window is still open; -/// this is used to forcibly free up at least one slot when no slot is idle and a new transfer arrives. -static void rx_session_ordered_scan_slots(rx_session_t* const self, - udpard_rx_t* const rx, - const udpard_us_t ts, - const bool force_one) -{ - // Reset the reordering window timer because we will either eject everything or arm it again later. - if (cavl2_remove_if(&rx->index_session_by_reordering, &self->index_reordering_window)) { - self->reordering_window_deadline = BIG_BANG; - } - // We need to repeat the scan because each ejection may open up the window for the next in-sequence transfer. - for (size_t iter = 0; iter < RX_SLOT_COUNT; iter++) { - // Find the slot closest to the next in-sequence transfer-ID. - const uint64_t tid_expected = self->history[self->history_current] + 1U; - uint64_t min_tid_dist = UINT64_MAX; - rx_slot_t* slot = NULL; - for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - const uint64_t dist = rx_transfer_id_forward_distance(tid_expected, self->slots[i].transfer_id); - if ((self->slots[i].state == rx_slot_done) && (dist < min_tid_dist)) { - min_tid_dist = dist; - slot = &self->slots[i]; - if (dist == 0) { - break; // Fast path for a common case. - } - } - } - // The slot needs to be ejected if it's in-sequence, if it's reordering window is closed, or if we're - // asked to force an ejection and we haven't done so yet. - // The reordering window timeout implies that earlier transfers will be dropped if ORDERED mode is used. 
- const bool eject = - (slot != NULL) && ((slot->transfer_id == tid_expected) || - (ts >= (slot->ts_min + self->port->reordering_window)) || (force_one && (iter == 0))); - if (!eject) { - // The slot is done but cannot be ejected yet; arm the reordering window timer. - // There may be transfers with future (more distant) transfer-IDs with an earlier reordering window - // closure deadline, but we ignore them because the nearest transfer overrides the more distant ones. - if (slot != NULL) { - self->reordering_window_deadline = slot->ts_min + self->port->reordering_window; - const udpard_tree_t* res = cavl2_find_or_insert(&rx->index_session_by_reordering, //---------------- - self, - &cavl_compare_rx_session_by_reordering_deadline, - &self->index_reordering_window, - &cavl2_trivial_factory); - UDPARD_ASSERT(res == &self->index_reordering_window); - (void)res; - } - break; // No more slots can be ejected at this time. - } - // We always pick the next transfer to eject with the nearest transfer-ID, which guarantees that the other - // DONE transfers will not end up being late. - // Some of the in-progress slots may be obsoleted by this move, which will be taken care of later. - UDPARD_ASSERT((slot != NULL) && (slot->state == rx_slot_done)); - rx_session_eject(self, rx, slot); - } - // Ensure that in-progress slots, if any, have not ended up within the accepted window after the update. - // We can release them early to avoid holding the payload buffers that won't be used anyway. - for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - rx_slot_t* const slot = &self->slots[i]; - if ((slot->state == rx_slot_busy) && rx_session_is_transfer_late_or_ejected(self, slot->transfer_id)) { - rx_slot_reset(slot, self->port->memory.fragment); - } - } -} - /// Finds an existing in-progress slot with the specified transfer-ID, or allocates a new one. -/// Allocation always succeeds so the result is never NULL, but it may cause early ejection of an interned DONE slot. -/// THIS IS POTENTIALLY DESTRUCTIVE IN THE ORDERED MODE because it may force an early reordering window closure. -static rx_slot_t* rx_session_get_slot(rx_session_t* const self, - udpard_rx_t* const rx, - const udpard_us_t ts, - const uint64_t transfer_id) +/// Allocation always succeeds so the result is never NULL, but it may cancel a stale slot with incomplete transfer. +static rx_slot_t* rx_session_get_slot(rx_session_t* const self, const udpard_us_t ts, const uint64_t transfer_id) { // First, check if one is in progress already; resume it if so. for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - if ((self->slots[i].state == rx_slot_busy) && (self->slots[i].transfer_id == transfer_id)) { + if (self->slots[i].busy && (self->slots[i].transfer_id == transfer_id)) { return &self->slots[i]; } } // Use this opportunity to check for timed-out in-progress slots. This may free up a slot for the search below. for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - if ((self->slots[i].state == rx_slot_busy) && (ts >= (self->slots[i].ts_max + SESSION_LIFETIME))) { + if (self->slots[i].busy && (ts >= (self->slots[i].ts_max + SESSION_LIFETIME))) { rx_slot_reset(&self->slots[i], self->port->memory.fragment); } } // This appears to be a new transfer, so we will need to allocate a new slot for it. for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - if (self->slots[i].state == rx_slot_idle) { + if (!self->slots[i].busy) { return &self->slots[i]; } } - // All slots are currently occupied; find the oldest slot to sacrifice, which may be busy or done. 
+ // All slots are currently occupied; find the oldest slot to sacrifice. rx_slot_t* slot = NULL; udpard_us_t oldest_ts = HEAT_DEATH; for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - UDPARD_ASSERT(self->slots[i].state != rx_slot_idle); // Checked this already. + UDPARD_ASSERT(self->slots[i].busy); // Checked this already. if (self->slots[i].ts_max < oldest_ts) { oldest_ts = self->slots[i].ts_max; slot = &self->slots[i]; } } - UDPARD_ASSERT((slot != NULL) && ((slot->state == rx_slot_busy) || (slot->state == rx_slot_done))); - // If it's busy, it is probably just a stale transfer, so it's a no-brainer to evict it. - // If it's done, we have to force the reordering window to close early to free up a slot without transfer loss. - if (slot->state == rx_slot_busy) { - rx_slot_reset(slot, self->port->memory.fragment); // Just a stale transfer, it's probably dead anyway. - } else { - UDPARD_ASSERT(slot->state == rx_slot_done); - // The oldest slot is DONE; we cannot just reset it, we must force an early ejection. - // The slot to eject will be chosen based on the transfer-ID, which may not be the oldest slot. - // Then we repeat the search looking for any IDLE slot, which must succeed now. - rx_session_ordered_scan_slots(self, rx, ts, true); // A slot will be ejected (we don't know which one). - slot = NULL; - for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - if (self->slots[i].state == rx_slot_idle) { - slot = &self->slots[i]; - break; - } - } - } - UDPARD_ASSERT((slot != NULL) && (slot->state == rx_slot_idle)); + UDPARD_ASSERT((slot != NULL) && slot->busy); + // It is probably just a stale transfer, so it's a no-brainer to evict it, it's probably dead anyway. + rx_slot_reset(slot, self->port->memory.fragment); + UDPARD_ASSERT((slot != NULL) && !slot->busy); return slot; } @@ -2088,81 +1936,21 @@ static void rx_session_update(rx_session_t* const self, self->history[i] = frame->meta.transfer_id - 1U; } } - self->port->vtable_private->update_session(self, rx, ts, frame, payload_deleter); -} - -/// The ORDERED mode implementation. May delay incoming transfers to maintain strict transfer-ID ordering. -/// The ORDERED mode is much more complex and CPU-heavy. -static void rx_session_update_ordered(rx_session_t* const self, - udpard_rx_t* const rx, - const udpard_us_t ts, - rx_frame_t* const frame, - const udpard_deleter_t payload_deleter) -{ - // The queries here may be a bit time-consuming. If this becomes a problem, there are many ways to optimize this. 
- const bool is_ejected = rx_session_is_transfer_ejected(self, frame->meta.transfer_id); - const bool is_late_or_ejected = rx_session_is_transfer_late_or_ejected(self, frame->meta.transfer_id); - const bool is_interned = rx_session_is_transfer_interned(self, frame->meta.transfer_id); - const bool is_new = !is_late_or_ejected && !is_interned; - if (is_new) { - rx_slot_t* const slot = rx_session_get_slot(self, rx, ts, frame->meta.transfer_id); - UDPARD_ASSERT((slot != NULL) && (slot->state != rx_slot_done)); - UDPARD_ASSERT((slot->state == rx_slot_idle) || - ((slot->state == rx_slot_busy) && (slot->transfer_id == frame->meta.transfer_id))); - rx_slot_update(slot, - ts, - self->port->memory.fragment, - payload_deleter, - frame, - self->port->extent, - &rx->errors_oom, - &rx->errors_transfer_malformed); - if (slot->state == rx_slot_done) { - UDPARD_ASSERT(rx_session_is_transfer_interned(self, slot->transfer_id)); - if (frame->meta.kind == frame_msg_reliable) { - // Payload view: ((udpard_fragment_t*)cavl2_min(slot->fragments))->view - tx_send_ack(rx, ts, slot->priority, slot->transfer_id, self->remote); - } - rx_session_ordered_scan_slots(self, rx, ts, false); - } - } else { // retransmit ACK if needed - // Note: transfers that are no longer retained in the history will not solicit an ACK response, - // meaning that the sender will not get a confirmation if the retransmitted transfer is too old. - // We assume that RX_TRANSFER_HISTORY_COUNT is enough to cover all sensible use cases. - if ((is_interned || is_ejected) && (frame->meta.kind == frame_msg_reliable) && (frame->base.offset == 0U)) { - // Payload view: frame->base.payload - tx_send_ack(rx, ts, frame->meta.priority, frame->meta.transfer_id, self->remote); - } - mem_free_payload(payload_deleter, frame->base.origin); - } -} -/// The UNORDERED mode implementation. Ejects every transfer immediately upon completion without delay. -/// The reordering timer is not used. -static void rx_session_update_unordered(rx_session_t* const self, - udpard_rx_t* const rx, - const udpard_us_t ts, - rx_frame_t* const frame, - const udpard_deleter_t payload_deleter) -{ - UDPARD_ASSERT(self->port->mode == udpard_rx_unordered); - UDPARD_ASSERT(self->port->reordering_window == 0); - // We do not check interned transfers because in the UNORDERED mode they are never interned, always ejected ASAP. - // We don't care about the ordering, either; we just accept anything that looks new. + // UNORDERED mode update. There are no other modes now -- there used to be ORDERED in an experimental revision once. 
if (!rx_session_is_transfer_ejected(self, frame->meta.transfer_id)) { - rx_slot_t* const slot = rx_session_get_slot(self, rx, ts, frame->meta.transfer_id); // new or continuation - UDPARD_ASSERT((slot != NULL) && (slot->state != rx_slot_done)); - UDPARD_ASSERT((slot->state == rx_slot_idle) || - ((slot->state == rx_slot_busy) && (slot->transfer_id == frame->meta.transfer_id))); - rx_slot_update(slot, - ts, - self->port->memory.fragment, - payload_deleter, - frame, - self->port->extent, - &rx->errors_oom, - &rx->errors_transfer_malformed); - if (slot->state == rx_slot_done) { + rx_slot_t* const slot = rx_session_get_slot(self, ts, frame->meta.transfer_id); // new or continuation + UDPARD_ASSERT(slot != NULL); + UDPARD_ASSERT((!slot->busy) || (slot->transfer_id == frame->meta.transfer_id)); + const bool done = rx_slot_update(slot, + ts, + self->port->memory.fragment, + payload_deleter, + frame, + self->port->extent, + &rx->errors_oom, + &rx->errors_transfer_malformed); + if (done) { if (frame->meta.kind == frame_msg_reliable) { tx_send_ack(rx, ts, slot->priority, slot->transfer_id, self->remote); } @@ -2245,12 +2033,8 @@ static void rx_port_accept_stateless(udpard_rx_t* const rx, } } -static const udpard_rx_port_vtable_private_t rx_port_vtb_ordered = { .accept = rx_port_accept_stateful, - .update_session = rx_session_update_ordered }; -static const udpard_rx_port_vtable_private_t rx_port_vtb_unordered = { .accept = rx_port_accept_stateful, - .update_session = rx_session_update_unordered }; -static const udpard_rx_port_vtable_private_t rx_port_vtb_stateless = { .accept = rx_port_accept_stateless, - .update_session = NULL }; +static const udpard_rx_port_vtable_private_t rx_port_vtb_unordered = { .accept = rx_port_accept_stateful }; +static const udpard_rx_port_vtable_private_t rx_port_vtb_stateless = { .accept = rx_port_accept_stateless }; // --------------------------------------------- RX PUBLIC API --------------------------------------------- @@ -2266,82 +2050,60 @@ void udpard_rx_new(udpard_rx_t* const self, udpard_tx_t* const tx) { UDPARD_ASSERT(self != NULL); mem_zero(sizeof(*self), self); - self->list_session_by_animation = (udpard_list_t){ NULL, NULL }; - self->index_session_by_reordering = NULL; - self->errors_oom = 0; - self->errors_frame_malformed = 0; - self->errors_transfer_malformed = 0; - self->tx = tx; - self->user = NULL; + self->list_session_by_animation = (udpard_list_t){ NULL, NULL }; + self->errors_oom = 0; + self->errors_frame_malformed = 0; + self->errors_transfer_malformed = 0; + self->tx = tx; + self->user = NULL; } void udpard_rx_poll(udpard_rx_t* const self, const udpard_us_t now) { - // Retire timed out sessions. We retire at most one per poll to avoid burstiness -- session retirement - // may potentially free up a lot of memory at once. - { - rx_session_t* const ses = LIST_TAIL(self->list_session_by_animation, rx_session_t, list_by_animation); - if ((ses != NULL) && (now >= (ses->last_animated_ts + SESSION_LIFETIME))) { - rx_session_free(ses, &self->list_session_by_animation, &self->index_session_by_reordering); - } - } - // Process reordering window timeouts. - // We may process more than one to minimize transfer delays; this is also expected to be quick. 
- while (true) { - rx_session_t* const ses = - CAVL2_TO_OWNER(cavl2_min(self->index_session_by_reordering), rx_session_t, index_reordering_window); - if ((ses == NULL) || (now < ses->reordering_window_deadline)) { - break; - } - rx_session_ordered_scan_slots(ses, self, now, false); + // Retire at most one per poll to avoid burstiness. + rx_session_t* const ses = LIST_TAIL(self->list_session_by_animation, rx_session_t, list_by_animation); + if ((ses != NULL) && (now >= (ses->last_animated_ts + SESSION_LIFETIME))) { + rx_session_free(ses, &self->list_session_by_animation); } } bool udpard_rx_port_new(udpard_rx_port_t* const self, const size_t extent, - const udpard_rx_mode_t mode, - const udpard_us_t reordering_window, const udpard_rx_mem_resources_t memory, const udpard_rx_port_vtable_t* const vtable) { - bool ok = (self != NULL) && rx_validate_mem_resources(memory) && (reordering_window >= 0) && (vtable != NULL) && - (vtable->on_message != NULL); + bool ok = (self != NULL) && rx_validate_mem_resources(memory) && (vtable != NULL) && (vtable->on_message != NULL); if (ok) { mem_zero(sizeof(*self), self); self->extent = extent; - self->mode = mode; self->is_p2p = false; self->memory = memory; self->index_session_by_remote_uid = NULL; self->vtable = vtable; self->user = NULL; - switch (mode) { - case udpard_rx_stateless: - self->vtable_private = &rx_port_vtb_stateless; - self->reordering_window = 0; - break; - case udpard_rx_unordered: - self->vtable_private = &rx_port_vtb_unordered; - self->reordering_window = 0; - break; - case udpard_rx_ordered: - self->vtable_private = &rx_port_vtb_ordered; - self->reordering_window = reordering_window; - UDPARD_ASSERT(self->reordering_window >= 0); - break; - default: - ok = false; - } + self->vtable_private = &rx_port_vtb_unordered; } return ok; } +bool udpard_rx_port_new_stateless(udpard_rx_port_t* const self, + const size_t extent, + const udpard_rx_mem_resources_t memory, + const udpard_rx_port_vtable_t* const vtable) +{ + if (udpard_rx_port_new(self, extent, memory, vtable)) { + self->vtable_private = &rx_port_vtb_stateless; + return true; + } + return false; +} + bool udpard_rx_port_new_p2p(udpard_rx_port_t* const self, const size_t extent, const udpard_rx_mem_resources_t memory, const udpard_rx_port_vtable_t* const vtable) { - if (udpard_rx_port_new(self, extent, udpard_rx_unordered, 0, memory, vtable)) { + if (udpard_rx_port_new(self, extent, memory, vtable)) { self->is_p2p = true; return true; } @@ -2352,9 +2114,7 @@ void udpard_rx_port_free(udpard_rx_t* const rx, udpard_rx_port_t* const port) { if ((rx != NULL) && (port != NULL)) { while (port->index_session_by_remote_uid != NULL) { - rx_session_free((rx_session_t*)(void*)port->index_session_by_remote_uid, - &rx->list_session_by_animation, - &rx->index_session_by_reordering); + rx_session_free((rx_session_t*)(void*)port->index_session_by_remote_uid, &rx->list_session_by_animation); } } } diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 6ce6524..ef58ade 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -315,10 +315,6 @@ size_t udpard_fragment_gather(const udpard_fragment_t** cursor, /// /// Reliable messages published over high-fanout topics will generate a large amount of feedback acknowledgments, /// which must be kept in mind when designing the network. -/// -/// Subscribers operating in the ORDERED mode do not acknowledge messages that have been designated as lost -/// (arriving too late, after the reordering window has passed). 
No negative acknowledgments are sent either -/// because there may be other subscribers on the same topic who might still be able to receive the message. typedef struct udpard_tx_t udpard_tx_t; typedef struct udpard_tx_mem_resources_t @@ -488,8 +484,7 @@ bool udpard_tx_new(udpard_tx_t* const self, /// There shall be a separate transfer-ID counter per topic. The initial value shall be chosen randomly /// such that it is likely to be distinct per application startup (embedded systems can use noinit memory sections, /// hash uninitialized SRAM, use timers or ADC noise, etc); hashing with the topic hash is possible for extra entropy. -/// It is essential to provide a monotonic contiguous counter per topic to allow remotes to recover the original -/// publication order and detect lost messages. +/// It is essential to provide a monotonic contiguous counter per topic. /// The random starting point will ensure global uniqueness across topics. /// Related thread on random transfer-ID init: https://forum.opencyphal.org/t/improve-the-transfer-id-timeout/2375 /// @@ -515,8 +510,7 @@ bool udpard_tx_new(udpard_tx_t* const self, /// Beware that reliable delivery may cause message reordering. For example, when sending messages A and B, /// and A is lost on the first attempt, the next attempt may be scheduled after B is published, /// so that the remote sees B followed by A. Most applications tolerate it without issues; if this is not the case, -/// the subscriber should use the ORDERED subscription mode (refer to the RX pipeline for details), -/// which will reconstruct the original message ordering. +/// the subscriber should reconstruct the original message ordering. /// /// On success, the function allocates a single transfer state instance and a number of payload fragments. /// The time complexity is O(p + log e), where p is the transfer payload size, and e is the number of @@ -602,71 +596,11 @@ void udpard_tx_free(udpard_tx_t* const self); /// REDUNDANT INTERFACE B ---> UDP SOCKET ---+---> udpard_rx_port_t ---> TRANSFERS /// | /// ... ---+ -/// -/// The transfer reassembly state machine can operate in several modes described below. First, a brief summary: -/// -/// Mode Guarantees Limitations Reordering window -/// -----------------------------------------−------------------------------------------------------------------ -/// ORDERED Strictly increasing transfer-ID May delay transfers, CPU heavier Non-negative microseconds -/// UNORDERED Unique transfer-ID Ordering not guaranteed Ignored -/// STATELESS Constant time, constant memory 1-frame only, dups, no responses Ignored -/// -/// If not sure, choose unordered. The ordered mode is a good fit for ordering-sensitive use cases like state -/// estimators and control loops, but it is not suitable for P2P. -/// The stateless mode is chiefly intended for the heartbeat topic. -/// -/// ORDERED -/// -/// Each transfer is received at most once. The sequence of transfers delivered (ejected) -/// to the application is STRICTLY INCREASING (with possible gaps in case of loss). -/// -/// The reassembler may hold completed transfers for a brief time if they arrive out-of-order, -/// hoping for the earlier missing transfers to show up, such that they are not permanently lost. 
-/// For example, a sequence 1 2 4 3 5 will be delivered as 1 2 3 4 5 if 3 arrives shortly after 4; -/// however, if 3 does not arrive within the configured reordering window, -/// the application will receive 1 2 4 5, and transfer 3 will be permanently lost even if it arrives later -/// because accepting it without violating the strictly increasing transfer-ID constraint is not possible. -/// -/// This mode requires much more bookkeeping which results in a greater processing load per received fragment/transfer. -/// -/// Zero is not really a special case for the reordering window; it simply means that out-of-order transfers -/// are not waited for at all (declared permanently lost immediately), and no received transfer is delayed -/// before ejection to the application. -/// -/// The ORDERED mode is mostly intended for applications like state estimators, control systems, and data streaming -/// where ordering is critical. -/// -/// UNORDERED -/// -/// Each transfer is ejected immediately upon successful reassembly. Ordering is not enforced, -/// but duplicates are still removed. For example, a sequence 1 2 4 3 5 will be delivered as-is without delay. -/// -/// This mode does not reject nor delay transfers arriving late, making it the desired choice for applications -/// where all transfers need to be received no matter the order. This is in particular useful for request-response -/// topics, where late arrivals occur not only due to network conditions but also due to the inherent -/// asynchrony between requests and responses. For example, node A could publish messages X and Y on subject S, -/// while node B could respond to X only after receiving Y, thus causing the response to X to arrive late with -/// respect to Y. This would cause the ORDERED mode to delay or drop the response to X, which is undesirable; -/// therefore, the UNORDERED mode is preferred for request-response topics. -/// -/// The unordered mode should be the default mode for most use cases. -/// -/// STATELESS -/// -/// Only single-frame transfers are accepted (where the entire payload fits into a single datagram, -/// or the extent does not exceed the MTU). No attempt to enforce ordering or remove duplicates is made. -/// The return path is only discovered for the one interface that delivered the transfer. -/// Transfers arriving from N interfaces are duplicated N times. -/// -/// The stateless mode allocates only a fragment header per accepted frame and does not contain any -/// variable-complexity processing logic, enabling great scalability for topics with a very large number of -/// publishers where unordered and duplicated messages are acceptable, such as the heartbeat topic. /// The application will have a single RX instance to manage all subscriptions and P2P ports. typedef struct udpard_rx_t { - udpard_list_t list_session_by_animation; ///< Oldest at the tail. - udpard_tree_t* index_session_by_reordering; ///< Earliest reordering window closure on the left. + udpard_list_t list_session_by_animation; ///< Oldest at the tail. uint64_t errors_oom; ///< A frame could not be processed (transfer possibly dropped) due to OOM. uint64_t errors_frame_malformed; ///< A received frame was malformed and thus dropped. @@ -701,14 +635,6 @@ typedef struct udpard_rx_mem_resources_t typedef struct udpard_rx_port_t udpard_rx_port_t; typedef struct udpard_rx_transfer_t udpard_rx_transfer_t; -/// RX port mode for transfer reassembly behavior. 
-typedef enum udpard_rx_mode_t -{ - udpard_rx_unordered = 0, - udpard_rx_ordered = 1, - udpard_rx_stateless = 2, -} udpard_rx_mode_t; - /// Provided by the application per port instance to specify the callbacks to be invoked on certain events. /// This design allows distinct callbacks per port, which is especially useful for the P2P port. typedef struct udpard_rx_port_vtable_t @@ -724,10 +650,6 @@ struct udpard_rx_port_t /// The total size of the received payload may still exceed this extent setting by some small margin. size_t extent; - /// Behavior undefined if the reassembly mode or the reordering window are switched on a live port. - udpard_rx_mode_t mode; - udpard_us_t reordering_window; - /// True if this port is used for P2P transfers, false for subject subscriptions. /// There shall be exactly one P2P port per RX instance. bool is_p2p; @@ -813,9 +735,9 @@ struct udpard_rx_transfer_t /// in which case it may be NULL. void udpard_rx_new(udpard_rx_t* const self, udpard_tx_t* const tx); -/// Must be invoked at least every few milliseconds (more often is fine) to purge timed-out sessions and eject -/// received transfers when the reordering window expires. If this is invoked simultaneously with rx subscription -/// reception, then this function should ideally be invoked after the reception handling. +/// Must be invoked at least every few milliseconds (more often is fine). +/// If this is invoked simultaneously with rx subscription reception, +/// then this function should ideally be invoked after the reception handling. /// The time complexity is logarithmic in the number of living sessions. void udpard_rx_poll(udpard_rx_t* const self, const udpard_us_t now); @@ -834,21 +756,23 @@ void udpard_rx_poll(udpard_rx_t* const self, const udpard_us_t now); /// still keep fragments that start before the extent, so the delivered payload may exceed it; fragments starting past /// the limit are dropped. /// -/// If not sure which reassembly mode to choose, consider `udpard_rx_unordered` as the default choice. -/// For ordering-sensitive use cases, such as state estimators and control loops, use `udpard_rx_ordered` with a short -/// window. -/// /// The pointed-to vtable instance must outlive the port instance. /// /// The return value is true on success, false if any of the arguments are invalid. /// The time complexity is constant. This function does not invoke the dynamic memory manager. bool udpard_rx_port_new(udpard_rx_port_t* const self, const size_t extent, - const udpard_rx_mode_t mode, - const udpard_us_t reordering_window, const udpard_rx_mem_resources_t memory, const udpard_rx_port_vtable_t* const vtable); +/// A specialization of udpard_rx_port_new() for scalable stateless subscriptions, where only single-frame transfers +/// are accepted, and no attempt at deduplication is made. This is useful for the heartbeat topic mostly, and perhaps +/// other topics with a great number of publishers and/or very high traffic. +bool udpard_rx_port_new_stateless(udpard_rx_port_t* const self, + const size_t extent, + const udpard_rx_mem_resources_t memory, + const udpard_rx_port_vtable_t* const vtable); + /// The P2P counterpart. There must be exactly one P2P port per node. 
bool udpard_rx_port_new_p2p(udpard_rx_port_t* const self, const size_t extent, From 6ac82bceafdcc99e015c0e60043578ee14c3d1aa Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Mon, 2 Feb 2026 22:25:38 +0200 Subject: [PATCH 03/13] rx: allocate slots dynamically to reduce the average case session heap footprint --- libudpard/udpard.c | 152 ++++++++++++++++++++++++--------------------- libudpard/udpard.h | 5 +- 2 files changed, 84 insertions(+), 73 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 54bf0bc..a3dd9f4 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -1656,8 +1656,6 @@ static bool rx_fragment_tree_finalize(udpard_tree_t* const root, const uint32_t /// The redundant interfaces may use distinct MTU, which requires special fragment tree handling. typedef struct { - bool busy; - uint64_t transfer_id; ///< Which transfer we're reassembling here. udpard_us_t ts_min; ///< Earliest frame timestamp, aka transfer reception timestamp. @@ -1674,30 +1672,46 @@ typedef struct udpard_tree_t* fragments; } rx_slot_t; -static void rx_slot_reset(rx_slot_t* const slot, const udpard_mem_t fragment_memory) +static rx_slot_t* rx_slot_new(const udpard_mem_t slot_memory) +{ + rx_slot_t* const slot = mem_alloc(slot_memory, sizeof(rx_slot_t)); + if (slot != NULL) { + mem_zero(sizeof(*slot), slot); + slot->ts_min = HEAT_DEATH; + slot->ts_max = BIG_BANG; + slot->covered_prefix = 0; + slot->crc_end = 0; + slot->crc = CRC_INITIAL; + slot->fragments = NULL; + } + return slot; +} + +static void rx_slot_destroy(rx_slot_t* const slot, const udpard_mem_t fragment_memory, const udpard_mem_t slot_memory) { + UDPARD_ASSERT(slot != NULL); udpard_fragment_free_all((udpard_fragment_t*)slot->fragments, udpard_make_deleter(fragment_memory)); - slot->fragments = NULL; - slot->busy = false; - slot->covered_prefix = 0U; - slot->crc_end = 0U; - slot->crc = CRC_INITIAL; + mem_free(slot_memory, sizeof(rx_slot_t), slot); } +typedef enum +{ + rx_slot_not_done, + rx_slot_done, + rx_slot_reset, +} rx_slot_update_result_t; + /// The caller will accept the ownership of the fragments iff the result is true. 
-static bool rx_slot_update(rx_slot_t* const slot, - const udpard_us_t ts, - const udpard_mem_t fragment_memory, - const udpard_deleter_t payload_deleter, - rx_frame_t* const frame, - const size_t extent, - uint64_t* const errors_oom, - uint64_t* const errors_transfer_malformed) -{ - bool done = false; - if (!slot->busy) { - rx_slot_reset(slot, fragment_memory); - slot->busy = true; +static rx_slot_update_result_t rx_slot_update(rx_slot_t* const slot, + const udpard_us_t ts, + const udpard_mem_t fragment_memory, + const udpard_deleter_t payload_deleter, + rx_frame_t* const frame, + const size_t extent, + uint64_t* const errors_oom, + uint64_t* const errors_transfer_malformed) +{ + if ((slot->ts_min == HEAT_DEATH) && (slot->ts_max == BIG_BANG)) { slot->transfer_id = frame->meta.transfer_id; slot->ts_min = ts; slot->ts_max = ts; @@ -1709,8 +1723,7 @@ static bool rx_slot_update(rx_slot_t* const slot, if ((slot->total_size != frame->meta.transfer_payload_size) || (slot->priority != frame->meta.priority)) { ++*errors_transfer_malformed; mem_free_payload(payload_deleter, frame->base.origin); - rx_slot_reset(slot, fragment_memory); - return false; + return rx_slot_reset; } const rx_fragment_tree_update_result_t tree_res = rx_fragment_tree_update(&slot->fragments, fragment_memory, @@ -1733,14 +1746,12 @@ static bool rx_slot_update(rx_slot_t* const slot, } if (tree_res == rx_fragment_tree_done) { if (rx_fragment_tree_finalize(slot->fragments, slot->crc)) { - slot->busy = false; - done = true; - } else { - ++*errors_transfer_malformed; - rx_slot_reset(slot, fragment_memory); + return rx_slot_done; } + ++*errors_transfer_malformed; + return rx_slot_reset; } - return done; + return rx_slot_not_done; } // --------------------------------------------- SESSION & PORT --------------------------------------------- @@ -1752,8 +1763,6 @@ typedef struct rx_session_t udpard_tree_t index_remote_uid; ///< Must be the first member. udpard_remote_t remote; ///< Most recent discovered reverse path for P2P to the sender. - udpard_rx_port_t* port; - /// LRU last animated list for automatic retirement of stale sessions. udpard_listed_t list_by_animation; udpard_us_t last_animated_ts; @@ -1765,10 +1774,9 @@ typedef struct rx_session_t bool initialized; ///< Set after the first frame is seen. - // TODO: Static slots are taking too much space; allocate them dynamically instead. - // Each is <=56 bytes so it fits nicely into a 64-byte o1heap block. - // The slot state enum can be replaced with a simple "done" flag. - rx_slot_t slots[RX_SLOT_COUNT]; + udpard_rx_port_t* port; + + rx_slot_t* slots[RX_SLOT_COUNT]; } rx_session_t; /// The reassembly strategy is composed once at initialization time by choosing a vtable with the desired behavior. 
@@ -1821,8 +1829,7 @@ static udpard_tree_t* cavl_factory_rx_session_by_remote_uid(void* const user) out->index_remote_uid = (udpard_tree_t){ NULL, { NULL, NULL }, 0 }; out->list_by_animation = (udpard_listed_t){ NULL, NULL }; for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - out->slots[i].fragments = NULL; - rx_slot_reset(&out->slots[i], args->owner->memory.fragment); + out->slots[i] = NULL; } out->remote.uid = args->remote_uid; out->port = args->owner; @@ -1838,7 +1845,9 @@ static udpard_tree_t* cavl_factory_rx_session_by_remote_uid(void* const user) static void rx_session_free(rx_session_t* const self, udpard_list_t* const sessions_by_animation) { for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - rx_slot_reset(&self->slots[i], self->port->memory.fragment); + if (self->slots[i] != NULL) { + rx_slot_destroy(self->slots[i], self->port->memory.fragment, self->port->memory.session); + } } cavl2_remove(&self->port->index_session_by_remote_uid, &self->index_remote_uid); delist(sessions_by_animation, &self->list_by_animation); @@ -1866,46 +1875,44 @@ static void rx_session_eject(rx_session_t* const self, udpard_rx_t* const rx, rx // Finally, reset the slot. slot->fragments = NULL; // Transfer ownership to the application. - rx_slot_reset(slot, self->port->memory.fragment); + rx_slot_destroy(slot, self->port->memory.fragment, self->port->memory.session); } -/// Finds an existing in-progress slot with the specified transfer-ID, or allocates a new one. -/// Allocation always succeeds so the result is never NULL, but it may cancel a stale slot with incomplete transfer. +/// Finds an existing in-progress slot with the specified transfer-ID, or allocates a new one. Returns NULL on OOM. static rx_slot_t* rx_session_get_slot(rx_session_t* const self, const udpard_us_t ts, const uint64_t transfer_id) { // First, check if one is in progress already; resume it if so. for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - if (self->slots[i].busy && (self->slots[i].transfer_id == transfer_id)) { - return &self->slots[i]; + if ((self->slots[i] != NULL) && (self->slots[i]->transfer_id == transfer_id)) { + return self->slots[i]; } } // Use this opportunity to check for timed-out in-progress slots. This may free up a slot for the search below. for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - if (self->slots[i].busy && (ts >= (self->slots[i].ts_max + SESSION_LIFETIME))) { - rx_slot_reset(&self->slots[i], self->port->memory.fragment); + if ((self->slots[i] != NULL) && (ts >= (self->slots[i]->ts_max + SESSION_LIFETIME))) { + rx_slot_destroy(self->slots[i], self->port->memory.fragment, self->port->memory.session); } } // This appears to be a new transfer, so we will need to allocate a new slot for it. for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - if (!self->slots[i].busy) { - return &self->slots[i]; + if (self->slots[i] == NULL) { + self->slots[i] = rx_slot_new(self->port->memory.session); // may fail + return self->slots[i]; } } // All slots are currently occupied; find the oldest slot to sacrifice. - rx_slot_t* slot = NULL; - udpard_us_t oldest_ts = HEAT_DEATH; + size_t oldest_index = 0; for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - UDPARD_ASSERT(self->slots[i].busy); // Checked this already. - if (self->slots[i].ts_max < oldest_ts) { - oldest_ts = self->slots[i].ts_max; - slot = &self->slots[i]; + UDPARD_ASSERT(self->slots[i] != NULL); // Checked this already.
+ UDPARD_ASSERT(self->slots[oldest_index] != NULL); + if (self->slots[i]->ts_max < self->slots[oldest_index]->ts_max) { + oldest_index = i; } } - UDPARD_ASSERT((slot != NULL) && slot->busy); // It is probably just a stale transfer, so it's a no-brainer to evict it, it's probably dead anyway. - rx_slot_reset(slot, self->port->memory.fragment); - UDPARD_ASSERT((slot != NULL) && !slot->busy); - return slot; + rx_slot_destroy(self->slots[oldest_index], self->port->memory.fragment, self->port->memory.session); + self->slots[oldest_index] = rx_slot_new(self->port->memory.session); // may fail + return self->slots[oldest_index]; } static void rx_session_update(rx_session_t* const self, @@ -1940,21 +1947,24 @@ static void rx_session_update(rx_session_t* const self, // UNORDERED mode update. There are no other modes now -- there used to be ORDERED in an experimental revision once. if (!rx_session_is_transfer_ejected(self, frame->meta.transfer_id)) { rx_slot_t* const slot = rx_session_get_slot(self, ts, frame->meta.transfer_id); // new or continuation - UDPARD_ASSERT(slot != NULL); - UDPARD_ASSERT((!slot->busy) || (slot->transfer_id == frame->meta.transfer_id)); - const bool done = rx_slot_update(slot, - ts, - self->port->memory.fragment, - payload_deleter, - frame, - self->port->extent, - &rx->errors_oom, - &rx->errors_transfer_malformed); - if (done) { - if (frame->meta.kind == frame_msg_reliable) { - tx_send_ack(rx, ts, slot->priority, slot->transfer_id, self->remote); + if (slot == NULL) { + mem_free_payload(payload_deleter, frame->base.origin); + rx->errors_oom++; + } else { + const bool done = rx_slot_update(slot, + ts, + self->port->memory.fragment, + payload_deleter, + frame, + self->port->extent, + &rx->errors_oom, + &rx->errors_transfer_malformed); + if (done) { + if (frame->meta.kind == frame_msg_reliable) { + tx_send_ack(rx, ts, slot->priority, slot->transfer_id, self->remote); + } + rx_session_eject(self, rx, slot); } - rx_session_eject(self, rx, slot); } } else { // retransmit ACK if needed if ((frame->meta.kind == frame_msg_reliable) && (frame->base.offset == 0U)) { diff --git a/libudpard/udpard.h b/libudpard/udpard.h index ef58ade..05eceb7 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -28,6 +28,7 @@ /// The TX pipeline adds a small overhead of sizeof(tx_frame_t). /// - sizeof(tx_transfer_t) blocks for the TX pipeline to store outgoing transfer metadata. /// - sizeof(rx_session_t) blocks for the RX pipeline to store incoming transfer session metadata. +/// - sizeof(rx_slot_t) blocks for the RX pipeline to store specific transfer reassembly state. /// - sizeof(udpard_fragment_t) blocks for the RX pipeline to store received data fragments. /// /// Suitable memory allocators may be found here: @@ -623,8 +624,8 @@ typedef struct udpard_rx_t /// simple applications may choose to use the same memory resource implemented via malloc()/free() for all of them. typedef struct udpard_rx_mem_resources_t { - /// Provides memory for rx_session_t described below. - /// Each instance is fixed-size, so a trivial zero-fragmentation block allocator is sufficient. + /// Provides memory for rx_session_t and rx_slot_t. + /// Instances are fixed-size and similar in size, so a trivial zero-fragmentation block allocator is sufficient. udpard_mem_t session; /// The udpard_fragment_t handles are allocated per payload fragment; each contains a pointer to its fragment. 
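The "trivial zero-fragmentation block allocator" suggested in the comment above can be as small as a free list threaded through a static arena. A minimal sketch follows; the block size and count are assumptions chosen for illustration, and the glue that binds such a pool to the library's memory-resource handles is omitted because it is defined elsewhere in udpard.h.

#include <stddef.h>

#define POOL_BLOCK_SIZE 64U    // Assumption: covers the largest fixed-size object on the target.
#define POOL_BLOCK_COUNT 128U  // Capacity assumption for this sketch.

typedef struct pool_node_t
{
    struct pool_node_t* next;
} pool_node_t;

typedef struct
{
    pool_node_t* head;  // Free list of unused blocks.
    _Alignas(max_align_t) unsigned char arena[POOL_BLOCK_SIZE * POOL_BLOCK_COUNT];
} pool_t;

static void pool_init(pool_t* const self)
{
    self->head = NULL;
    for (size_t i = 0; i < POOL_BLOCK_COUNT; i++) {  // Thread the free list through the arena.
        pool_node_t* const node = (pool_node_t*)(void*)&self->arena[i * POOL_BLOCK_SIZE];
        node->next = self->head;
        self->head = node;
    }
}

static void* pool_alloc(pool_t* const self, const size_t size)
{
    if ((size > POOL_BLOCK_SIZE) || (self->head == NULL)) {
        return NULL;  // Oversized request or pool exhausted; the caller treats this as OOM.
    }
    pool_node_t* const node = self->head;  // Pop the head block: O(1), never fragments.
    self->head = node->next;
    return node;
}

static void pool_free(pool_t* const self, void* const ptr)
{
    if (ptr != NULL) {
        pool_node_t* const node = (pool_node_t*)ptr;  // Push the block back onto the free list.
        node->next = self->head;
        self->head = node;
    }
}

Allocation and deallocation are constant-time and the pool cannot fragment, which is why fixed-size, similarly sized rx_session_t and rx_slot_t blocks can be served from one such pool as the updated udpard.h comment suggests.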
From 4c3f01022dd737f11eae07ce1dced4c18ded27d2 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Mon, 2 Feb 2026 22:25:41 +0200 Subject: [PATCH 04/13] Revert "rx: allocate slots dynamically to reduce the average case session heap footprint" This reverts commit 6ac82bceafdcc99e015c0e60043578ee14c3d1aa. --- libudpard/udpard.c | 152 +++++++++++++++++++++------------------------ libudpard/udpard.h | 5 +- 2 files changed, 73 insertions(+), 84 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index a3dd9f4..54bf0bc 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -1656,6 +1656,8 @@ static bool rx_fragment_tree_finalize(udpard_tree_t* const root, const uint32_t /// The redundant interfaces may use distinct MTU, which requires special fragment tree handling. typedef struct { + bool busy; + uint64_t transfer_id; ///< Which transfer we're reassembling here. udpard_us_t ts_min; ///< Earliest frame timestamp, aka transfer reception timestamp. @@ -1672,46 +1674,30 @@ typedef struct udpard_tree_t* fragments; } rx_slot_t; -static rx_slot_t* rx_slot_new(const udpard_mem_t slot_memory) -{ - rx_slot_t* const slot = mem_alloc(slot_memory, sizeof(rx_slot_t)); - if (slot != NULL) { - mem_zero(sizeof(*slot), slot); - slot->ts_min = HEAT_DEATH; - slot->ts_max = BIG_BANG; - slot->covered_prefix = 0; - slot->crc_end = 0; - slot->crc = CRC_INITIAL; - slot->fragments = NULL; - } - return slot; -} - -static void rx_slot_destroy(rx_slot_t* const slot, const udpard_mem_t fragment_memory, const udpard_mem_t slot_memory) +static void rx_slot_reset(rx_slot_t* const slot, const udpard_mem_t fragment_memory) { - UDPARD_ASSERT(slot != NULL); udpard_fragment_free_all((udpard_fragment_t*)slot->fragments, udpard_make_deleter(fragment_memory)); - mem_free(slot_memory, sizeof(rx_slot_t), slot); + slot->fragments = NULL; + slot->busy = false; + slot->covered_prefix = 0U; + slot->crc_end = 0U; + slot->crc = CRC_INITIAL; } -typedef enum -{ - rx_slot_not_done, - rx_slot_done, - rx_slot_reset, -} rx_slot_update_result_t; - /// The caller will accept the ownership of the fragments iff the result is true. 
-static rx_slot_update_result_t rx_slot_update(rx_slot_t* const slot, - const udpard_us_t ts, - const udpard_mem_t fragment_memory, - const udpard_deleter_t payload_deleter, - rx_frame_t* const frame, - const size_t extent, - uint64_t* const errors_oom, - uint64_t* const errors_transfer_malformed) -{ - if ((slot->ts_min == HEAT_DEATH) && (slot->ts_max == BIG_BANG)) { +static bool rx_slot_update(rx_slot_t* const slot, + const udpard_us_t ts, + const udpard_mem_t fragment_memory, + const udpard_deleter_t payload_deleter, + rx_frame_t* const frame, + const size_t extent, + uint64_t* const errors_oom, + uint64_t* const errors_transfer_malformed) +{ + bool done = false; + if (!slot->busy) { + rx_slot_reset(slot, fragment_memory); + slot->busy = true; slot->transfer_id = frame->meta.transfer_id; slot->ts_min = ts; slot->ts_max = ts; @@ -1723,7 +1709,8 @@ static rx_slot_update_result_t rx_slot_update(rx_slot_t* const slot, if ((slot->total_size != frame->meta.transfer_payload_size) || (slot->priority != frame->meta.priority)) { ++*errors_transfer_malformed; mem_free_payload(payload_deleter, frame->base.origin); - return rx_slot_reset; + rx_slot_reset(slot, fragment_memory); + return false; } const rx_fragment_tree_update_result_t tree_res = rx_fragment_tree_update(&slot->fragments, fragment_memory, @@ -1746,12 +1733,14 @@ static rx_slot_update_result_t rx_slot_update(rx_slot_t* const slot, } if (tree_res == rx_fragment_tree_done) { if (rx_fragment_tree_finalize(slot->fragments, slot->crc)) { - return rx_slot_done; + slot->busy = false; + done = true; + } else { + ++*errors_transfer_malformed; + rx_slot_reset(slot, fragment_memory); } - ++*errors_transfer_malformed; - return rx_slot_reset; } - return rx_slot_not_done; + return done; } // --------------------------------------------- SESSION & PORT --------------------------------------------- @@ -1763,6 +1752,8 @@ typedef struct rx_session_t udpard_tree_t index_remote_uid; ///< Must be the first member. udpard_remote_t remote; ///< Most recent discovered reverse path for P2P to the sender. + udpard_rx_port_t* port; + /// LRU last animated list for automatic retirement of stale sessions. udpard_listed_t list_by_animation; udpard_us_t last_animated_ts; @@ -1774,9 +1765,10 @@ typedef struct rx_session_t bool initialized; ///< Set after the first frame is seen. - udpard_rx_port_t* port; - - rx_slot_t* slots[RX_SLOT_COUNT]; + // TODO: Static slots are taking too much space; allocate them dynamically instead. + // Each is <=56 bytes so it fits nicely into a 64-byte o1heap block. + // The slot state enum can be replaced with a simple "done" flag. + rx_slot_t slots[RX_SLOT_COUNT]; } rx_session_t; /// The reassembly strategy is composed once at initialization time by choosing a vtable with the desired behavior. 
@@ -1829,7 +1821,8 @@ static udpard_tree_t* cavl_factory_rx_session_by_remote_uid(void* const user) out->index_remote_uid = (udpard_tree_t){ NULL, { NULL, NULL }, 0 }; out->list_by_animation = (udpard_listed_t){ NULL, NULL }; for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - out->slots[i] = NULL; + out->slots[i].fragments = NULL; + rx_slot_reset(&out->slots[i], args->owner->memory.fragment); } out->remote.uid = args->remote_uid; out->port = args->owner; @@ -1845,9 +1838,7 @@ static udpard_tree_t* cavl_factory_rx_session_by_remote_uid(void* const user) static void rx_session_free(rx_session_t* const self, udpard_list_t* const sessions_by_animation) { for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - if (self->slots[i] != NULL) { - rx_slot_destroy(self->slots[i], self->port->memory.fragment, self->port->memory.session); - } + rx_slot_reset(&self->slots[i], self->port->memory.fragment); } cavl2_remove(&self->port->index_session_by_remote_uid, &self->index_remote_uid); delist(sessions_by_animation, &self->list_by_animation); @@ -1875,44 +1866,46 @@ static void rx_session_eject(rx_session_t* const self, udpard_rx_t* const rx, rx // Finally, reset the slot. slot->fragments = NULL; // Transfer ownership to the application. - rx_slot_destroy(slot, self->port->memory.fragment, self->port->memory.session); + rx_slot_reset(slot, self->port->memory.fragment); } -/// Finds an existing in-progress slot with the specified transfer-ID, or allocates a new one. Returns NULL on OOM. +/// Finds an existing in-progress slot with the specified transfer-ID, or allocates a new one. +/// Allocation always succeeds so the result is never NULL, but it may cancel a stale slot with incomplete transfer. static rx_slot_t* rx_session_get_slot(rx_session_t* const self, const udpard_us_t ts, const uint64_t transfer_id) { // First, check if one is in progress already; resume it if so. for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - if ((self->slots[i] != NULL) && (self->slots[i]->transfer_id == transfer_id)) { - return self->slots[i]; + if (self->slots[i].busy && (self->slots[i].transfer_id == transfer_id)) { + return &self->slots[i]; } } // Use this opportunity to check for timed-out in-progress slots. This may free up a slot for the search below. for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - if ((self->slots[i] != NULL) && (ts >= (self->slots[i]->ts_max + SESSION_LIFETIME))) { - rx_slot_destroy(self->slots[i], self->port->memory.fragment, self->port->memory.session); + if (self->slots[i].busy && (ts >= (self->slots[i].ts_max + SESSION_LIFETIME))) { + rx_slot_reset(&self->slots[i], self->port->memory.fragment); } } // This appears to be a new transfer, so we will need to allocate a new slot for it. for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - if (self->slots[i] == NULL) { - self->slots[i] = rx_slot_new(self->port->memory.session); // may fail - return self->slots[i]; + if (!self->slots[i].busy) { + return &self->slots[i]; } } // All slots are currently occupied; find the oldest slot to sacrifice. - size_t oldest_index = 0; + rx_slot_t* slot = NULL; + udpard_us_t oldest_ts = HEAT_DEATH; for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - UDPARD_ASSERT(self->slots[i] != NULL); // Checked this already. - UDPARD_ASSERT(self->slots[oldest_index] != NULL); - if (self->slots[i]->ts_max < self->slots[oldest_index]->ts_max) { - oldest_index = i; + UDPARD_ASSERT(self->slots[i].busy); // Checked this already.
+        if (self->slots[i].ts_max < oldest_ts) {
+            oldest_ts = self->slots[i].ts_max;
+            slot = &self->slots[i];
         }
     }
+    UDPARD_ASSERT((slot != NULL) && slot->busy);
-    // It is probably just a stale transfer, so it's a no-brainer to evict it, it's probably dead anyway.
-    rx_slot_destroy(self->slots[oldest_index], self->port->memory.fragment, self->port->memory.session);
-    self->slots[oldest_index] = rx_slot_new(self->port->memory.session); // may fail
-    return self->slots[oldest_index];
+    // It is probably just a stale transfer that is dead anyway, so evicting it is a no-brainer.
+    rx_slot_reset(slot, self->port->memory.fragment);
+    UDPARD_ASSERT((slot != NULL) && !slot->busy);
+    return slot;
 }
 
 static void rx_session_update(rx_session_t* const self,
@@ -1947,24 +1940,21 @@ static void rx_session_update(rx_session_t* const self,
     // UNORDERED mode update. There are no other modes now -- there used to be ORDERED in an experimental revision once.
     if (!rx_session_is_transfer_ejected(self, frame->meta.transfer_id)) {
         rx_slot_t* const slot = rx_session_get_slot(self, ts, frame->meta.transfer_id); // new or continuation
-        if (slot == NULL) {
-            mem_free_payload(payload_deleter, frame->base.origin);
-            rx->errors_oom++;
-        } else {
-            const bool done = rx_slot_update(slot,
-                                             ts,
-                                             self->port->memory.fragment,
-                                             payload_deleter,
-                                             frame,
-                                             self->port->extent,
-                                             &rx->errors_oom,
-                                             &rx->errors_transfer_malformed);
-            if (done) {
-                if (frame->meta.kind == frame_msg_reliable) {
-                    tx_send_ack(rx, ts, slot->priority, slot->transfer_id, self->remote);
-                }
-                rx_session_eject(self, rx, slot);
+        UDPARD_ASSERT(slot != NULL);
+        UDPARD_ASSERT((!slot->busy) || (slot->transfer_id == frame->meta.transfer_id));
+        const bool done = rx_slot_update(slot,
+                                         ts,
+                                         self->port->memory.fragment,
+                                         payload_deleter,
+                                         frame,
+                                         self->port->extent,
+                                         &rx->errors_oom,
+                                         &rx->errors_transfer_malformed);
+        if (done) {
+            if (frame->meta.kind == frame_msg_reliable) {
+                tx_send_ack(rx, ts, slot->priority, slot->transfer_id, self->remote);
             }
+            rx_session_eject(self, rx, slot);
         }
     } else { // retransmit ACK if needed
         if ((frame->meta.kind == frame_msg_reliable) && (frame->base.offset == 0U)) {
diff --git a/libudpard/udpard.h b/libudpard/udpard.h
index 05eceb7..ef58ade 100644
--- a/libudpard/udpard.h
+++ b/libudpard/udpard.h
@@ -28,7 +28,6 @@
 /// The TX pipeline adds a small overhead of sizeof(tx_frame_t).
 /// - sizeof(tx_transfer_t) blocks for the TX pipeline to store outgoing transfer metadata.
 /// - sizeof(rx_session_t) blocks for the RX pipeline to store incoming transfer session metadata.
-/// - sizeof(rx_slot_t) blocks for the RX pipeline to store specific transfer reassembly state.
 /// - sizeof(udpard_fragment_t) blocks for the RX pipeline to store received data fragments.
 ///
 /// Suitable memory allocators may be found here:
@@ -624,8 +623,8 @@ typedef struct udpard_rx_t
 /// simple applications may choose to use the same memory resource implemented via malloc()/free() for all of them.
 typedef struct udpard_rx_mem_resources_t
 {
-    /// Provides memory for rx_session_t and rx_slot_t.
-    /// Instances are fixed-size and similar in size, so a trivial zero-fragmentation block allocator is sufficient.
+    /// Provides memory for rx_session_t described below.
+    /// Each instance is fixed-size, so a trivial zero-fragmentation block allocator is sufficient.
     udpard_mem_t session;
 
     /// The udpard_fragment_t handles are allocated per payload fragment; each contains a pointer to its fragment.
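
On the "trivial zero-fragmentation block allocator" recommended above for the session resource: since every allocation is the same fixed size, a freelist over a static array suffices. A minimal sketch under that assumption; the pool geometry is illustrative and must be sized for sizeof(rx_session_t) on the target, and the glue to udpard_mem_t is omitted because its vtable is not shown in this diff (o1heap, mentioned in the TODO in udpard.c, is a production-ready alternative):

    #include <stddef.h>

    // Pool geometry is an assumption; size the blocks for the largest object stored in them.
    #define BLOCK_SIZE  256U
    #define BLOCK_COUNT 64U

    typedef union block_u
    {
        union block_u* next;             // Valid only while the block is on the free list.
        long double    align;            // Forces worst-case alignment for stored objects.
        unsigned char  bytes[BLOCK_SIZE];
    } block_t;

    static block_t  g_pool[BLOCK_COUNT];
    static block_t* g_free_head = NULL;

    static void pool_init(void)
    {
        g_free_head = NULL;
        for (size_t i = 0; i < BLOCK_COUNT; i++) {
            g_pool[i].next = g_free_head;  // Push every block onto the free list.
            g_free_head = &g_pool[i];
        }
    }

    static void* pool_alloc(const size_t size)
    {
        if ((size > sizeof(g_pool[0].bytes)) || (g_free_head == NULL)) {
            return NULL;  // Oversized request or pool exhausted; the caller treats this as OOM.
        }
        block_t* const blk = g_free_head;
        g_free_head = blk->next;
        return blk;
    }

    static void pool_free(void* const ptr)
    {
        if (ptr != NULL) {
            block_t* const blk = (block_t*)ptr;
            blk->next = g_free_head;  // Return the block to the free list; O(1), zero fragmentation.
            g_free_head = blk;
        }
    }

Such a pool would sit behind the session resource's allocation and deallocation entry points; because all blocks are interchangeable, allocation and release are constant-time and the pool never fragments.
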
From 53f55d0b77ea982f8d2a876bd7219ee001ef7dd0 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Mon, 2 Feb 2026 22:29:21 +0200 Subject: [PATCH 05/13] readme --- README.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 55bc1eb..e73bbbd 100644 --- a/README.md +++ b/README.md @@ -24,15 +24,13 @@ next-generation intelligent vehicles: manned and unmanned aircraft, spacecraft, - ≤1-copy TX pipeline with deduplication across multiple interfaces and scattered input buffer support. - Support for redundant network interfaces with seamless interface aggregation and zero fail-over delay. - Robust message reassembler supporting highly distorted datagram streams: - out-of-order fragments, message ordering recovery, fragment/message deduplication, interleaving, variable MTU, ... -- Robust message ordering recovery for ordering-sensitive applications (e.g., state estimators, control loops) - with well-defined deterministic recovery in the event of lost messages. + out-of-order fragments, fragment/message deduplication, interleaving, variable MTU, ... - Packet loss mitigation via: - reliable topics (retransmit until acknowledged; callback notifications for successful/failed deliveries). - redundant interfaces (packet lost on one interface may be received on another, transparent to the application); - Heap not required (but supported); the library can be used with fixed-size block pool allocators. - Detailed time complexity and memory requirement models for the benefit of real-time high-integrity applications. -- Highly scalable: designed to handle thousands of topics and hundreds of concurrent transfers with minimal resources. +- Scalable: designed to handle thousands of topics and hundreds of concurrent transfers with minimal resources. - Runs anywhere out of the box, including extremely resource-constrained baremetal environments with ~100K ROM/RAM. No porting required. - Partial MISRA C compliance (reach out to ). From 59cadd0abb05156e8af435948595c08ed128e886 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Mon, 2 Feb 2026 22:43:08 +0200 Subject: [PATCH 06/13] corrections --- libudpard/udpard.c | 4 ++-- libudpard/udpard.h | 10 ++++------ 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 54bf0bc..cc460c6 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -453,7 +453,7 @@ typedef enum frame_kind_t frame_ack, } frame_kind_t; -/// The transfer-ID is designed to be unique per pending transfer. The uniquness is achieved by randomization. +/// The transfer-ID is designed to be unique per pending transfer. The uniqueness is achieved by randomization. /// For extra entropy, P2P transfers have their transfer-ID computed as (base_counter++)+destination_uid; /// the base counter is seeded with a random value. typedef struct @@ -2018,7 +2018,7 @@ static void rx_port_accept_stateless(udpard_rx_t* const rx, .priority = frame->meta.priority, .transfer_id = frame->meta.transfer_id, .remote = remote, - .payload_size_stored = required_size, + .payload_size_stored = frame->base.payload.size, .payload_size_wire = frame->meta.transfer_payload_size, .payload = frag, }; diff --git a/libudpard/udpard.h b/libudpard/udpard.h index ef58ade..fc0be4d 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -480,12 +480,10 @@ bool udpard_tx_new(udpard_tx_t* const self, /// invalidated immediately after this function returns. 
When redundant interfaces are used, the library will attempt to /// minimize the number of copies by reusing frames across interfaces with identical MTU values and memory resources. /// -/// The caller shall increment the transfer-ID counter after each successful invocation of this function per topic. -/// There shall be a separate transfer-ID counter per topic. The initial value shall be chosen randomly -/// such that it is likely to be distinct per application startup (embedded systems can use noinit memory sections, -/// hash uninitialized SRAM, use timers or ADC noise, etc); hashing with the topic hash is possible for extra entropy. -/// It is essential to provide a monotonic contiguous counter per topic. -/// The random starting point will ensure global uniqueness across topics. +/// The caller shall increment the transfer-ID counter after each successful invocation of this function per subject. +/// The initial value shall be chosen randomly such that it is likely to be distinct per application startup +/// (embedded systems can use noinit memory sections, hash uninitialized SRAM, use timers or ADC noise, etc). +/// The random starting point will ensure global uniqueness across different subjects. /// Related thread on random transfer-ID init: https://forum.opencyphal.org/t/improve-the-transfer-id-timeout/2375 /// /// The user context value is carried through to the callbacks. It must contain enough context to allow subject-ID From cd40b5e763ff7409487aba2a7e204ced005c58a4 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Mon, 2 Feb 2026 23:44:23 +0200 Subject: [PATCH 07/13] update tests --- libudpard/udpard.c | 4 +- tests/CMakeLists.txt | 1 - tests/src/test_e2e_api.cpp | 23 +- tests/src/test_e2e_edge.cpp | 259 +------ tests/src/test_e2e_random.cpp | 52 +- tests/src/test_e2e_reliable_ordered.cpp | 463 ------------- tests/src/test_e2e_responses.cpp | 46 +- tests/src/test_integration_sockets.cpp | 59 +- tests/src/test_intrusive_guards.c | 36 +- tests/src/test_intrusive_header.c | 27 +- tests/src/test_intrusive_rx.c | 858 +++++------------------- tests/src/test_intrusive_tx.c | 465 ++++--------- 12 files changed, 409 insertions(+), 1884 deletions(-) delete mode 100644 tests/src/test_e2e_reliable_ordered.cpp diff --git a/libudpard/udpard.c b/libudpard/udpard.c index cc460c6..1efc16c 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -1326,11 +1326,13 @@ bool udpard_tx_cancel(udpard_tx_t* const self, const uint64_t transfer_id, const tx_transfer_t, index_transfer_id); while ((tr != NULL) && (tr->transfer_id == transfer_id)) { + tx_transfer_t* const next = + CAVL2_TO_OWNER(cavl2_next_greater(&tr->index_transfer_id), tx_transfer_t, index_transfer_id); if (tr->kind == (reliable ? frame_msg_reliable : frame_msg_best)) { // Cancel all matching (normally <=1). 
tx_transfer_retire(self, tr, false); cancelled = true; } - tr = CAVL2_TO_OWNER(cavl2_next_greater(&tr->index_transfer_id), tx_transfer_t, index_transfer_id); + tr = next; } } return cancelled; diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 0e8f0b4..e277a86 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -98,7 +98,6 @@ gen_test_single(test_e2e_random "src/test_e2e_random.cpp;${library_dir}/udpard.c gen_test_single(test_e2e_edge "src/test_e2e_edge.cpp;${library_dir}/udpard.c") gen_test_single(test_e2e_api "src/test_e2e_api.cpp;${library_dir}/udpard.c") gen_test_single(test_e2e_responses "src/test_e2e_responses.cpp;${library_dir}/udpard.c") -gen_test_single(test_e2e_reliable_ordered "src/test_e2e_reliable_ordered.cpp;${library_dir}/udpard.c") gen_test_single(test_integration_sockets "src/test_integration_sockets.cpp;${library_dir}/udpard.c") # Coverage targets. Usage: diff --git a/tests/src/test_e2e_api.cpp b/tests/src/test_e2e_api.cpp index 520cb4e..7e5f737 100644 --- a/tests/src/test_e2e_api.cpp +++ b/tests/src/test_e2e_api.cpp @@ -31,7 +31,6 @@ struct RxContext std::array sources{}; uint64_t remote_uid = 0; size_t received = 0; - size_t collisions = 0; }; // Refcount helpers keep captured datagrams alive. @@ -115,19 +114,14 @@ void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpar ctx->received++; } -void on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const /*port*/, const udpard_remote_t /*remote*/) -{ - auto* ctx = static_cast(rx->user); - ctx->collisions++; -} -constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message, .on_collision = &on_collision }; +constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message }; // Ack port frees responses. void on_ack_response(udpard_rx_t*, udpard_rx_port_t* port, const udpard_rx_transfer_t tr) { udpard_fragment_free_all(tr.payload, udpard_make_deleter(port->memory.fragment)); } -constexpr udpard_rx_port_vtable_t ack_callbacks{ .on_message = &on_ack_response, .on_collision = &on_collision }; +constexpr udpard_rx_port_vtable_t ack_callbacks{ .on_message = &on_ack_response }; // Reliable delivery must survive data and ack loss. // Each node uses exactly one TX and one RX instance as per the library design. @@ -184,8 +178,7 @@ void test_reliable_delivery_under_losses() udpard_rx_t pub_rx{}; udpard_rx_new(&pub_rx, &pub_tx); udpard_rx_port_t pub_p2p_port{}; - TEST_ASSERT_TRUE( - udpard_rx_port_new(&pub_p2p_port, pub_uid, 16, udpard_rx_unordered, 0, pub_rx_mem, &ack_callbacks)); + TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&pub_p2p_port, 16, pub_rx_mem, &ack_callbacks)); // Subscriber node: single TX, single RX (linked to TX for sending ACKs). constexpr uint64_t sub_uid = 0xABCDEF0012345678ULL; @@ -197,8 +190,7 @@ void test_reliable_delivery_under_losses() udpard_rx_t sub_rx{}; udpard_rx_new(&sub_rx, &sub_tx); udpard_rx_port_t sub_port{}; - const uint64_t topic_hash = 0x0123456789ABCDEFULL; - TEST_ASSERT_TRUE(udpard_rx_port_new(&sub_port, topic_hash, 6000, udpard_rx_unordered, 0, sub_rx_mem, &callbacks)); + TEST_ASSERT_TRUE(udpard_rx_port_new(&sub_port, 6000, sub_rx_mem, &callbacks)); // Endpoints. 
const std::array publisher_sources{ @@ -235,7 +227,6 @@ void test_reliable_delivery_under_losses() deadline, iface_bitmap_all, udpard_prio_fast, - topic_hash, 1U, payload_view, &record_feedback, @@ -296,8 +287,6 @@ void test_reliable_delivery_under_losses() TEST_ASSERT_EQUAL_size_t(1, fb.count); TEST_ASSERT_EQUAL_UINT32(1, fb.acknowledgements); TEST_ASSERT_EQUAL_size_t(1, ctx.received); - TEST_ASSERT_EQUAL_size_t(0, ctx.collisions); - // Cleanup. udpard_rx_port_free(&sub_rx, &sub_port); udpard_rx_port_free(&pub_rx, &pub_p2p_port); @@ -350,7 +339,6 @@ void test_reliable_stats_and_failures() 10, iface_bitmap_1, udpard_prio_fast, - 0xABCULL, 5U, exp_payload, &record_feedback, @@ -399,7 +387,7 @@ void test_reliable_stats_and_failures() ctx.expected.assign({ 1U, 2U, 3U, 4U }); udpard_rx_new(&rx, nullptr); rx.user = &ctx; - TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 0x12340000ULL, 64, udpard_rx_unordered, 0, rx_mem, &callbacks)); + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 64, rx_mem, &callbacks)); const udpard_bytes_scattered_t src_payload = make_scattered(ctx.expected.data(), ctx.expected.size()); FeedbackState fb_ignore{}; @@ -408,7 +396,6 @@ void test_reliable_stats_and_failures() 1000, iface_bitmap_1, udpard_prio_fast, - port.topic_hash, 7U, src_payload, &record_feedback, diff --git a/tests/src/test_e2e_edge.cpp b/tests/src/test_e2e_edge.cpp index c161a54..48aabbe 100644 --- a/tests/src/test_e2e_edge.cpp +++ b/tests/src/test_e2e_edge.cpp @@ -14,8 +14,7 @@ namespace { void on_message(udpard_rx_t* rx, udpard_rx_port_t* port, udpard_rx_transfer_t transfer); -void on_collision(udpard_rx_t* rx, udpard_rx_port_t* port, udpard_remote_t remote); -constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message, .on_collision = &on_collision }; +constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message }; struct FbState { @@ -84,7 +83,6 @@ void release_frames(std::vector& frames) struct Context { std::vector ids; - size_t collisions = 0; uint64_t expected_uid = 0; udpard_udpip_ep_t source{}; }; @@ -103,14 +101,13 @@ struct Fixture Context ctx{}; udpard_udpip_ep_t dest{}; udpard_udpip_ep_t source{}; - uint64_t topic_hash{ 0x90AB12CD34EF5678ULL }; Fixture(const Fixture&) = delete; Fixture& operator=(const Fixture&) = delete; Fixture(Fixture&&) = delete; Fixture& operator=(Fixture&&) = delete; - explicit Fixture(const udpard_rx_mode_t mode, const udpard_us_t reordering_window) + explicit Fixture() { instrumented_allocator_new(&tx_alloc_transfer); instrumented_allocator_new(&tx_alloc_payload); @@ -133,7 +130,7 @@ struct Fixture ctx.expected_uid = tx.local_uid; ctx.source = source; rx.user = &ctx; - TEST_ASSERT_TRUE(udpard_rx_port_new(&port, topic_hash, 1024, mode, reordering_window, rx_mem, &callbacks)); + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 1024, rx_mem, &callbacks)); } ~Fixture() @@ -168,7 +165,6 @@ struct Fixture deadline, iface_bitmap_1, udpard_prio_slow, - topic_hash, transfer_id, payload, nullptr, @@ -192,16 +188,10 @@ void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpar udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); } -void on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const /*port*/, const udpard_remote_t /*remote*/) -{ - auto* const ctx = static_cast(rx->user); - ctx->collisions++; -} - /// UNORDERED mode should drop duplicates while keeping arrival order. 
void test_udpard_rx_unordered_duplicates() { - Fixture fix{ udpard_rx_unordered, 0 }; + Fixture fix{}; udpard_us_t now = 0; constexpr std::array ids{ 100, 20000, 10100, 5000, 20000, 100 }; @@ -217,118 +207,6 @@ void test_udpard_rx_unordered_duplicates() for (size_t i = 0; i < expected.size(); i++) { TEST_ASSERT_EQUAL_UINT64(expected[i], fix.ctx.ids[i]); } - TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions); -} - -/// ORDERED mode waits for the window, then rejects late arrivals. -void test_udpard_rx_ordered_out_of_order() -{ - Fixture fix{ udpard_rx_ordered, 50 }; - udpard_us_t now = 0; - - // First batch builds the ordered baseline. - fix.push_single(now, 100); - udpard_rx_poll(&fix.rx, now); - fix.push_single(++now, 300); - udpard_rx_poll(&fix.rx, now); - fix.push_single(++now, 200); - udpard_rx_poll(&fix.rx, now); - - // Let the reordering window close for the early transfers. - now = 60; - udpard_rx_poll(&fix.rx, now); - - // Queue far-future IDs while keeping the head at 300. - fix.push_single(now + 1, 10100); - udpard_rx_poll(&fix.rx, now + 1); - fix.push_single(now + 2, 10200); - udpard_rx_poll(&fix.rx, now + 2); - - // Late arrivals inside the window shall be dropped. - fix.push_single(now + 3, 250); - udpard_rx_poll(&fix.rx, now + 3); - fix.push_single(now + 4, 150); - udpard_rx_poll(&fix.rx, now + 4); - - // Allow the window to expire so the remaining interned transfers eject. - udpard_rx_poll(&fix.rx, now + 70); - - constexpr std::array expected{ 100, 200, 300, 10100, 10200 }; - TEST_ASSERT_EQUAL_size_t(expected.size(), fix.ctx.ids.size()); - for (size_t i = 0; i < expected.size(); i++) { - TEST_ASSERT_EQUAL_UINT64(expected[i], fix.ctx.ids[i]); - } - TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions); -} - -/// ORDERED mode after head advance should reject late IDs arriving after window expiry. -void test_udpard_rx_ordered_head_advanced_late() -{ - Fixture fix{ udpard_rx_ordered, 50 }; - udpard_us_t now = 0; - - fix.push_single(now, 100); - udpard_rx_poll(&fix.rx, now); - fix.push_single(++now, 300); - udpard_rx_poll(&fix.rx, now); - fix.push_single(++now, 200); - udpard_rx_poll(&fix.rx, now); - now = 60; - udpard_rx_poll(&fix.rx, now); // head -> 300 - - fix.push_single(++now, 420); - udpard_rx_poll(&fix.rx, now); - fix.push_single(++now, 450); - udpard_rx_poll(&fix.rx, now); - now = 120; - udpard_rx_poll(&fix.rx, now); // head -> 450 - - fix.push_single(++now, 320); - udpard_rx_poll(&fix.rx, now); - fix.push_single(++now, 310); - udpard_rx_poll(&fix.rx, now); - - constexpr std::array expected{ 100, 200, 300, 420, 450 }; - TEST_ASSERT_EQUAL_size_t(expected.size(), fix.ctx.ids.size()); - for (size_t i = 0; i < expected.size(); i++) { - TEST_ASSERT_EQUAL_UINT64(expected[i], fix.ctx.ids[i]); - } - TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions); -} - -/// ORDERED mode rejects transfer-IDs far behind the recent history window. 
-void test_udpard_rx_ordered_reject_far_past() -{ - Fixture fix{ udpard_rx_ordered, 50 }; - udpard_us_t now = 0; - - fix.push_single(now, 200000); - udpard_rx_poll(&fix.rx, now); - - now = 60; - udpard_rx_poll(&fix.rx, now); - - const uint64_t late_tid_close = 200000 - 1000; - fix.push_single(++now, late_tid_close); - udpard_rx_poll(&fix.rx, now); - udpard_rx_poll(&fix.rx, now + 100); - - const uint64_t far_past_tid = 200000 - 100000; - fix.push_single(++now, far_past_tid); - udpard_rx_poll(&fix.rx, now); - udpard_rx_poll(&fix.rx, now + 50); - - const uint64_t recent_tid = 200001; - fix.push_single(++now, recent_tid); - udpard_rx_poll(&fix.rx, now); - udpard_rx_poll(&fix.rx, now + 50); - - constexpr std::array expected{ 200000, far_past_tid, recent_tid }; - TEST_ASSERT_EQUAL_size_t(expected.size(), fix.ctx.ids.size()); - for (size_t i = 0; i < expected.size(); i++) { - TEST_ASSERT_EQUAL_UINT64(expected[i], fix.ctx.ids[i]); - } - TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions); } // Feedback must fire regardless of disposal path. @@ -357,7 +235,6 @@ void test_udpard_tx_feedback_always_called() 10, iface_bitmap_1, udpard_prio_fast, - 1, 11, make_scattered(nullptr, 0), fb_record, @@ -382,7 +259,6 @@ void test_udpard_tx_feedback_always_called() 1000, iface_bitmap_1, udpard_prio_fast, - 2, 21, make_scattered(nullptr, 0), fb_record, @@ -392,7 +268,6 @@ void test_udpard_tx_feedback_always_called() 1000, iface_bitmap_1, udpard_prio_fast, - 3, 22, make_scattered(nullptr, 0), fb_record, @@ -417,7 +292,6 @@ void test_udpard_tx_feedback_always_called() 1000, iface_bitmap_1, udpard_prio_fast, - 4, 33, make_scattered(nullptr, 0), fb_record, @@ -460,14 +334,14 @@ void test_udpard_tx_push_p2p() Context ctx{}; const udpard_udpip_ep_t source{ .ip = 0x0A0000AAU, .port = 7600U }; const udpard_udpip_ep_t dest{ .ip = 0x0A000010U, .port = 7400U }; - const uint64_t local_uid = 0xCAFEBABECAFED00DULL; - ctx.expected_uid = tx.local_uid; - ctx.source = source; - rx.user = &ctx; - TEST_ASSERT_TRUE(udpard_rx_port_new(&port, local_uid, 1024, udpard_rx_unordered, 0, rx_mem, &callbacks)); + ctx.expected_uid = tx.local_uid; + ctx.source = source; + rx.user = &ctx; + TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&port, 1024, rx_mem, &callbacks)); + const uint64_t remote_uid = 0xCAFEBABECAFED00DULL; udpard_remote_t remote{}; - remote.uid = local_uid; + remote.uid = remote_uid; remote.endpoints[0U] = dest; const std::array user_payload{ 0xAAU, 0xBBU, 0xCCU }; @@ -486,8 +360,6 @@ void test_udpard_tx_push_p2p() udpard_rx_poll(&rx, now); TEST_ASSERT_EQUAL_size_t(1, ctx.ids.size()); TEST_ASSERT_EQUAL_UINT64(out_tid, ctx.ids[0]); - TEST_ASSERT_EQUAL_size_t(0, ctx.collisions); - udpard_rx_port_free(&rx, &port); udpard_tx_free(&tx); TEST_ASSERT_EQUAL(0, tx_alloc_transfer.allocated_fragments); @@ -533,12 +405,11 @@ void test_udpard_tx_minimum_mtu() udpard_rx_t rx{}; udpard_rx_port_t port{}; Context ctx{}; - const uint64_t topic_hash = 0x1234567890ABCDEFULL; - ctx.expected_uid = tx.local_uid; - ctx.source = { .ip = 0x0A000001U, .port = 7501U }; + ctx.expected_uid = tx.local_uid; + ctx.source = { .ip = 0x0A000001U, .port = 7501U }; udpard_rx_new(&rx, nullptr); rx.user = &ctx; - TEST_ASSERT_TRUE(udpard_rx_port_new(&port, topic_hash, 4096, udpard_rx_unordered, 0, rx_mem, &callbacks)); + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 4096, rx_mem, &callbacks)); // Send a payload that will require fragmentation at minimum MTU std::array payload{}; @@ -556,7 +427,6 @@ void test_udpard_tx_minimum_mtu() now + 1000000, iface_bitmap_1, 
udpard_prio_nominal, - topic_hash, 1U, payload_view, nullptr, @@ -594,7 +464,7 @@ void test_udpard_tx_minimum_mtu() /// Test with transfer-ID at uint64 boundary values (0, large values) void test_udpard_transfer_id_boundaries() { - Fixture fix{ udpard_rx_unordered, 0 }; + Fixture fix{}; // Test transfer-ID = 0 (first valid value) fix.push_single(0, 0); @@ -613,8 +483,6 @@ void test_udpard_transfer_id_boundaries() udpard_rx_poll(&fix.rx, 2); TEST_ASSERT_EQUAL_size_t(3, fix.ctx.ids.size()); TEST_ASSERT_EQUAL_UINT64(0x8000000000000000ULL, fix.ctx.ids[2]); - - TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions); } /// Test zero extent handling - should accept transfers but truncate payload @@ -644,11 +512,10 @@ void test_udpard_rx_zero_extent() udpard_rx_t rx{}; udpard_rx_port_t port{}; - const uint64_t topic_hash = 0xFEDCBA9876543210ULL; udpard_rx_new(&rx, nullptr); // Create port with zero extent - TEST_ASSERT_TRUE(udpard_rx_port_new(&port, topic_hash, 0, udpard_rx_unordered, 0, rx_mem, &callbacks)); + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 0, rx_mem, &callbacks)); // Track received transfers struct ZeroExtentContext @@ -672,10 +539,8 @@ void test_udpard_rx_zero_extent() z->payload_size_wire = transfer.payload_size_wire; udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port_arg->memory.fragment)); } - static void on_collision(udpard_rx_t*, udpard_rx_port_t*, udpard_remote_t) {} }; - static constexpr udpard_rx_port_vtable_t zero_callbacks{ .on_message = &ZeroExtentCallbacks::on_message, - .on_collision = &ZeroExtentCallbacks::on_collision }; + static constexpr udpard_rx_port_vtable_t zero_callbacks{ .on_message = &ZeroExtentCallbacks::on_message }; port.vtable = &zero_callbacks; rx.user = &zctx; @@ -696,7 +561,6 @@ void test_udpard_rx_zero_extent() now + 1000000, iface_bitmap_1, udpard_prio_nominal, - topic_hash, 5U, payload_view, nullptr, @@ -733,7 +597,7 @@ void test_udpard_rx_zero_extent() /// Test empty payload transfer (zero-size payload) void test_udpard_empty_payload() { - Fixture fix{ udpard_rx_unordered, 0 }; + Fixture fix{}; // Send an empty payload fix.frames.clear(); @@ -746,7 +610,6 @@ void test_udpard_empty_payload() deadline, iface_bitmap_1, udpard_prio_nominal, - fix.topic_hash, 10U, empty_payload, nullptr, @@ -769,7 +632,7 @@ void test_udpard_empty_payload() /// Test priority levels from exceptional (0) to optional (7) void test_udpard_all_priority_levels() { - Fixture fix{ udpard_rx_unordered, 0 }; + Fixture fix{}; udpard_us_t now = 0; constexpr uint16_t iface_bitmap_1 = (1U << 0U); @@ -786,7 +649,6 @@ void test_udpard_all_priority_levels() now + 1000000, iface_bitmap_1, static_cast(prio), - fix.topic_hash, 100U + prio, payload_view, nullptr, @@ -809,87 +671,6 @@ void test_udpard_all_priority_levels() } } -/// Test collision detection (topic hash mismatch) -void test_udpard_topic_hash_collision() -{ - instrumented_allocator_t tx_alloc_transfer{}; - instrumented_allocator_t tx_alloc_payload{}; - instrumented_allocator_t rx_alloc_frag{}; - instrumented_allocator_t rx_alloc_session{}; - instrumented_allocator_new(&tx_alloc_transfer); - instrumented_allocator_new(&tx_alloc_payload); - instrumented_allocator_new(&rx_alloc_frag); - instrumented_allocator_new(&rx_alloc_session); - - udpard_tx_mem_resources_t tx_mem{}; - tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); - for (auto& res : tx_mem.payload) { - res = instrumented_allocator_make_resource(&tx_alloc_payload); - } - const udpard_rx_mem_resources_t rx_mem{ .session = 
instrumented_allocator_make_resource(&rx_alloc_session), - .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; - - udpard_tx_t tx{}; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x1111222233334444ULL, 300U, 64, tx_mem, &tx_vtable)); - std::vector frames; - tx.user = &frames; - - udpard_rx_t rx{}; - udpard_rx_port_t port{}; - Context ctx{}; - const uint64_t rx_topic_hash = 0xAAAAAAAAAAAAAAAAULL; // Different from TX - const uint64_t tx_topic_hash = 0xBBBBBBBBBBBBBBBBULL; // Different from RX - ctx.expected_uid = tx.local_uid; - ctx.source = { .ip = 0x0A000003U, .port = 7503U }; - udpard_rx_new(&rx, nullptr); - rx.user = &ctx; - TEST_ASSERT_TRUE(udpard_rx_port_new(&port, rx_topic_hash, 1024, udpard_rx_unordered, 0, rx_mem, &callbacks)); - - // Send with mismatched topic hash - std::array payload{}; - const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); - constexpr uint16_t iface_bitmap_1 = (1U << 0U); - - const udpard_us_t now = 0; - frames.clear(); - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - now, - now + 1000000, - iface_bitmap_1, - udpard_prio_nominal, - tx_topic_hash, // Different from port's topic_hash - 1U, - payload_view, - nullptr, - UDPARD_USER_CONTEXT_NULL)); - udpard_tx_poll(&tx, now, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_FALSE(frames.empty()); - - // Deliver to RX - should trigger collision callback - const udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr }; - for (const auto& f : frames) { - TEST_ASSERT_TRUE( - udpard_rx_port_push(&rx, &port, now, ctx.source, f.datagram, tx_payload_deleter, f.iface_index)); - } - udpard_rx_poll(&rx, now); - - // No transfers received, but collision detected - TEST_ASSERT_EQUAL_size_t(0, ctx.ids.size()); - TEST_ASSERT_EQUAL_size_t(1, ctx.collisions); - - // Cleanup - udpard_rx_port_free(&rx, &port); - udpard_tx_free(&tx); - TEST_ASSERT_EQUAL(0, tx_alloc_transfer.allocated_fragments); - TEST_ASSERT_EQUAL(0, tx_alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, rx_alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(0, rx_alloc_session.allocated_fragments); - instrumented_allocator_reset(&tx_alloc_transfer); - instrumented_allocator_reset(&tx_alloc_payload); - instrumented_allocator_reset(&rx_alloc_frag); - instrumented_allocator_reset(&rx_alloc_session); -} - } // namespace extern "C" void setUp() {} @@ -900,9 +681,6 @@ int main() { UNITY_BEGIN(); RUN_TEST(test_udpard_rx_unordered_duplicates); - RUN_TEST(test_udpard_rx_ordered_out_of_order); - RUN_TEST(test_udpard_rx_ordered_head_advanced_late); - RUN_TEST(test_udpard_rx_ordered_reject_far_past); RUN_TEST(test_udpard_tx_feedback_always_called); RUN_TEST(test_udpard_tx_push_p2p); RUN_TEST(test_udpard_tx_minimum_mtu); @@ -910,6 +688,5 @@ int main() RUN_TEST(test_udpard_rx_zero_extent); RUN_TEST(test_udpard_empty_payload); RUN_TEST(test_udpard_all_priority_levels); - RUN_TEST(test_udpard_topic_hash_collision); return UNITY_END(); } diff --git a/tests/src/test_e2e_random.cpp b/tests/src/test_e2e_random.cpp index 7dc6926..948cca9 100644 --- a/tests/src/test_e2e_random.cpp +++ b/tests/src/test_e2e_random.cpp @@ -18,19 +18,12 @@ namespace { struct TransferKey { uint64_t transfer_id; - uint64_t topic_hash; - bool operator==(const TransferKey& other) const - { - return (transfer_id == other.transfer_id) && (topic_hash == other.topic_hash); - } + bool operator==(const TransferKey& other) const { return transfer_id == other.transfer_id; } }; struct TransferKeyHash { - size_t operator()(const TransferKey& key) 
const
-    {
-        return (std::hash{}(key.transfer_id) << 1U) ^ std::hash{}(key.topic_hash);
-    }
+    size_t operator()(const TransferKey& key) const { return std::hash{}(key.transfer_id); }
 };
 
 struct ExpectedPayload
@@ -43,7 +36,6 @@ struct Context
 {
     std::unordered_map expected;
     size_t received = 0;
-    size_t collisions = 0;
     size_t truncated = 0;
     uint64_t remote_uid = 0;
     size_t reliable_feedback_success = 0;
@@ -135,14 +127,14 @@ void on_ack_response(udpard_rx_t*, udpard_rx_port_t* port, const udpard_rx_trans
 {
     udpard_fragment_free_all(tr.payload, udpard_make_deleter(port->memory.fragment));
 }
-constexpr udpard_rx_port_vtable_t ack_callbacks{ .on_message = &on_ack_response, .on_collision = nullptr };
+constexpr udpard_rx_port_vtable_t ack_callbacks{ .on_message = &on_ack_response };
 
 void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer)
 {
     auto* const ctx = static_cast(rx->user);
 
-    // Match the incoming transfer against the expected table keyed by topic hash and transfer-ID.
+    // Match the incoming transfer against the expected table; the randomized transfer-ID alone is the key.
     const TransferKey key{ .transfer_id = transfer.transfer_id };
     const auto it = ctx->expected.find(key);
     if (it == ctx->expected.end()) {
         udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment));
@@ -178,14 +170,7 @@ void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpar
     ctx->received++;
 }
 
-void on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_remote_t remote)
-{
-    auto* ctx = static_cast(rx->user);
-    (void)port;
-    (void)remote;
-    ctx->collisions++;
-}
-constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message, .on_collision = &on_collision };
+constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message };
 
 /// Randomized end-to-end TX/RX covering fragmentation, reordering, and extent-driven truncation.
 void test_udpard_tx_rx_end_to_end()
@@ -236,18 +221,12 @@ void test_udpard_tx_rx_end_to_end()
     udpard_rx_new(&ack_rx, &tx);
 
     // Test parameters.
-    constexpr std::array topic_hashes{ 0x123456789ABCDEF0ULL,
-                                       0x0FEDCBA987654321ULL,
-                                       0x00ACE00ACE00ACEULL };
-    constexpr std::array modes{ udpard_rx_ordered, udpard_rx_unordered, udpard_rx_ordered };
-    constexpr std::array windows{ 2000, 0, 5000 };
-    constexpr std::array extents{ 1000, 5000, SIZE_MAX };
+    constexpr std::array extents{ 1000, 5000, SIZE_MAX };
 
-    // Configure ports with varied extents and reordering windows to cover truncation and different RX modes.
+    // Configure ports with varied extents to cover truncation.
     std::array ports{};
     for (size_t i = 0; i < ports.size(); i++) {
-        TEST_ASSERT_TRUE(
-            udpard_rx_port_new(&ports[i], topic_hashes[i], extents[i], modes[i], windows[i], rx_mem, &callbacks));
+        TEST_ASSERT_TRUE(udpard_rx_port_new(&ports[i], extents[i], rx_mem, &callbacks));
     }
 
     // Setup the context.
@@ -264,26 +243,23 @@ void test_udpard_tx_rx_end_to_end()
     tx.user = &frames;
     std::vector ack_frames;
     ack_tx.user = &ack_frames;
-    TEST_ASSERT_TRUE(
-        udpard_rx_port_new(&ack_port, tx.local_uid, 16, udpard_rx_unordered, 0, ack_rx_mem, &ack_callbacks));
+    TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&ack_port, 16, ack_rx_mem, &ack_callbacks));
     std::array ack_sources{};
     for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
         ack_sources[i] = { .ip = static_cast(0x0A000020U + i), .port = static_cast(7700U + i) };
     }
 
     // Main test loop: generate transfers, push into TX, drain and shuffle frames, push into RX.
- std::array transfer_ids{ static_cast(rand()), - static_cast(rand()), - static_cast(rand()) }; - size_t reliable_total = 0; - udpard_us_t now = 0; + uint64_t next_transfer_id = (static_cast(rand()) << 32U) ^ static_cast(rand()); + size_t reliable_total = 0; + udpard_us_t now = 0; for (size_t transfer_index = 0; transfer_index < 1000; transfer_index++) { now += static_cast(random_range(1000, 5000)); frames.clear(); // Pick a port, build a random payload, and remember what to expect on that topic. const size_t port_index = random_range(0, ports.size() - 1U); - const uint64_t transfer_id = transfer_ids[port_index]++; + const uint64_t transfer_id = next_transfer_id++; const size_t payload_size = random_range(0, 10000); std::vector payload(payload_size); fill_random(payload); @@ -295,7 +271,7 @@ void test_udpard_tx_rx_end_to_end() // Each transfer is sent on all redundant interfaces with different MTUs to exercise fragmentation variety. const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); const auto priority = static_cast(random_range(0, UDPARD_PRIORITY_COUNT - 1U)); - const TransferKey key{ .transfer_id = transfer_id, .topic_hash = topic_hashes[port_index] }; + const TransferKey key{ .transfer_id = transfer_id }; const bool inserted = ctx.expected.emplace(key, ExpectedPayload{ .payload = payload, .payload_size_wire = payload.size() }).second; TEST_ASSERT_TRUE(inserted); @@ -315,7 +291,6 @@ void test_udpard_tx_rx_end_to_end() deadline, UDPARD_IFACE_BITMAP_ALL, priority, - topic_hashes[port_index], transfer_id, payload_view, reliable ? &record_feedback : nullptr, @@ -377,7 +352,6 @@ void test_udpard_tx_rx_end_to_end() TEST_ASSERT_TRUE(ctx.expected.empty()); TEST_ASSERT_EQUAL_size_t(1000, ctx.received); TEST_ASSERT_TRUE(ctx.truncated > 0); - TEST_ASSERT_EQUAL_size_t(0, ctx.collisions); TEST_ASSERT_EQUAL_size_t(reliable_total, ctx.reliable_feedback_success); TEST_ASSERT_EQUAL_size_t(0, ctx.reliable_feedback_failure); for (auto& port : ports) { diff --git a/tests/src/test_e2e_reliable_ordered.cpp b/tests/src/test_e2e_reliable_ordered.cpp deleted file mode 100644 index 009c1d7..0000000 --- a/tests/src/test_e2e_reliable_ordered.cpp +++ /dev/null @@ -1,463 +0,0 @@ -/// This software is distributed under the terms of the MIT License. -/// Copyright (C) OpenCyphal Development Team -/// Copyright Amazon.com Inc. or its affiliates. -/// SPDX-License-Identifier: MIT -/// This test validates reliable delivery with ORDERED mode under packet loss and reordering. - -#include -#include "helpers.h" -#include -#include -#include - -namespace { - -constexpr size_t CyphalHeaderSize = 48; // Cyphal/UDP header size - -struct CapturedFrame -{ - udpard_bytes_mut_t datagram; - uint_fast8_t iface_index; -}; - -void tx_refcount_free(void* const user, const size_t size, void* const payload) -{ - (void)user; - udpard_tx_refcount_dec(udpard_bytes_t{ .size = size, .data = payload }); -} - -constexpr udpard_deleter_vtable_t tx_refcount_deleter_vt{ .free = &tx_refcount_free }; -constexpr udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr }; - -void drop_frame(const CapturedFrame& frame) -{ - udpard_tx_refcount_dec(udpard_bytes_t{ .size = frame.datagram.size, .data = frame.datagram.data }); -} - -// Extract transfer_id from Cyphal/UDP header (bytes 16-23 of datagram). 
-uint64_t extract_transfer_id(const udpard_bytes_mut_t& datagram) -{ - if (datagram.size < 24) { - return 0; - } - const auto* p = static_cast(datagram.data); - uint64_t tid = 0; - for (size_t i = 0; i < 8; i++) { - tid |= static_cast(p[16 + i]) << (i * 8U); - } - return tid; -} - -// Extract the transfer_id being ACKed from ACK payload. -// ACK payload format: topic_hash(8) + transfer_id(8). -uint64_t extract_acked_transfer_id(const udpard_bytes_mut_t& datagram) -{ - constexpr size_t p2p_tid_offset = CyphalHeaderSize + 8; - if (datagram.size < p2p_tid_offset + 8) { - return 0; - } - const auto* p = static_cast(datagram.data); - uint64_t tid = 0; - for (size_t i = 0; i < 8; i++) { - tid |= static_cast(p[p2p_tid_offset + i]) << (i * 8U); - } - return tid; -} - -bool capture_tx_frame_impl(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) -{ - auto* frames = static_cast*>(tx->user); - if (frames == nullptr) { - return false; - } - udpard_tx_refcount_inc(ejection->datagram); - void* const data = const_cast(ejection->datagram.data); // NOLINT - const udpard_bytes_mut_t dgram{ .size = ejection->datagram.size, .data = data }; - frames->push_back(CapturedFrame{ .datagram = dgram, .iface_index = ejection->iface_index }); - return true; -} - -bool capture_tx_frame_subject(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) -{ - return capture_tx_frame_impl(tx, ejection); -} - -bool capture_tx_frame_p2p(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t /*dest*/) -{ - return capture_tx_frame_impl(tx, ejection); -} - -constexpr udpard_tx_vtable_t tx_vtable{ .eject_subject = &capture_tx_frame_subject, - .eject_p2p = &capture_tx_frame_p2p }; - -struct FeedbackState -{ - size_t count = 0; - uint16_t acknowledgements = 0; -}; - -void record_feedback(udpard_tx_t*, const udpard_tx_feedback_t fb) -{ - auto* st = static_cast(fb.user.ptr[0]); - if (st != nullptr) { - st->count++; - st->acknowledgements = fb.acknowledgements; - } -} - -struct ReceiverContext -{ - std::vector received_transfer_ids; -}; - -void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) -{ - auto* ctx = static_cast(rx->user); - ctx->received_transfer_ids.push_back(transfer.transfer_id); - udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); -} - -void on_collision(udpard_rx_t*, udpard_rx_port_t*, udpard_remote_t) {} - -constexpr udpard_rx_port_vtable_t topic_callbacks{ .on_message = &on_message, .on_collision = &on_collision }; - -void on_ack_only(udpard_rx_t*, udpard_rx_port_t* port, const udpard_rx_transfer_t tr) -{ - udpard_fragment_free_all(tr.payload, udpard_make_deleter(port->memory.fragment)); -} - -constexpr udpard_rx_port_vtable_t ack_only_callbacks{ .on_message = &on_ack_only, .on_collision = &on_collision }; - -/// Test scenario: -/// - Sender publishes messages A, B, C (tid=100, 101, 102) in reliable mode, in quick succession. -/// - A is delivered successfully, establishing the session baseline for the receiver in ORDERED mode. -/// - First attempt to deliver B fails (lost). -/// - Every first ACK for B and C is lost, forcing sender to retransmit. -/// -/// The receiver first sees A, then C (tid=102), which gets interned waiting for lower transfer IDs. -/// When B (tid=101) arrives via retransmission, it gets delivered first, then C is ejected in order. -/// -/// Transmission sequence: -/// 1. A (tid=100) delivered successfully -- establishes ordered session -/// 2. B (tid=101) lost -/// 3. 
C (tid=102) delivered but ACK lost -- interned, waiting for tid < 102 -/// 4. B (tid=101) delivered but ACK lost -- delivered first, then C ejected -/// 5. C (tid=102) re-delivered, duplicate ignored, ACK delivered -/// 6. B (tid=101) re-delivered, duplicate ignored, ACK delivered -/// -/// Receiver must validate: receives A, then B, then C, in correct order without duplicates. -void test_reliable_ordered_with_loss_and_reordering() -{ - seed_prng(); - - // Allocators - instrumented_allocator_t sender_tx_alloc_transfer{}; - instrumented_allocator_t sender_tx_alloc_payload{}; - instrumented_allocator_t receiver_rx_alloc_frag{}; - instrumented_allocator_t receiver_rx_alloc_session{}; - instrumented_allocator_t receiver_tx_alloc_transfer{}; - instrumented_allocator_t receiver_tx_alloc_payload{}; - instrumented_allocator_t sender_rx_alloc_frag{}; - instrumented_allocator_t sender_rx_alloc_session{}; - instrumented_allocator_new(&sender_tx_alloc_transfer); - instrumented_allocator_new(&sender_tx_alloc_payload); - instrumented_allocator_new(&receiver_rx_alloc_frag); - instrumented_allocator_new(&receiver_rx_alloc_session); - instrumented_allocator_new(&receiver_tx_alloc_transfer); - instrumented_allocator_new(&receiver_tx_alloc_payload); - instrumented_allocator_new(&sender_rx_alloc_frag); - instrumented_allocator_new(&sender_rx_alloc_session); - - // Memory resources - udpard_tx_mem_resources_t sender_tx_mem{}; - sender_tx_mem.transfer = instrumented_allocator_make_resource(&sender_tx_alloc_transfer); - for (auto& res : sender_tx_mem.payload) { - res = instrumented_allocator_make_resource(&sender_tx_alloc_payload); - } - const udpard_rx_mem_resources_t sender_rx_mem{ .session = - instrumented_allocator_make_resource(&sender_rx_alloc_session), - .fragment = - instrumented_allocator_make_resource(&sender_rx_alloc_frag) }; - - udpard_tx_mem_resources_t receiver_tx_mem{}; - receiver_tx_mem.transfer = instrumented_allocator_make_resource(&receiver_tx_alloc_transfer); - for (auto& res : receiver_tx_mem.payload) { - res = instrumented_allocator_make_resource(&receiver_tx_alloc_payload); - } - const udpard_rx_mem_resources_t receiver_rx_mem{ .session = - instrumented_allocator_make_resource(&receiver_rx_alloc_session), - .fragment = - instrumented_allocator_make_resource(&receiver_rx_alloc_frag) }; - - // Node identifiers - constexpr uint64_t sender_uid = 0xAAAA1111BBBB2222ULL; - constexpr uint64_t receiver_uid = 0xCCCC3333DDDD4444ULL; - const udpard_udpip_ep_t sender_source{ .ip = 0x0A000001U, .port = 7400U }; - const udpard_udpip_ep_t receiver_source{ .ip = 0x0A000011U, .port = 7500U }; - constexpr uint64_t topic_hash = 0x0123456789ABCDEFULL; - constexpr uint64_t tid_a = 100; - constexpr uint64_t tid_b = 101; - constexpr uint64_t tid_c = 102; - constexpr uint16_t iface_bitmap_1 = (1U << 0U); - - // Use a large reordering window to ensure retransmissions arrive within the window. - // With exponential backoff, retransmissions can take significant time. 
- constexpr udpard_us_t reordering_window = 1000000; // 1 second - constexpr udpard_us_t ack_timeout = 10000; // 10ms baseline - - // Sender TX/RX - udpard_tx_t sender_tx{}; - std::vector sender_frames; - TEST_ASSERT_TRUE(udpard_tx_new(&sender_tx, sender_uid, 100, 64, sender_tx_mem, &tx_vtable)); - sender_tx.user = &sender_frames; - sender_tx.ack_baseline_timeout = ack_timeout; - - udpard_rx_t sender_rx{}; - udpard_rx_new(&sender_rx, &sender_tx); - - udpard_rx_port_t sender_p2p_port{}; - TEST_ASSERT_TRUE( - udpard_rx_port_new(&sender_p2p_port, sender_uid, 16, udpard_rx_unordered, 0, sender_rx_mem, &ack_only_callbacks)); - - // Receiver TX/RX - udpard_tx_t receiver_tx{}; - std::vector receiver_frames; - TEST_ASSERT_TRUE(udpard_tx_new(&receiver_tx, receiver_uid, 200, 64, receiver_tx_mem, &tx_vtable)); - receiver_tx.user = &receiver_frames; - receiver_tx.ack_baseline_timeout = ack_timeout; - - udpard_rx_t receiver_rx{}; - ReceiverContext receiver_ctx{}; - udpard_rx_new(&receiver_rx, &receiver_tx); - receiver_rx.user = &receiver_ctx; - - udpard_rx_port_t receiver_topic_port{}; - TEST_ASSERT_TRUE(udpard_rx_port_new( - &receiver_topic_port, topic_hash, 4096, udpard_rx_ordered, reordering_window, receiver_rx_mem, &topic_callbacks)); - - // Payloads - const std::array payload_a{ 0xAA, 0xAA, 0xAA, 0xAA }; - const std::array payload_b{ 0xBB, 0xBB, 0xBB, 0xBB }; - const std::array payload_c{ 0xCC, 0xCC, 0xCC, 0xCC }; - - // Feedback states - FeedbackState fb_a{}; - FeedbackState fb_b{}; - FeedbackState fb_c{}; - - udpard_us_t now = 0; - const udpard_us_t deadline = now + 2000000; // 2 second deadline - - // Step 1: Send transfer A that is delivered successfully (establishes the session baseline). - TEST_ASSERT_TRUE(udpard_tx_push(&sender_tx, - now, - deadline, - iface_bitmap_1, - udpard_prio_nominal, - topic_hash, - tid_a, - make_scattered(payload_a.data(), payload_a.size()), - &record_feedback, - make_user_context(&fb_a))); - - // Deliver A - sender_frames.clear(); - udpard_tx_poll(&sender_tx, now, UDPARD_IFACE_BITMAP_ALL); - for (const auto& frame : sender_frames) { - TEST_ASSERT_TRUE(udpard_rx_port_push(&receiver_rx, - &receiver_topic_port, - now, - sender_source, - frame.datagram, - tx_payload_deleter, - frame.iface_index)); - } - sender_frames.clear(); - udpard_rx_poll(&receiver_rx, now); - - // Deliver A's ACK back to sender - receiver_frames.clear(); - udpard_tx_poll(&receiver_tx, now, UDPARD_IFACE_BITMAP_ALL); - for (const auto& frame : receiver_frames) { - TEST_ASSERT_TRUE(udpard_rx_port_push( - &sender_rx, &sender_p2p_port, now, receiver_source, frame.datagram, tx_payload_deleter, frame.iface_index)); - } - receiver_frames.clear(); - udpard_rx_poll(&sender_rx, now); - - // Verify A was received - TEST_ASSERT_EQUAL_size_t(1, receiver_ctx.received_transfer_ids.size()); - TEST_ASSERT_EQUAL_UINT64(tid_a, receiver_ctx.received_transfer_ids[0]); - TEST_ASSERT_EQUAL_size_t(1, fb_a.count); - TEST_ASSERT_EQUAL_UINT32(1, fb_a.acknowledgements); - - // Step 2: Push transfers B and C - TEST_ASSERT_TRUE(udpard_tx_push(&sender_tx, - now, - deadline, - iface_bitmap_1, - udpard_prio_nominal, - topic_hash, - tid_b, - make_scattered(payload_b.data(), payload_b.size()), - &record_feedback, - make_user_context(&fb_b))); - - TEST_ASSERT_TRUE(udpard_tx_push(&sender_tx, - now, - deadline, - iface_bitmap_1, - udpard_prio_nominal, - topic_hash, - tid_c, - make_scattered(payload_c.data(), payload_c.size()), - &record_feedback, - make_user_context(&fb_c))); - - // Simulation state tracking - bool b_first_tx_dropped = 
false; - bool c_first_tx_done = false; - bool b_first_ack_dropped = false; - bool c_first_ack_dropped = false; - size_t iterations = 0; - constexpr size_t max_iterations = 100; - - // Main simulation loop - while (iterations < max_iterations) { - iterations++; - - // Sender transmits frames - sender_frames.clear(); - udpard_tx_poll(&sender_tx, now, UDPARD_IFACE_BITMAP_ALL); - - for (const auto& frame : sender_frames) { - const uint64_t tid = extract_transfer_id(frame.datagram); - - // First transmission of B is lost - if ((tid == tid_b) && !b_first_tx_dropped) { - b_first_tx_dropped = true; - drop_frame(frame); - continue; - } - - // Track first transmission of C - if ((tid == tid_c) && !c_first_tx_done) { - c_first_tx_done = true; - } - - // Deliver frame to receiver - TEST_ASSERT_TRUE(udpard_rx_port_push(&receiver_rx, - &receiver_topic_port, - now, - sender_source, - frame.datagram, - tx_payload_deleter, - frame.iface_index)); - } - sender_frames.clear(); - udpard_rx_poll(&receiver_rx, now); - - // Receiver transmits ACKs - receiver_frames.clear(); - udpard_tx_poll(&receiver_tx, now, UDPARD_IFACE_BITMAP_ALL); - - for (const auto& frame : receiver_frames) { - const uint64_t acked_tid = extract_acked_transfer_id(frame.datagram); - - // First ACK for B is lost - if ((acked_tid == tid_b) && !b_first_ack_dropped) { - b_first_ack_dropped = true; - drop_frame(frame); - continue; - } - - // First ACK for C is lost - if ((acked_tid == tid_c) && !c_first_ack_dropped) { - c_first_ack_dropped = true; - drop_frame(frame); - continue; - } - - // Deliver ACK to sender - TEST_ASSERT_TRUE(udpard_rx_port_push(&sender_rx, - &sender_p2p_port, - now, - receiver_source, - frame.datagram, - tx_payload_deleter, - frame.iface_index)); - } - receiver_frames.clear(); - udpard_rx_poll(&sender_rx, now); - - // Check termination condition: both B and C feedbacks received - if ((fb_b.count > 0) && (fb_c.count > 0)) { - break; - } - - // Advance time to trigger retransmission (2x baseline timeout) - now += ack_timeout * 2; - } - - // Wait for reordering window to close and eject pending transfers - now += reordering_window + 10000; - udpard_rx_poll(&receiver_rx, now); - - // Verify the simulation exercised all loss paths - TEST_ASSERT_TRUE(b_first_tx_dropped); - TEST_ASSERT_TRUE(c_first_tx_done); - TEST_ASSERT_TRUE(b_first_ack_dropped); - TEST_ASSERT_TRUE(c_first_ack_dropped); - TEST_ASSERT_LESS_THAN_size_t(max_iterations, iterations); - - // Verify sender received ACKs for all transfers - TEST_ASSERT_EQUAL_size_t(1, fb_b.count); - TEST_ASSERT_EQUAL_UINT32(1, fb_b.acknowledgements); - - TEST_ASSERT_EQUAL_size_t(1, fb_c.count); - TEST_ASSERT_EQUAL_UINT32(1, fb_c.acknowledgements); - - // CRITICAL: Verify receiver got exactly 3 transfers in correct order: A, B, then C - // This validates that ORDERED mode correctly reorders out-of-order arrivals. 
-    TEST_ASSERT_EQUAL_size_t(3, receiver_ctx.received_transfer_ids.size());
-    TEST_ASSERT_EQUAL_UINT64(tid_a, receiver_ctx.received_transfer_ids[0]);
-    TEST_ASSERT_EQUAL_UINT64(tid_b, receiver_ctx.received_transfer_ids[1]);
-    TEST_ASSERT_EQUAL_UINT64(tid_c, receiver_ctx.received_transfer_ids[2]);
-
-    // Cleanup
-    udpard_rx_port_free(&receiver_rx, &receiver_topic_port);
-    udpard_rx_port_free(&sender_rx, &sender_p2p_port);
-    udpard_tx_free(&sender_tx);
-    udpard_tx_free(&receiver_tx);
-
-    TEST_ASSERT_EQUAL_size_t(0, sender_tx_alloc_transfer.allocated_fragments);
-    TEST_ASSERT_EQUAL_size_t(0, sender_tx_alloc_payload.allocated_fragments);
-    TEST_ASSERT_EQUAL_size_t(0, sender_rx_alloc_frag.allocated_fragments);
-    TEST_ASSERT_EQUAL_size_t(0, sender_rx_alloc_session.allocated_fragments);
-    TEST_ASSERT_EQUAL_size_t(0, receiver_tx_alloc_transfer.allocated_fragments);
-    TEST_ASSERT_EQUAL_size_t(0, receiver_tx_alloc_payload.allocated_fragments);
-    TEST_ASSERT_EQUAL_size_t(0, receiver_rx_alloc_frag.allocated_fragments);
-    TEST_ASSERT_EQUAL_size_t(0, receiver_rx_alloc_session.allocated_fragments);
-
-    instrumented_allocator_reset(&sender_tx_alloc_transfer);
-    instrumented_allocator_reset(&sender_tx_alloc_payload);
-    instrumented_allocator_reset(&sender_rx_alloc_frag);
-    instrumented_allocator_reset(&sender_rx_alloc_session);
-    instrumented_allocator_reset(&receiver_tx_alloc_transfer);
-    instrumented_allocator_reset(&receiver_tx_alloc_payload);
-    instrumented_allocator_reset(&receiver_rx_alloc_frag);
-    instrumented_allocator_reset(&receiver_rx_alloc_session);
-}
-
-} // namespace
-
-extern "C" void setUp() {}
-
-extern "C" void tearDown() {}
-
-int main()
-{
-    UNITY_BEGIN();
-    RUN_TEST(test_reliable_ordered_with_loss_and_reordering);
-    return UNITY_END();
-}
diff --git a/tests/src/test_e2e_responses.cpp b/tests/src/test_e2e_responses.cpp
index 708a269..1ae3657 100644
--- a/tests/src/test_e2e_responses.cpp
+++ b/tests/src/test_e2e_responses.cpp
@@ -63,14 +63,14 @@ constexpr udpard_deleter_vtable_t tx_refcount_deleter_vt{ .free = &tx_refcount_f
 constexpr udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr };
 
-// Check the ACK flag in the Cyphal/UDP header.
+// Check the frame kind field in the Cyphal/UDP header; kind 2 is ACK (see cyphal_udp_header.dsdl).
-constexpr size_t HeaderSizeBytes = 48U; +constexpr size_t HeaderSizeBytes = 40U; bool is_ack_frame(const udpard_bytes_mut_t& datagram) { if (datagram.size < HeaderSizeBytes) { return false; } const auto* p = static_cast(datagram.data); - return (p[1] & 0x02U) != 0U; + return p[1] == 2U; } // -------------------------------------------------------------------------------------------------------------------- @@ -96,10 +96,9 @@ struct NodeBTopicContext { std::vector received_payload; std::array sender_sources{}; - uint64_t sender_uid = 0; - uint64_t received_topic = 0; - uint64_t received_tid = 0; - size_t message_count = 0; + uint64_t sender_uid = 0; + uint64_t received_tid = 0; + size_t message_count = 0; }; struct NodeAResponseContext @@ -135,8 +134,7 @@ void node_b_on_topic_message(udpard_rx_t* const rx, udpard_rx_port_t* const port for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { ctx->sender_sources[i] = transfer.remote.endpoints[i]; } - ctx->received_topic = port->topic_hash; - ctx->received_tid = transfer.transfer_id; + ctx->received_tid = transfer.transfer_id; ctx->received_payload.resize(transfer.payload_size_stored); const udpard_fragment_t* cursor = transfer.payload; @@ -145,10 +143,7 @@ void node_b_on_topic_message(udpard_rx_t* const rx, udpard_rx_port_t* const port udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); } -void on_collision(udpard_rx_t* const, udpard_rx_port_t* const, const udpard_remote_t) {} - -constexpr udpard_rx_port_vtable_t topic_callbacks{ .on_message = &node_b_on_topic_message, - .on_collision = &on_collision }; +constexpr udpard_rx_port_vtable_t topic_callbacks{ .on_message = &node_b_on_topic_message }; // Node A's P2P response reception callback - receives the response from B void node_a_on_p2p_response(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) @@ -169,8 +164,7 @@ void node_a_on_p2p_response(udpard_rx_t* const rx, udpard_rx_port_t* const port, udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); } -constexpr udpard_rx_port_vtable_t p2p_response_callbacks{ .on_message = &node_a_on_p2p_response, - .on_collision = &on_collision }; +constexpr udpard_rx_port_vtable_t p2p_response_callbacks{ .on_message = &node_a_on_p2p_response }; // ACK-only P2P port callback (for receiving ACKs, which have no user payload) void on_ack_only(udpard_rx_t*, udpard_rx_port_t* port, const udpard_rx_transfer_t tr) @@ -178,7 +172,7 @@ void on_ack_only(udpard_rx_t*, udpard_rx_port_t* port, const udpard_rx_transfer_ udpard_fragment_free_all(tr.payload, udpard_make_deleter(port->memory.fragment)); } -constexpr udpard_rx_port_vtable_t ack_only_callbacks{ .on_message = &on_ack_only, .on_collision = &on_collision }; +constexpr udpard_rx_port_vtable_t ack_only_callbacks{ .on_message = &on_ack_only }; // -------------------------------------------------------------------------------------------------------------------- // TEST: Basic topic message with P2P response flow @@ -248,7 +242,6 @@ void test_topic_with_p2p_response() udpard_udpip_ep_t{ .ip = 0x0A000013U, .port = 7502U }, }; - constexpr uint64_t topic_hash = 0x0123456789ABCDEFULL; constexpr uint64_t transfer_id = 42; // ================================================================================================================ @@ -269,8 +262,7 @@ void test_topic_with_p2p_response() // A's P2P port for receiving responses and ACKs udpard_rx_port_t a_p2p_port{}; - TEST_ASSERT_TRUE( - 
udpard_rx_port_new(&a_p2p_port, node_a_uid, 4096, udpard_rx_unordered, 0, a_rx_mem, &p2p_response_callbacks)); + TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&a_p2p_port, 4096, a_rx_mem, &p2p_response_callbacks)); // Node B: single TX, single RX (linked to TX for ACK processing) udpard_tx_t b_tx{}; @@ -287,13 +279,11 @@ void test_topic_with_p2p_response() // B's topic subscription port udpard_rx_port_t b_topic_port{}; - TEST_ASSERT_TRUE( - udpard_rx_port_new(&b_topic_port, topic_hash, 4096, udpard_rx_unordered, 0, b_rx_mem, &topic_callbacks)); + TEST_ASSERT_TRUE(udpard_rx_port_new(&b_topic_port, 4096, b_rx_mem, &topic_callbacks)); // B's P2P port for receiving response ACKs udpard_rx_port_t b_p2p_port{}; - TEST_ASSERT_TRUE( - udpard_rx_port_new(&b_p2p_port, node_b_uid, 16, udpard_rx_unordered, 0, b_rx_mem, &ack_only_callbacks)); + TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&b_p2p_port, 16, b_rx_mem, &ack_only_callbacks)); // ================================================================================================================ // PAYLOADS AND FEEDBACK STATES @@ -315,7 +305,6 @@ void test_topic_with_p2p_response() now + 1000000, iface_bitmap_1, udpard_prio_nominal, - topic_hash, transfer_id, topic_payload_scat, &record_feedback, @@ -535,7 +524,6 @@ void test_topic_with_p2p_response_under_loss() udpard_udpip_ep_t{}, }; - constexpr uint64_t topic_hash = 0xFEDCBA9876543210ULL; constexpr uint64_t transfer_id = 99; // ================================================================================================================ @@ -554,8 +542,7 @@ void test_topic_with_p2p_response_under_loss() a_rx.user = &a_node_ctx; udpard_rx_port_t a_p2p_port{}; - TEST_ASSERT_TRUE( - udpard_rx_port_new(&a_p2p_port, node_a_uid, 4096, udpard_rx_unordered, 0, a_rx_mem, &p2p_response_callbacks)); + TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&a_p2p_port, 4096, a_rx_mem, &p2p_response_callbacks)); udpard_tx_t b_tx{}; std::vector b_frames; @@ -570,12 +557,10 @@ void test_topic_with_p2p_response_under_loss() b_rx.user = &b_node_ctx; udpard_rx_port_t b_topic_port{}; - TEST_ASSERT_TRUE( - udpard_rx_port_new(&b_topic_port, topic_hash, 4096, udpard_rx_unordered, 0, b_rx_mem, &topic_callbacks)); + TEST_ASSERT_TRUE(udpard_rx_port_new(&b_topic_port, 4096, b_rx_mem, &topic_callbacks)); udpard_rx_port_t b_p2p_port{}; - TEST_ASSERT_TRUE( - udpard_rx_port_new(&b_p2p_port, node_b_uid, 16, udpard_rx_unordered, 0, b_rx_mem, &ack_only_callbacks)); + TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&b_p2p_port, 16, b_rx_mem, &ack_only_callbacks)); // ================================================================================================================ // PAYLOADS AND FEEDBACK STATES @@ -597,7 +582,6 @@ void test_topic_with_p2p_response_under_loss() now + 500000, iface_bitmap_1, udpard_prio_fast, - topic_hash, transfer_id, topic_payload_scat, &record_feedback, diff --git a/tests/src/test_integration_sockets.cpp b/tests/src/test_integration_sockets.cpp index d9255bb..24470cd 100644 --- a/tests/src/test_integration_sockets.cpp +++ b/tests/src/test_integration_sockets.cpp @@ -74,7 +74,6 @@ struct ReceivedTransfer { std::vector payload; uint64_t transfer_id; - uint64_t topic_hash; uint64_t remote_uid; size_t payload_size_wire; }; @@ -82,7 +81,6 @@ struct ReceivedTransfer struct TestContext { std::vector received_transfers; - size_t collisions = 0; }; // ===================================================================================================================== @@ -131,7 +129,6 @@ void on_message(udpard_rx_t* const rx, 
     if (ctx != nullptr) {
         ReceivedTransfer rt{};
         rt.transfer_id       = transfer.transfer_id;
-        rt.topic_hash        = port->topic_hash;
         rt.remote_uid        = transfer.remote.uid;
         rt.payload_size_wire = transfer.payload_size_wire;
 
@@ -145,15 +142,7 @@ void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpar
     udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment));
 }
 
-void on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const, const udpard_remote_t)
-{
-    auto* ctx = static_cast<TestContext*>(rx->user);
-    if (ctx != nullptr) {
-        ctx->collisions++;
-    }
-}
-
-constexpr udpard_rx_port_vtable_t rx_port_vtable{ .on_message = &on_message, .on_collision = &on_collision };
+constexpr udpard_rx_port_vtable_t rx_port_vtable{ .on_message = &on_message };
 
 // =====================================================================================================================
 // Fixtures and helpers
@@ -227,10 +216,10 @@ struct RxFixture
 };
 
 // Create a subject port.
-udpard_rx_port_t make_subject_port(const uint64_t topic_hash, const size_t extent, RxFixture& rx)
+udpard_rx_port_t make_subject_port(const size_t extent, RxFixture& rx)
 {
     udpard_rx_port_t port{};
-    TEST_ASSERT_TRUE(udpard_rx_port_new(&port, topic_hash, extent, udpard_rx_unordered, 0, rx.mem, &rx_port_vtable));
+    TEST_ASSERT_TRUE(udpard_rx_port_new(&port, extent, rx.mem, &rx_port_vtable));
     return port;
 }
 
@@ -278,7 +267,6 @@ void test_single_frame_transfer()
     seed_prng();
 
     constexpr uint64_t publisher_uid = 0x1111222233334444ULL;
-    constexpr uint64_t topic_hash    = 0x0123456789ABCDEFULL;
     constexpr uint64_t transfer_id   = 42U;
 
     // Set up publisher.
@@ -288,7 +276,7 @@
     // Set up subscriber.
     RxFixture sub{};
     sub.init();
-    udpard_rx_port_t sub_port = make_subject_port(topic_hash, 4096, sub);
+    udpard_rx_port_t sub_port = make_subject_port(4096, sub);
 
     // Send a small payload.
     const std::vector<std::uint8_t> payload = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };
@@ -302,7 +290,6 @@
                                     deadline,
                                     1U, // iface_bitmap: interface 0 only
                                     udpard_prio_nominal,
-                                    topic_hash,
                                     transfer_id,
                                     payload_view,
                                     nullptr,
@@ -318,11 +305,9 @@
     // Verify transfer.
     TEST_ASSERT_EQUAL_size_t(1, sub.ctx.received_transfers.size());
     TEST_ASSERT_EQUAL_UINT64(transfer_id, sub.ctx.received_transfers[0].transfer_id);
-    TEST_ASSERT_EQUAL_UINT64(topic_hash, sub.ctx.received_transfers[0].topic_hash);
     TEST_ASSERT_EQUAL_UINT64(publisher_uid, sub.ctx.received_transfers[0].remote_uid);
     TEST_ASSERT_EQUAL_size_t(payload.size(), sub.ctx.received_transfers[0].payload.size());
     TEST_ASSERT_EQUAL_MEMORY(payload.data(), sub.ctx.received_transfers[0].payload.data(), payload.size());
-    TEST_ASSERT_EQUAL_size_t(0, sub.ctx.collisions);
 
     // Cleanup.
     udpard_rx_port_free(&sub.rx, &sub_port);
@@ -336,7 +321,6 @@ void test_multi_frame_transfer()
     seed_prng();
 
     constexpr uint64_t publisher_uid = 0x5555666677778888ULL;
-    constexpr uint64_t topic_hash    = 0xFEDCBA9876543210ULL;
     constexpr size_t   payload_size  = 50000; // Large enough to require many frames
 
    // Set up publisher.
@@ -346,7 +330,7 @@
     // Set up subscriber.
     RxFixture sub{};
     sub.init();
-    udpard_rx_port_t sub_port = make_subject_port(topic_hash, payload_size + 1024, sub);
+    udpard_rx_port_t sub_port = make_subject_port(payload_size + 1024, sub);
 
     // Generate random payload.
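    // (With the 40-byte header introduced by this patch, each datagram carries MTU - 40 payload
    // bytes; assuming a conventional ~1400-byte UDP payload budget, that is ~1360 bytes per
    // frame, so this 50000-byte transfer spans roughly ceil(50000 / 1360) = 37 datagrams.
    // The exact count depends on the MTU configured in the TX fixture.)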
     const std::vector<std::uint8_t> payload = make_payload(payload_size);
@@ -360,7 +344,6 @@
                                     deadline,
                                     1U, // iface_bitmap
                                     udpard_prio_nominal,
-                                    topic_hash,
                                     100,
                                     payload_view,
                                     nullptr,
@@ -377,7 +360,6 @@
     TEST_ASSERT_EQUAL_size_t(1, sub.ctx.received_transfers.size());
     TEST_ASSERT_EQUAL_size_t(payload_size, sub.ctx.received_transfers[0].payload.size());
     TEST_ASSERT_EQUAL_MEMORY(payload.data(), sub.ctx.received_transfers[0].payload.data(), payload_size);
-    TEST_ASSERT_EQUAL_size_t(0, sub.ctx.collisions);
 
     // Cleanup.
     udpard_rx_port_free(&sub.rx, &sub_port);
@@ -391,7 +373,6 @@ void test_multi_frame_with_reordering()
     seed_prng();
 
     constexpr uint64_t publisher_uid = 0xABCDEF0123456789ULL;
-    constexpr uint64_t topic_hash    = 0x1234ABCD5678EF00ULL;
     constexpr size_t   payload_size  = 20000;
 
     NetworkSimulator sim(0.0, true, static_cast(rand())); // No loss, deterministic shuffle
@@ -403,7 +384,7 @@
     // Set up subscriber.
     RxFixture sub{};
     sub.init();
-    udpard_rx_port_t sub_port = make_subject_port(topic_hash, payload_size + 1024, sub);
+    udpard_rx_port_t sub_port = make_subject_port(payload_size + 1024, sub);
 
     // Generate random payload and send.
     const std::vector<std::uint8_t> payload = make_payload(payload_size);
@@ -415,7 +396,6 @@
                                     now + 5000000,
                                     1U, // iface_bitmap
                                     udpard_prio_nominal,
-                                    topic_hash,
                                     50,
                                     payload_view,
                                     nullptr,
@@ -432,7 +412,6 @@
     TEST_ASSERT_EQUAL_size_t(payload_size, sub.ctx.received_transfers[0].payload.size());
     TEST_ASSERT_EQUAL_MEMORY(payload.data(), sub.ctx.received_transfers[0].payload.data(), payload_size);
     TEST_ASSERT_TRUE((pub.frames.size() < 2U) || sim.reordered());
-    TEST_ASSERT_EQUAL_size_t(0, sub.ctx.collisions);
 
     // Cleanup.
     udpard_rx_port_free(&sub.rx, &sub_port);
@@ -445,15 +424,14 @@ void test_multiple_publishers()
 {
     seed_prng();
 
-    constexpr uint64_t topic_hash            = 0x1234567890ABCDEFULL;
-    constexpr size_t   num_publishers        = 3;
-    constexpr size_t   num_transfers_per_pub = 5;
-    constexpr size_t   payload_size          = 100;
+    constexpr size_t num_publishers        = 3;
+    constexpr size_t num_transfers_per_pub = 5;
+    constexpr size_t payload_size          = 100;
 
     // Set up subscriber.
     RxFixture sub{};
     sub.init();
-    udpard_rx_port_t sub_port = make_subject_port(topic_hash, 1024, sub);
+    udpard_rx_port_t sub_port = make_subject_port(1024, sub);
 
     // Set up publishers and send.
     std::array publishers{};
@@ -479,7 +457,6 @@
                                     now + 1000000,
                                     1U, // iface_bitmap
                                     udpard_prio_nominal,
-                                    topic_hash,
                                     transfer_id,
                                     payload_view,
                                     nullptr,
@@ -513,7 +490,6 @@
             TEST_ASSERT_EQUAL_MEMORY(expected_payloads[i][tid].data(), it->payload.data(), payload_size);
         }
     }
-    TEST_ASSERT_EQUAL_size_t(0, sub.ctx.collisions);
 
     // Cleanup.
     udpard_rx_port_free(&sub.rx, &sub_port);
@@ -529,7 +505,6 @@ void test_partial_frame_loss()
     seed_prng();
 
     constexpr uint64_t publisher_uid = 0xDEADBEEFCAFEBABEULL;
-    constexpr uint64_t topic_hash    = 0xABCDEF0123456789ULL;
     constexpr size_t   payload_size  = 5000; // Multi-frame transfer
 
     NetworkSimulator sim(0.35, false, static_cast(rand())); // Ensure some loss
@@ -541,7 +516,7 @@
     // Set up subscriber.
     RxFixture sub{};
     sub.init();
-    udpard_rx_port_t sub_port = make_subject_port(topic_hash, payload_size + 1024, sub);
+    udpard_rx_port_t sub_port = make_subject_port(payload_size + 1024, sub);
 
     // Generate payload and send.
     const std::vector<std::uint8_t> payload = make_payload(payload_size);
@@ -553,7 +528,6 @@
                                     now + 5000000,
                                     1U, // iface_bitmap
                                     udpard_prio_nominal,
-                                    topic_hash,
                                     50,
                                     payload_view,
                                     nullptr,
@@ -569,7 +543,6 @@
     // Verify incomplete transfer is dropped.
     TEST_ASSERT_TRUE(sim.dropped() > 0U);
     TEST_ASSERT_EQUAL_size_t(0, sub.ctx.received_transfers.size());
-    TEST_ASSERT_EQUAL_size_t(0, sub.ctx.collisions);
 
     // Cleanup.
     udpard_rx_port_free(&sub.rx, &sub_port);
@@ -583,7 +556,6 @@ void test_no_loss_baseline()
     seed_prng();
 
     constexpr uint64_t publisher_uid = 0xAAAABBBBCCCCDDDDULL;
-    constexpr uint64_t topic_hash    = 0x9999888877776666ULL;
     constexpr size_t   payload_size  = 10000;
 
     // Set up publisher.
@@ -593,7 +565,7 @@
     // Set up subscriber.
     RxFixture sub{};
     sub.init();
-    udpard_rx_port_t sub_port = make_subject_port(topic_hash, payload_size + 1024, sub);
+    udpard_rx_port_t sub_port = make_subject_port(payload_size + 1024, sub);
 
     // Generate payload and send.
     const std::vector<std::uint8_t> payload = make_payload(payload_size);
@@ -605,7 +577,6 @@
                                     now + 5000000,
                                     1U, // iface_bitmap
                                     udpard_prio_nominal,
-                                    topic_hash,
                                     75,
                                     payload_view,
                                     nullptr,
@@ -621,7 +592,6 @@
     TEST_ASSERT_EQUAL_size_t(1, sub.ctx.received_transfers.size());
     TEST_ASSERT_EQUAL_size_t(payload_size, sub.ctx.received_transfers[0].payload.size());
     TEST_ASSERT_EQUAL_MEMORY(payload.data(), sub.ctx.received_transfers[0].payload.data(), payload_size);
-    TEST_ASSERT_EQUAL_size_t(0, sub.ctx.collisions);
 
     // Cleanup.
     udpard_rx_port_free(&sub.rx, &sub_port);
@@ -635,7 +605,6 @@ void test_extent_truncation()
     seed_prng();
 
     constexpr uint64_t publisher_uid = 0x1234567890ABCDEFULL;
-    constexpr uint64_t topic_hash    = 0xFEDCBA0987654321ULL;
     constexpr size_t   payload_size  = 5000;
     constexpr size_t   extent        = 1000; // Less than payload_size
     // Set up publisher.
@@ -646,7 +615,7 @@
     // Set up subscriber with limited extent.
     RxFixture sub{};
     sub.init();
-    udpard_rx_port_t sub_port = make_subject_port(topic_hash, extent, sub);
+    udpard_rx_port_t sub_port = make_subject_port(extent, sub);
 
     // Generate payload and send.
     const std::vector<std::uint8_t> payload = make_payload(payload_size);
@@ -658,7 +627,6 @@
                                     now + 5000000,
                                     1U, // iface_bitmap
                                     udpard_prio_nominal,
-                                    topic_hash,
                                     100,
                                     payload_view,
                                     nullptr,
@@ -676,7 +644,6 @@
     TEST_ASSERT_EQUAL_size_t(payload_size, sub.ctx.received_transfers[0].payload_size_wire);
     TEST_ASSERT_EQUAL_MEMORY(
         payload.data(), sub.ctx.received_transfers[0].payload.data(), sub.ctx.received_transfers[0].payload.size());
-    TEST_ASSERT_EQUAL_size_t(0, sub.ctx.collisions);
 
     // Cleanup.
     udpard_rx_port_free(&sub.rx, &sub_port);
diff --git a/tests/src/test_intrusive_guards.c b/tests/src/test_intrusive_guards.c
index 3a37e93..d51620a 100644
--- a/tests/src/test_intrusive_guards.c
+++ b/tests/src/test_intrusive_guards.c
@@ -59,13 +59,6 @@ static void on_message_stub(udpard_rx_t* const rx, udpard_rx_port_t* const port,
     (void)transfer;
 }
 
-static void on_collision_stub(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_remote_t remote)
-{
-    (void)rx;
-    (void)port;
-    (void)remote;
-}
-
 static void test_mem_endpoint_list_guards(void)
 {
     // mem_same covers identical and divergent resources.
@@ -165,18 +158,18 @@ static void test_tx_guards(void)
     // Push helpers reject invalid timing and null handles.
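    // For contrast, a well-formed push under the new signature (the former topic-hash argument
    // is gone) would look like this sketch; values are illustrative, and empty_payload is the
    // scattered view declared just below:
    //
    //     udpard_tx_push(&tx,
    //                    0,                         // now
    //                    1000,                      // deadline; the (10, 5) case below is
    //                                               // rejected because it precedes "now"
    //                    (1U << 0U),                // iface bitmap: interface 0
    //                    udpard_prio_fast,
    //                    1U,                        // transfer-ID
    //                    empty_payload,
    //                    NULL,                      // no feedback callback
    //                    UDPARD_USER_CONTEXT_NULL);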
const uint16_t iface_bitmap_1 = (1U << 0U); const udpard_bytes_scattered_t empty_payload = { .bytes = { .size = 0U, .data = NULL }, .next = NULL }; - TEST_ASSERT_FALSE(udpard_tx_push( - &tx, 10, 5, iface_bitmap_1, udpard_prio_fast, 1U, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); - TEST_ASSERT_FALSE(udpard_tx_push( - NULL, 0, 0, iface_bitmap_1, udpard_prio_fast, 1U, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); + TEST_ASSERT_FALSE( + udpard_tx_push(&tx, 10, 5, iface_bitmap_1, udpard_prio_fast, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); + TEST_ASSERT_FALSE( + udpard_tx_push(NULL, 0, 0, iface_bitmap_1, udpard_prio_fast, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); TEST_ASSERT_FALSE(udpard_tx_push_p2p( NULL, 0, 0, udpard_prio_fast, (udpard_remote_t){ 0 }, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL, NULL)); // Reject invalid payload pointer and empty interface bitmap. const udpard_bytes_scattered_t bad_payload = { .bytes = { .size = 1U, .data = NULL }, .next = NULL }; TEST_ASSERT_FALSE( - udpard_tx_push(&tx, 0, 1, iface_bitmap_1, udpard_prio_fast, 1U, 1U, bad_payload, NULL, UDPARD_USER_CONTEXT_NULL)); + udpard_tx_push(&tx, 0, 1, iface_bitmap_1, udpard_prio_fast, 1U, bad_payload, NULL, UDPARD_USER_CONTEXT_NULL)); TEST_ASSERT_FALSE( - udpard_tx_push(&tx, 0, 1, 0U, udpard_prio_fast, 1U, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); + udpard_tx_push(&tx, 0, 1, 0U, udpard_prio_fast, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); const udpard_remote_t remote_bad = { .uid = 1, .endpoints = { { 0 } } }; TEST_ASSERT_FALSE( udpard_tx_push_p2p(&tx, 0, 1, udpard_prio_fast, remote_bad, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL, NULL)); @@ -214,15 +207,15 @@ static void test_rx_guards(void) static char rx_tag_a; static char rx_tag_b; const udpard_rx_mem_resources_t rx_mem = { .session = make_mem(&rx_tag_a), .fragment = make_mem(&rx_tag_b) }; - const udpard_rx_port_vtable_t rx_vtb = { .on_message = on_message_stub, .on_collision = on_collision_stub }; + const udpard_rx_port_vtable_t rx_vtb = { .on_message = on_message_stub }; udpard_rx_port_t port; - TEST_ASSERT_FALSE(udpard_rx_port_new(NULL, 0, 0, udpard_rx_ordered, 0, rx_mem, &rx_vtb)); - TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 0, 0, udpard_rx_ordered, 0, rx_mem, NULL)); - const udpard_rx_port_vtable_t rx_vtb_no_msg = { .on_message = NULL, .on_collision = on_collision_stub }; - TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 0, 0, udpard_rx_ordered, 0, rx_mem, &rx_vtb_no_msg)); + TEST_ASSERT_FALSE(udpard_rx_port_new(NULL, 0, rx_mem, &rx_vtb)); + TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 0, rx_mem, NULL)); + const udpard_rx_port_vtable_t rx_vtb_no_msg = { .on_message = NULL }; + TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 0, rx_mem, &rx_vtb_no_msg)); udpard_rx_mem_resources_t bad_rx_mem = rx_mem; bad_rx_mem.session.vtable = NULL; - TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 0, 0, udpard_rx_unordered, 0, bad_rx_mem, &rx_vtb)); + TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 0, bad_rx_mem, &rx_vtb)); // rx_validate_mem_resources rejects missing hooks. 
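    // (The rx_mem value built from make_mem() above supplies both alloc and free hooks for both
    // the session and fragment resources, so it passes this validation; the cases below knock
    // out one hook at a time.)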
const udpard_mem_vtable_t vtable_no_free = { .base = { .free = NULL }, .alloc = alloc_stub }; const udpard_mem_vtable_t vtable_no_alloc = { .base = { .free = free_noop }, .alloc = NULL }; @@ -236,10 +229,7 @@ static void test_rx_guards(void) TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_fragment)); bad_fragment.fragment.vtable = &vtable_no_alloc; TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_fragment)); - // NOLINTNEXTLINE(clang-analyzer-optin.core.EnumCastOutOfRange) - TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 0, 0, (udpard_rx_mode_t)99, 0, rx_mem, &rx_vtb)); - TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 0, 0, udpard_rx_ordered, (udpard_us_t)-1, rx_mem, &rx_vtb)); - TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 0xAA, 8U, udpard_rx_stateless, 0, rx_mem, &rx_vtb)); + TEST_ASSERT_TRUE(udpard_rx_port_new_stateless(&port, 8U, rx_mem, &rx_vtb)); // Invalid datagram inputs are rejected without processing. udpard_rx_t rx; diff --git a/tests/src/test_intrusive_header.c b/tests/src/test_intrusive_header.c index 0aa1eb7..bbc367e 100644 --- a/tests/src/test_intrusive_header.c +++ b/tests/src/test_intrusive_header.c @@ -11,11 +11,10 @@ static void test_header_v2(void) byte_t buffer[64]; meta_t meta_in = { .priority = udpard_prio_high, - .flag_reliable = false, + .kind = frame_msg_best, .transfer_payload_size = 0xDEADBEEF, .transfer_id = 0xAABBCCDDEEFF0011ULL, .sender_uid = 0x1122334455667788ULL, - .topic_hash = 0x99AABBCCDDEEFF00ULL, }; // For a first frame (frame_payload_offset=0), frame_index must also be 0 // Compute the correct prefix_crc from the payload @@ -42,14 +41,13 @@ static void test_header_v2(void) TEST_ASSERT_EQUAL(&buffer[HEADER_SIZE_BYTES], payload_out.data); TEST_ASSERT_EQUAL_UINT8(meta_in.priority, meta_out.priority); - TEST_ASSERT_FALSE(meta_out.flag_reliable); + TEST_ASSERT_EQUAL_UINT32(meta_in.kind, meta_out.kind); TEST_ASSERT_EQUAL_UINT32(0, frame_index); // First frame has index 0 TEST_ASSERT_EQUAL_UINT32(0, frame_payload_offset); // First frame has offset 0 TEST_ASSERT_EQUAL_UINT32(payload_crc, prefix_crc); // For first frame, prefix_crc equals payload CRC TEST_ASSERT_EQUAL_UINT32(meta_in.transfer_payload_size, meta_out.transfer_payload_size); TEST_ASSERT_EQUAL_UINT64(meta_in.transfer_id, meta_out.transfer_id); TEST_ASSERT_EQUAL_UINT64(meta_in.sender_uid, meta_out.sender_uid); - TEST_ASSERT_EQUAL_UINT64(meta_in.topic_hash, meta_out.topic_hash); TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = 23, .data = buffer }, &meta_out, @@ -78,11 +76,10 @@ static void test_header_deserialize_edge_cases(void) byte_t buffer[64]; meta_t meta_in = { .priority = udpard_prio_nominal, - .flag_reliable = true, + .kind = frame_msg_reliable, .transfer_payload_size = 1000, .transfer_id = 0x1234567890ABCDEFULL, .sender_uid = 0xFEDCBA9876543210ULL, - .topic_hash = 0xAAAAAAAAAAAAAAAAULL, }; meta_t meta_out; @@ -150,7 +147,7 @@ static void test_header_deserialize_edge_cases(void) &prefix_crc, &payload_out)); - // Test valid case with reliable flag (first frame, so prefix_crc must match payload) + // Test valid case with reliable kind (first frame, so prefix_crc must match payload) const uint32_t payload_crc_v4 = crc_full(sizeof(buffer) - HEADER_SIZE_BYTES, &buffer[HEADER_SIZE_BYTES]); header_serialize(buffer, meta_in, 0, 0, payload_crc_v4); TEST_ASSERT(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer }, @@ -159,12 +156,11 @@ static void test_header_deserialize_edge_cases(void) &frame_payload_offset, &prefix_crc, &payload_out)); - 
TEST_ASSERT_TRUE(meta_out.flag_reliable); + TEST_ASSERT_EQUAL_UINT32(frame_msg_reliable, meta_out.kind); TEST_ASSERT_EQUAL_UINT32(payload_crc_v4, prefix_crc); // Reject ACK frames with nonzero offset. - meta_in.flag_reliable = false; - meta_in.flag_acknowledgement = true; + meta_in.kind = frame_ack; header_serialize(buffer, meta_in, 1, 1, 0U); TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer }, &meta_out, @@ -173,10 +169,15 @@ static void test_header_deserialize_edge_cases(void) &prefix_crc, &payload_out)); - // Reject ACK + reliable flag combination. - meta_in.flag_reliable = true; - meta_in.flag_acknowledgement = true; + // Reject invalid kind. + meta_in.kind = frame_msg_best; header_serialize(buffer, meta_in, 0, 0, payload_crc_v4); + buffer[1] = 0xFFU; + const uint32_t new_crc3 = crc_full(HEADER_SIZE_BYTES - CRC_SIZE_BYTES, buffer); + buffer[HEADER_SIZE_BYTES - 4] = (byte_t)(new_crc3 & 0xFFU); + buffer[HEADER_SIZE_BYTES - 3] = (byte_t)((new_crc3 >> 8U) & 0xFFU); + buffer[HEADER_SIZE_BYTES - 2] = (byte_t)((new_crc3 >> 16U) & 0xFFU); + buffer[HEADER_SIZE_BYTES - 1] = (byte_t)((new_crc3 >> 24U) & 0xFFU); TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer }, &meta_out, &frame_index, diff --git a/tests/src/test_intrusive_rx.c b/tests/src/test_intrusive_rx.c index 5f9bbe3..1dec3bc 100644 --- a/tests/src/test_intrusive_rx.c +++ b/tests/src/test_intrusive_rx.c @@ -1191,10 +1191,9 @@ static void test_rx_slot_update(void) uint64_t errors_oom = 0; uint64_t errors_transfer_malformed = 0; - // Test 1: Initialize slot from idle state (slot->state != rx_slot_busy branch) + // Test 1: Initialize slot from idle state (slot->busy == false branch) { rx_slot_t slot = { 0 }; - slot.state = rx_slot_idle; rx_frame_t frame = { 0 }; frame.base = make_frame_base(mem_payload, 0, 5, "hello"); @@ -1204,10 +1203,13 @@ static void test_rx_slot_update(void) const udpard_us_t ts = 1000; - rx_slot_update(&slot, ts, mem_frag, del_payload, &frame, 5, &errors_oom, &errors_transfer_malformed); + // Single-frame transfer should complete immediately. + const bool done = + rx_slot_update(&slot, ts, mem_frag, del_payload, &frame, 5, &errors_oom, &errors_transfer_malformed); // Verify slot was initialized - TEST_ASSERT_EQUAL(rx_slot_done, slot.state); // Single-frame transfer completes immediately + TEST_ASSERT_TRUE(done); + TEST_ASSERT_FALSE(slot.busy); TEST_ASSERT_EQUAL(123, slot.transfer_id); TEST_ASSERT_EQUAL(ts, slot.ts_min); TEST_ASSERT_EQUAL(ts, slot.ts_max); @@ -1223,7 +1225,6 @@ static void test_rx_slot_update(void) // Test 2: Multi-frame transfer with timestamp updates (later/earlier branches) { rx_slot_t slot = { 0 }; - slot.state = rx_slot_idle; // First frame at offset 0 rx_frame_t frame1 = { 0 }; @@ -1233,9 +1234,12 @@ static void test_rx_slot_update(void) frame1.meta.transfer_payload_size = 10; const udpard_us_t ts1 = 2000; - rx_slot_update(&slot, ts1, mem_frag, del_payload, &frame1, 10, &errors_oom, &errors_transfer_malformed); + // First frame initializes slot but does not complete transfer. 
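        // Coverage after this frame (10-byte transfer, fragment [0, 3) interned):
        //
        //     byte index:  0 1 2 3 4 5 6 7 8 9
        //     covered:     # # # . . . . . . .     -> covered_prefix == 3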
+ const bool done1 = + rx_slot_update(&slot, ts1, mem_frag, del_payload, &frame1, 10, &errors_oom, &errors_transfer_malformed); - TEST_ASSERT_EQUAL(rx_slot_busy, slot.state); + TEST_ASSERT_FALSE(done1); + TEST_ASSERT_TRUE(slot.busy); TEST_ASSERT_EQUAL(ts1, slot.ts_min); TEST_ASSERT_EQUAL(ts1, slot.ts_max); TEST_ASSERT_EQUAL_size_t(3, slot.covered_prefix); @@ -1250,9 +1254,12 @@ static void test_rx_slot_update(void) frame2.meta.transfer_payload_size = 10; const udpard_us_t ts2 = 3000; // Later than ts1 - rx_slot_update(&slot, ts2, mem_frag, del_payload, &frame2, 10, &errors_oom, &errors_transfer_malformed); + // Later frame updates timestamps and CRC tracking. + const bool done2 = + rx_slot_update(&slot, ts2, mem_frag, del_payload, &frame2, 10, &errors_oom, &errors_transfer_malformed); - TEST_ASSERT_EQUAL(rx_slot_busy, slot.state); + TEST_ASSERT_FALSE(done2); + TEST_ASSERT_TRUE(slot.busy); TEST_ASSERT_EQUAL(ts1, slot.ts_min); // Unchanged (ts2 is later) TEST_ASSERT_EQUAL(ts2, slot.ts_max); // Updated to later time TEST_ASSERT_EQUAL_size_t(3, slot.covered_prefix); // Still 3 due to gap at [3-5) @@ -1267,9 +1274,12 @@ static void test_rx_slot_update(void) frame3.meta.transfer_payload_size = 10; const udpard_us_t ts3 = 1500; // Earlier than ts1 - rx_slot_update(&slot, ts3, mem_frag, del_payload, &frame3, 10, &errors_oom, &errors_transfer_malformed); + // Earlier frame updates ts_min and extends covered prefix. + const bool done3 = + rx_slot_update(&slot, ts3, mem_frag, del_payload, &frame3, 10, &errors_oom, &errors_transfer_malformed); - TEST_ASSERT_EQUAL(rx_slot_busy, slot.state); + TEST_ASSERT_FALSE(done3); + TEST_ASSERT_TRUE(slot.busy); TEST_ASSERT_EQUAL(ts3, slot.ts_min); // Updated to earlier time TEST_ASSERT_EQUAL(ts2, slot.ts_max); // Unchanged (ts3 is earlier) TEST_ASSERT_EQUAL_size_t(8, slot.covered_prefix); // Now contiguous 0-8 @@ -1284,7 +1294,6 @@ static void test_rx_slot_update(void) // Test 3: OOM handling (tree_res == rx_fragment_tree_oom branch) { rx_slot_t slot = { 0 }; - slot.state = rx_slot_idle; errors_oom = 0; // Limit allocations to trigger OOM @@ -1296,11 +1305,14 @@ static void test_rx_slot_update(void) frame.meta.transfer_id = 789; frame.meta.transfer_payload_size = 5; - rx_slot_update(&slot, 5000, mem_frag, del_payload, &frame, 5, &errors_oom, &errors_transfer_malformed); + // OOM should not complete the transfer. + const bool done = + rx_slot_update(&slot, 5000, mem_frag, del_payload, &frame, 5, &errors_oom, &errors_transfer_malformed); // Verify OOM error was counted + TEST_ASSERT_FALSE(done); TEST_ASSERT_EQUAL(1, errors_oom); - TEST_ASSERT_EQUAL(rx_slot_busy, slot.state); // Slot initialized but fragment not added + TEST_ASSERT_TRUE(slot.busy); // Slot initialized but fragment not added TEST_ASSERT_EQUAL_size_t(0, slot.covered_prefix); // No fragments accepted // Restore allocation limit @@ -1314,7 +1326,6 @@ static void test_rx_slot_update(void) // Test 4: Malformed transfer handling (CRC failure in rx_fragment_tree_finalize) { rx_slot_t slot = { 0 }; - slot.state = rx_slot_idle; errors_transfer_malformed = 0; // Single-frame transfer with incorrect CRC @@ -1324,11 +1335,14 @@ static void test_rx_slot_update(void) frame.meta.transfer_id = 999; frame.meta.transfer_payload_size = 4; - rx_slot_update(&slot, 6000, mem_frag, del_payload, &frame, 4, &errors_oom, &errors_transfer_malformed); + // CRC failure should reset the slot and report malformed. 
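        // (A reset drops every interned fragment and returns the slot to idle, which is why the
        // assertions below expect busy == false, covered_prefix == 0, and a NULL fragment tree;
        // a subsequent frame may then start a fresh transfer in the same slot.)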
+ const bool done = + rx_slot_update(&slot, 6000, mem_frag, del_payload, &frame, 4, &errors_oom, &errors_transfer_malformed); // Verify malformed error was counted and slot was reset + TEST_ASSERT_FALSE(done); TEST_ASSERT_EQUAL(1, errors_transfer_malformed); - TEST_ASSERT_EQUAL(rx_slot_idle, slot.state); // Slot reset after CRC failure + TEST_ASSERT_FALSE(slot.busy); // Slot reset after CRC failure TEST_ASSERT_EQUAL_size_t(0, slot.covered_prefix); TEST_ASSERT_NULL(slot.fragments); @@ -1340,7 +1354,6 @@ static void test_rx_slot_update(void) // Test 5: Successful completion with correct CRC (tree_res == rx_fragment_tree_done, CRC pass) { rx_slot_t slot = { 0 }; - slot.state = rx_slot_idle; errors_transfer_malformed = 0; errors_oom = 0; @@ -1356,11 +1369,14 @@ static void test_rx_slot_update(void) frame.meta.transfer_id = 1111; frame.meta.transfer_payload_size = 4; - rx_slot_update(&slot, 7000, mem_frag, del_payload, &frame, 4, &errors_oom, &errors_transfer_malformed); + // Correct CRC should complete the transfer. + const bool done = + rx_slot_update(&slot, 7000, mem_frag, del_payload, &frame, 4, &errors_oom, &errors_transfer_malformed); // Verify successful completion + TEST_ASSERT_TRUE(done); TEST_ASSERT_EQUAL(0, errors_transfer_malformed); - TEST_ASSERT_EQUAL(rx_slot_done, slot.state); // Successfully completed + TEST_ASSERT_FALSE(slot.busy); // Successfully completed TEST_ASSERT_EQUAL_size_t(4, slot.covered_prefix); TEST_ASSERT_NOT_NULL(slot.fragments); @@ -1372,7 +1388,6 @@ static void test_rx_slot_update(void) // Test 6: CRC end update only when crc_end >= slot->crc_end { rx_slot_t slot = { 0 }; - slot.state = rx_slot_idle; errors_transfer_malformed = 0; errors_oom = 0; @@ -1383,8 +1398,12 @@ static void test_rx_slot_update(void) frame1.meta.transfer_id = 2222; frame1.meta.transfer_payload_size = 20; - rx_slot_update(&slot, 8000, mem_frag, del_payload, &frame1, 20, &errors_oom, &errors_transfer_malformed); + // First frame initializes CRC tracking. + const bool done1 = + rx_slot_update(&slot, 8000, mem_frag, del_payload, &frame1, 20, &errors_oom, &errors_transfer_malformed); + TEST_ASSERT_FALSE(done1); + TEST_ASSERT_TRUE(slot.busy); TEST_ASSERT_EQUAL(10, slot.crc_end); TEST_ASSERT_EQUAL(0xAAAAAAAA, slot.crc); @@ -1395,8 +1414,12 @@ static void test_rx_slot_update(void) frame2.meta.transfer_id = 2222; frame2.meta.transfer_payload_size = 20; - rx_slot_update(&slot, 8100, mem_frag, del_payload, &frame2, 20, &errors_oom, &errors_transfer_malformed); + // Earlier CRC end should not update tracking. + const bool done2 = + rx_slot_update(&slot, 8100, mem_frag, del_payload, &frame2, 20, &errors_oom, &errors_transfer_malformed); + TEST_ASSERT_FALSE(done2); + TEST_ASSERT_TRUE(slot.busy); TEST_ASSERT_EQUAL(10, slot.crc_end); // Unchanged TEST_ASSERT_EQUAL(0xAAAAAAAA, slot.crc); // Unchanged (frame2 didn't update it) @@ -1407,8 +1430,12 @@ static void test_rx_slot_update(void) frame3.meta.transfer_id = 2222; frame3.meta.transfer_payload_size = 20; - rx_slot_update(&slot, 8200, mem_frag, del_payload, &frame3, 20, &errors_oom, &errors_transfer_malformed); + // Later CRC end should update tracking. 
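        // (The slot tracks a single (crc, crc_end) pair: the CRC of the longest payload prefix
        // for which a verified checksum is known. A frame replaces it only when its prefix_crc
        // covers at least as many bytes; here crc_end moves 10 -> 15 and the CRC follows.)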
+ const bool done3 = + rx_slot_update(&slot, 8200, mem_frag, del_payload, &frame3, 20, &errors_oom, &errors_transfer_malformed); + TEST_ASSERT_FALSE(done3); + TEST_ASSERT_TRUE(slot.busy); TEST_ASSERT_EQUAL(15, slot.crc_end); // Updated TEST_ASSERT_EQUAL(0xCCCCCCCC, slot.crc); // Updated @@ -1420,7 +1447,6 @@ static void test_rx_slot_update(void) // Test 7: Inconsistent frame fields; suspicious transfer rejected. { rx_slot_t slot = { 0 }; - slot.state = rx_slot_idle; errors_transfer_malformed = 0; errors_oom = 0; @@ -1432,9 +1458,12 @@ static void test_rx_slot_update(void) frame1.meta.transfer_payload_size = 20; frame1.meta.priority = udpard_prio_high; - rx_slot_update(&slot, 9000, mem_frag, del_payload, &frame1, 20, &errors_oom, &errors_transfer_malformed); + // First frame initializes the slot. + const bool done1 = + rx_slot_update(&slot, 9000, mem_frag, del_payload, &frame1, 20, &errors_oom, &errors_transfer_malformed); - TEST_ASSERT_EQUAL(rx_slot_busy, slot.state); + TEST_ASSERT_FALSE(done1); + TEST_ASSERT_TRUE(slot.busy); TEST_ASSERT_EQUAL(20, slot.total_size); TEST_ASSERT_EQUAL(udpard_prio_high, slot.priority); TEST_ASSERT_EQUAL_size_t(5, slot.covered_prefix); @@ -1448,11 +1477,14 @@ static void test_rx_slot_update(void) frame2.meta.transfer_payload_size = 25; // DIFFERENT from frame1's 20 frame2.meta.priority = udpard_prio_high; - rx_slot_update(&slot, 9100, mem_frag, del_payload, &frame2, 25, &errors_oom, &errors_transfer_malformed); + // Inconsistent total_size should reset the slot. + const bool done2 = + rx_slot_update(&slot, 9100, mem_frag, del_payload, &frame2, 25, &errors_oom, &errors_transfer_malformed); // Verify that the malformed error was counted and slot was reset + TEST_ASSERT_FALSE(done2); TEST_ASSERT_EQUAL(1, errors_transfer_malformed); - TEST_ASSERT_EQUAL(rx_slot_idle, slot.state); // Slot reset due to inconsistent total_size + TEST_ASSERT_FALSE(slot.busy); // Slot reset due to inconsistent total_size TEST_ASSERT_EQUAL_size_t(0, slot.covered_prefix); TEST_ASSERT_NULL(slot.fragments); @@ -1467,9 +1499,12 @@ static void test_rx_slot_update(void) frame3.meta.transfer_payload_size = 30; frame3.meta.priority = udpard_prio_low; - rx_slot_update(&slot, 9200, mem_frag, del_payload, &frame3, 30, &errors_oom, &errors_transfer_malformed); + // Reinitialize after reset. + const bool done3 = + rx_slot_update(&slot, 9200, mem_frag, del_payload, &frame3, 30, &errors_oom, &errors_transfer_malformed); - TEST_ASSERT_EQUAL(rx_slot_busy, slot.state); + TEST_ASSERT_FALSE(done3); + TEST_ASSERT_TRUE(slot.busy); TEST_ASSERT_EQUAL(30, slot.total_size); TEST_ASSERT_EQUAL(udpard_prio_low, slot.priority); TEST_ASSERT_EQUAL_size_t(5, slot.covered_prefix); @@ -1483,11 +1518,14 @@ static void test_rx_slot_update(void) frame4.meta.transfer_payload_size = 30; // Same as frame3 frame4.meta.priority = udpard_prio_high; // DIFFERENT from frame3's udpard_prio_low - rx_slot_update(&slot, 9300, mem_frag, del_payload, &frame4, 30, &errors_oom, &errors_transfer_malformed); + // Inconsistent priority should reset the slot. 
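        // (All frames of one transfer must agree on priority and transfer_payload_size; any
        // mismatch is counted as malformed and clears the slot, as frames 2, 4, and 6 of this
        // test demonstrate.)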
+ const bool done4 = + rx_slot_update(&slot, 9300, mem_frag, del_payload, &frame4, 30, &errors_oom, &errors_transfer_malformed); // Verify that the malformed error was counted and slot was reset + TEST_ASSERT_FALSE(done4); TEST_ASSERT_EQUAL(1, errors_transfer_malformed); - TEST_ASSERT_EQUAL(rx_slot_idle, slot.state); // Slot reset due to inconsistent priority + TEST_ASSERT_FALSE(slot.busy); // Slot reset due to inconsistent priority TEST_ASSERT_EQUAL_size_t(0, slot.covered_prefix); TEST_ASSERT_NULL(slot.fragments); @@ -1502,9 +1540,12 @@ static void test_rx_slot_update(void) frame5.meta.transfer_payload_size = 40; frame5.meta.priority = udpard_prio_nominal; - rx_slot_update(&slot, 9400, mem_frag, del_payload, &frame5, 40, &errors_oom, &errors_transfer_malformed); + // Reinitialize after reset. + const bool done5 = + rx_slot_update(&slot, 9400, mem_frag, del_payload, &frame5, 40, &errors_oom, &errors_transfer_malformed); - TEST_ASSERT_EQUAL(rx_slot_busy, slot.state); + TEST_ASSERT_FALSE(done5); + TEST_ASSERT_TRUE(slot.busy); TEST_ASSERT_EQUAL(40, slot.total_size); TEST_ASSERT_EQUAL(udpard_prio_nominal, slot.priority); TEST_ASSERT_EQUAL_size_t(5, slot.covered_prefix); @@ -1518,11 +1559,14 @@ static void test_rx_slot_update(void) frame6.meta.transfer_payload_size = 50; // DIFFERENT from frame5's 40 frame6.meta.priority = udpard_prio_fast; // DIFFERENT from frame5's udpard_prio_nominal - rx_slot_update(&slot, 9500, mem_frag, del_payload, &frame6, 50, &errors_oom, &errors_transfer_malformed); + // Inconsistent priority and total_size should reset the slot. + const bool done6 = + rx_slot_update(&slot, 9500, mem_frag, del_payload, &frame6, 50, &errors_oom, &errors_transfer_malformed); // Verify that the malformed error was counted and slot was reset + TEST_ASSERT_FALSE(done6); TEST_ASSERT_EQUAL(1, errors_transfer_malformed); - TEST_ASSERT_EQUAL(rx_slot_idle, slot.state); // Slot reset due to both inconsistencies + TEST_ASSERT_FALSE(slot.busy); // Slot reset due to both inconsistencies TEST_ASSERT_EQUAL_size_t(0, slot.covered_prefix); TEST_ASSERT_NULL(slot.fragments); @@ -1538,60 +1582,12 @@ static void test_rx_slot_update(void) // --------------------------------------------- RX SESSION --------------------------------------------- -static void test_rx_transfer_id_forward_distance(void) -{ - // Test 1: Same value (distance is 0) - TEST_ASSERT_EQUAL_UINT64(0, rx_transfer_id_forward_distance(0, 0)); - TEST_ASSERT_EQUAL_UINT64(0, rx_transfer_id_forward_distance(100, 100)); - TEST_ASSERT_EQUAL_UINT64(0, rx_transfer_id_forward_distance(UINT64_MAX, UINT64_MAX)); - - // Test 2: Simple forward distance (no wraparound) - TEST_ASSERT_EQUAL_UINT64(1, rx_transfer_id_forward_distance(0, 1)); - TEST_ASSERT_EQUAL_UINT64(10, rx_transfer_id_forward_distance(5, 15)); - TEST_ASSERT_EQUAL_UINT64(100, rx_transfer_id_forward_distance(200, 300)); - TEST_ASSERT_EQUAL_UINT64(1000, rx_transfer_id_forward_distance(1000, 2000)); - - // Test 3: Wraparound at UINT64_MAX - TEST_ASSERT_EQUAL_UINT64(1, rx_transfer_id_forward_distance(UINT64_MAX, 0)); - TEST_ASSERT_EQUAL_UINT64(2, rx_transfer_id_forward_distance(UINT64_MAX, 1)); - TEST_ASSERT_EQUAL_UINT64(10, rx_transfer_id_forward_distance(UINT64_MAX - 5, 4)); - TEST_ASSERT_EQUAL_UINT64(100, rx_transfer_id_forward_distance(UINT64_MAX - 49, 50)); - - // Test 4: Large forward distances - TEST_ASSERT_EQUAL_UINT64(UINT64_MAX, rx_transfer_id_forward_distance(0, UINT64_MAX)); - TEST_ASSERT_EQUAL_UINT64(UINT64_MAX, rx_transfer_id_forward_distance(1, 0)); - 
TEST_ASSERT_EQUAL_UINT64(UINT64_MAX - 1, rx_transfer_id_forward_distance(0, UINT64_MAX - 1)); - TEST_ASSERT_EQUAL_UINT64(UINT64_MAX, rx_transfer_id_forward_distance(2, 1)); - - // Test 5: Half-way point (2^63) - const uint64_t half = 1ULL << 63U; - TEST_ASSERT_EQUAL_UINT64(half, rx_transfer_id_forward_distance(0, half)); - TEST_ASSERT_EQUAL_UINT64(half, rx_transfer_id_forward_distance(100, 100 + half)); - TEST_ASSERT_EQUAL_UINT64(half, rx_transfer_id_forward_distance(UINT64_MAX, half - 1)); - - // Test 6: Backward is interpreted as large forward distance - // Going from 10 to 5 is actually going forward by UINT64_MAX - 4 - TEST_ASSERT_EQUAL_UINT64(UINT64_MAX - 4, rx_transfer_id_forward_distance(10, 5)); - TEST_ASSERT_EQUAL_UINT64(UINT64_MAX - 9, rx_transfer_id_forward_distance(100, 90)); - - // Test 7: Edge cases around 0 - TEST_ASSERT_EQUAL_UINT64(UINT64_MAX, rx_transfer_id_forward_distance(1, 0)); - TEST_ASSERT_EQUAL_UINT64(1, rx_transfer_id_forward_distance(0, 1)); - - // Test 8: Random large numbers - TEST_ASSERT_EQUAL_UINT64(0x123456789ABCDEF0ULL - 0x0FEDCBA987654321ULL, - rx_transfer_id_forward_distance(0x0FEDCBA987654321ULL, 0x123456789ABCDEF0ULL)); -} - // Captures ack transfers emitted into the TX pipelines. typedef struct { udpard_prio_t priority; uint64_t transfer_id; - uint64_t topic_hash; udpard_udpip_ep_t destination; - uint64_t acked_topic_hash; - uint64_t acked_transfer_id; } ack_tx_info_t; typedef struct @@ -1631,16 +1627,11 @@ static bool tx_capture_ack_p2p(udpard_tx_t* const tx, &frame_offset, &prefix_crc, &payload); - if (ok && (frame_index == 0U) && (frame_offset == 0U) && meta.flag_acknowledgement && - (payload.size >= ACK_SIZE_BYTES)) { - const byte_t* const pl = (const byte_t*)payload.data; + if (ok && (frame_index == 0U) && (frame_offset == 0U) && (meta.kind == frame_ack) && (payload.size == 0U)) { ack_tx_info_t* const info = &self->captured[self->captured_count++]; info->priority = meta.priority; info->transfer_id = meta.transfer_id; - info->topic_hash = meta.topic_hash; info->destination = destination; - (void)deserialize_u64(pl + 0U, &info->acked_topic_hash); - (void)deserialize_u64(pl + 8U, &info->acked_transfer_id); } udpard_tx_refcount_dec(ejection->datagram); return true; @@ -1685,11 +1676,6 @@ typedef struct uint64_t count; } message; struct - { - udpard_remote_t remote; - uint64_t count; - } collision; - struct { ack_tx_info_t last; uint64_t count; @@ -1712,15 +1698,7 @@ static void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, cons cb_result->message.count++; } -static void on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_remote_t remote) -{ - callback_result_t* const cb_result = (callback_result_t* const)rx->user; - cb_result->rx = rx; - cb_result->port = port; - cb_result->collision.remote = remote; - cb_result->collision.count++; -} -static const udpard_rx_port_vtable_t callbacks = { &on_message, &on_collision }; +static const udpard_rx_port_vtable_t callbacks = { .on_message = &on_message }; /// Checks that ack transfers are emitted into the TX queues. 
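/// Under the revised header an ACK is recognized from the header alone, with no payload to
/// parse: kind == frame_ack, an empty payload, and meta.transfer_id naming the acknowledged
/// transfer. The capture predicate used by tx_capture_ack_p2p above is, in sketch form:
///
///     ok && (frame_index == 0U) && (frame_offset == 0U) &&
///     (meta.kind == frame_ack) && (payload.size == 0U)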
static void test_rx_ack_enqueued(void) @@ -1748,13 +1726,10 @@ static void test_rx_ack_enqueued(void) callback_result_t cb_result = { 0 }; rx.user = &cb_result; - const uint64_t topic_hash = 0x4E81E200CB479D4CULL; - udpard_rx_port_t port; - const udpard_rx_mode_t mode = udpard_rx_unordered; - const udpard_us_t window = 0; - const uint64_t remote_uid = 0xA1B2C3D4E5F60718ULL; - const size_t extent = 1000; - TEST_ASSERT(udpard_rx_port_new(&port, topic_hash, extent, mode, window, rx_mem, &callbacks)); + udpard_rx_port_t port; + const uint64_t remote_uid = 0xA1B2C3D4E5F60718ULL; + const size_t extent = 1000; + TEST_ASSERT(udpard_rx_port_new(&port, extent, rx_mem, &callbacks)); rx_session_factory_args_t fac_args = { .owner = &port, .sessions_by_animation = &rx.list_session_by_animation, @@ -1769,11 +1744,10 @@ static void test_rx_ack_enqueued(void) TEST_ASSERT_NOT_NULL(ses); meta_t meta = { .priority = udpard_prio_high, - .flag_reliable = true, + .kind = frame_msg_reliable, .transfer_payload_size = 5, .transfer_id = 77, - .sender_uid = remote_uid, - .topic_hash = topic_hash }; + .sender_uid = remote_uid }; udpard_us_t now = 0; const udpard_udpip_ep_t ep0 = { .ip = 0x0A000001, .port = 0x1234 }; now += 100; @@ -1785,8 +1759,7 @@ static void test_rx_ack_enqueued(void) cb_result.ack.last = tx_fix.captured[tx_fix.captured_count - 1U]; } TEST_ASSERT(cb_result.ack.count >= 1); - TEST_ASSERT_EQUAL_UINT64(topic_hash, cb_result.ack.last.acked_topic_hash); - TEST_ASSERT_EQUAL_UINT64(meta.transfer_id, cb_result.ack.last.acked_transfer_id); + TEST_ASSERT_EQUAL_UINT64(meta.transfer_id, cb_result.ack.last.transfer_id); TEST_ASSERT_EQUAL_UINT32(ep0.ip, cb_result.ack.last.destination.ip); TEST_ASSERT_EQUAL_UINT16(ep0.port, cb_result.ack.last.destination.port); @@ -1804,7 +1777,7 @@ static void test_rx_ack_enqueued(void) cb_result.ack.last = tx_fix.captured[tx_fix.captured_count - 1U]; } TEST_ASSERT(cb_result.ack.count >= 2); // acks on interfaces 0 and 1 - TEST_ASSERT_EQUAL_UINT64(meta.transfer_id, cb_result.ack.last.acked_transfer_id); + TEST_ASSERT_EQUAL_UINT64(meta.transfer_id, cb_result.ack.last.transfer_id); udpard_rx_port_free(&rx, &port); tx_fixture_free(&tx_fix); @@ -1816,150 +1789,6 @@ static void test_rx_ack_enqueued(void) instrumented_allocator_reset(&alloc_payload); } -/// Tests the ORDERED reassembly mode (strictly increasing transfer-ID sequence). 
-static void test_rx_session_ordered(void) -{ - instrumented_allocator_t alloc_frag = { 0 }; - instrumented_allocator_new(&alloc_frag); - const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - - instrumented_allocator_t alloc_session = { 0 }; - instrumented_allocator_new(&alloc_session); - const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); - - instrumented_allocator_t alloc_payload = { 0 }; - instrumented_allocator_new(&alloc_payload); - const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); - const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - - const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - - udpard_rx_t rx; - udpard_rx_new(&rx, NULL); - callback_result_t cb_result = { 0 }; - rx.user = &cb_result; - - udpard_us_t now = 0; - const uint64_t remote_uid = 0xA1B2C3D4E5F60718ULL; - udpard_rx_port_t port; - TEST_ASSERT( - udpard_rx_port_new(&port, 0x4E81E200CB479D4CULL, 1000, udpard_rx_ordered, 20 * KILO, rx_mem, &callbacks)); - rx_session_factory_args_t fac_args = { - .owner = &port, - .sessions_by_animation = &rx.list_session_by_animation, - .remote_uid = remote_uid, - .now = now, - }; - rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, - &remote_uid, - &cavl_compare_rx_session_by_remote_uid, - &fac_args, - &cavl_factory_rx_session_by_remote_uid); - TEST_ASSERT_NOT_NULL(ses); - - meta_t meta = { .priority = udpard_prio_high, - .flag_reliable = true, - .transfer_payload_size = 10, - .transfer_id = 42, - .sender_uid = remote_uid, - .topic_hash = port.topic_hash }; - now += 1000; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - make_frame_ptr(meta, mem_payload, "0123456789", 5, 5), - del_payload, - 0); - now += 1000; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000002, .port = 0x4321 }, - make_frame_ptr(meta, mem_payload, "0123456789", 0, 5), - del_payload, - 2); - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(udpard_prio_high, cb_result.message.history[0].priority); - TEST_ASSERT_EQUAL(42, cb_result.message.history[0].transfer_id); - TEST_ASSERT_EQUAL(remote_uid, cb_result.message.history[0].remote.uid); - TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], 10, "0123456789", 10)); - udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); - cb_result.message.history[0].payload = NULL; - cb_result.message.history[0].payload = NULL; - cb_result.message.history[0].payload = NULL; - - meta.flag_reliable = false; - now += 500; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000003, .port = 0x1111 }, - make_frame_ptr(meta, mem_payload, "abcdef", 0, 6), - del_payload, - 1); - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - - meta.flag_reliable = true; - meta.transfer_payload_size = 3; - meta.transfer_id = 44; - now += 500; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - make_frame_ptr(meta, mem_payload, "444", 0, 3), - del_payload, - 0); - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(1, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_payload.allocated_fragments); - - meta.transfer_id = 43; - now += 500; 
- rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - make_frame_ptr(meta, mem_payload, "433", 0, 3), - del_payload, - 0); - udpard_rx_poll(&rx, now); - TEST_ASSERT_EQUAL(3, cb_result.message.count); - TEST_ASSERT_EQUAL(44, cb_result.message.history[0].transfer_id); - TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], 3, "444", 3)); - TEST_ASSERT_EQUAL(43, cb_result.message.history[1].transfer_id); - TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[1], 3, "433", 3)); - udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); - cb_result.message.history[0].payload = NULL; - udpard_fragment_free_all(cb_result.message.history[1].payload, udpard_make_deleter(mem_frag)); - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - - now += 25 * KILO; - meta.transfer_id = 41; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - make_frame_ptr(meta, mem_payload, "old", 0, 3), - del_payload, - 0); - TEST_ASSERT_EQUAL(3, cb_result.message.count); - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - - udpard_rx_port_free(&rx, &port); - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_session); - instrumented_allocator_reset(&alloc_payload); -} - static void test_rx_session_unordered(void) { // Memory and rx for P2P unordered session. @@ -1980,9 +1809,8 @@ static void test_rx_session_unordered(void) callback_result_t cb_result = { 0 }; rx.user = &cb_result; - const uint64_t topic_hash = 0xC3C8E4974254E1F5ULL; - udpard_rx_port_t port = { 0 }; - TEST_ASSERT(udpard_rx_port_new(&port, topic_hash, SIZE_MAX, udpard_rx_unordered, 0, rx_mem, &callbacks)); + udpard_rx_port_t port = { 0 }; + TEST_ASSERT(udpard_rx_port_new(&port, SIZE_MAX, rx_mem, &callbacks)); udpard_us_t now = 0; const uint64_t remote_uid = 0xA1B2C3D4E5F60718ULL; @@ -2001,11 +1829,10 @@ static void test_rx_session_unordered(void) // Single-frame transfer is ejected immediately. 
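    // (A first frame that already covers transfer_payload_size bytes needs no reassembly state:
    // its prefix CRC doubles as the whole-transfer CRC, so the transfer is ejected from within
    // this very rx_session_update call.)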
meta_t meta = { .priority = udpard_prio_high, - .flag_reliable = false, + .kind = frame_msg_best, .transfer_payload_size = 5, .transfer_id = 100, - .sender_uid = remote_uid, - .topic_hash = port.topic_hash }; + .sender_uid = remote_uid }; now += 1000; rx_session_update(ses, &rx, @@ -2066,7 +1893,7 @@ static void test_rx_session_unordered(void) meta.transfer_id = 200; meta.transfer_payload_size = 10; meta.priority = udpard_prio_fast; - meta.flag_reliable = true; + meta.kind = frame_msg_reliable; now += 500; rx_session_update(ses, &rx, @@ -2124,9 +1951,8 @@ static void test_rx_session_unordered_reject_old(void) callback_result_t cb_result = { 0 }; rx.user = &cb_result; - const uint64_t local_uid = 0xFACEB00CFACEB00CULL; - udpard_rx_port_t port = { 0 }; - TEST_ASSERT(udpard_rx_port_new(&port, local_uid, SIZE_MAX, udpard_rx_unordered, 0, rx_mem, &callbacks)); + udpard_rx_port_t port = { 0 }; + TEST_ASSERT(udpard_rx_port_new(&port, SIZE_MAX, rx_mem, &callbacks)); udpard_us_t now = 0; const uint64_t remote_uid = 0x0123456789ABCDEFULL; @@ -2144,11 +1970,10 @@ static void test_rx_session_unordered_reject_old(void) TEST_ASSERT_NOT_NULL(ses); meta_t meta = { .priority = udpard_prio_fast, - .flag_reliable = false, + .kind = frame_msg_best, .transfer_payload_size = 3, .transfer_id = 10, - .sender_uid = remote_uid, - .topic_hash = local_uid }; + .sender_uid = remote_uid }; now += 1000; rx_session_update(ses, &rx, @@ -2178,7 +2003,7 @@ static void test_rx_session_unordered_reject_old(void) meta.transfer_id = 10; meta.transfer_payload_size = 3; - meta.flag_reliable = true; + meta.kind = frame_msg_reliable; now += 1000; rx_session_update(ses, &rx, @@ -2194,8 +2019,8 @@ static void test_rx_session_unordered_reject_old(void) cb_result.ack.last = tx_fix.captured[tx_fix.captured_count - 1U]; } TEST_ASSERT_GREATER_OR_EQUAL_UINT64(1, cb_result.ack.count); - TEST_ASSERT_EQUAL_UINT64(10, cb_result.ack.last.acked_transfer_id); - TEST_ASSERT_EQUAL_UINT64(port.topic_hash, cb_result.ack.last.acked_topic_hash); + TEST_ASSERT_EQUAL_UINT64(10, cb_result.ack.last.transfer_id); + TEST_ASSERT_EQUAL_UINT64(meta.transfer_id, cb_result.ack.last.transfer_id); udpard_rx_port_free(&rx, &port); tx_fixture_free(&tx_fix); @@ -2228,7 +2053,7 @@ static void test_rx_session_unordered_duplicates(void) rx.user = &cb_result; udpard_rx_port_t port = { 0 }; - TEST_ASSERT(udpard_rx_port_new(&port, 0xFEE1DEADBEEFF00DULL, SIZE_MAX, udpard_rx_unordered, 0, rx_mem, &callbacks)); + TEST_ASSERT(udpard_rx_port_new(&port, SIZE_MAX, rx_mem, &callbacks)); udpard_us_t now = 0; const uint64_t remote_uid = 0xAABBCCDDEEFF0011ULL; @@ -2246,11 +2071,10 @@ static void test_rx_session_unordered_duplicates(void) TEST_ASSERT_NOT_NULL(ses); meta_t meta = { .priority = udpard_prio_nominal, - .flag_reliable = false, + .kind = frame_msg_best, .transfer_payload_size = 2, .transfer_id = 5, - .sender_uid = remote_uid, - .topic_hash = port.topic_hash }; + .sender_uid = remote_uid }; now += 1000; rx_session_update(ses, &rx, @@ -2282,208 +2106,6 @@ static void test_rx_session_unordered_duplicates(void) instrumented_allocator_reset(&alloc_payload); } -static void test_rx_session_ordered_reject_stale_after_jump(void) -{ - // Ordered session releases interned transfers once gaps are filled. 
- instrumented_allocator_t alloc_frag = { 0 }; - instrumented_allocator_new(&alloc_frag); - instrumented_allocator_t alloc_session = { 0 }; - instrumented_allocator_new(&alloc_session); - instrumented_allocator_t alloc_payload = { 0 }; - instrumented_allocator_new(&alloc_payload); - const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); - const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); - const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - - udpard_rx_t rx; - udpard_rx_new(&rx, NULL); - callback_result_t cb_result = { 0 }; - rx.user = &cb_result; - - udpard_rx_port_t port = { 0 }; - TEST_ASSERT( - udpard_rx_port_new(&port, 0x123456789ABCDEF0ULL, 1000, udpard_rx_ordered, 20 * KILO, rx_mem, &callbacks)); - - udpard_us_t now = 0; - const uint64_t remote_uid = 0xCAFEBEEFFACEFEEDULL; - rx_session_factory_args_t fac_args = { - .owner = &port, - .sessions_by_animation = &rx.list_session_by_animation, - .remote_uid = remote_uid, - .now = now, - }; - rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, - &remote_uid, - &cavl_compare_rx_session_by_remote_uid, - &fac_args, - &cavl_factory_rx_session_by_remote_uid); - TEST_ASSERT_NOT_NULL(ses); - - meta_t meta = { .priority = udpard_prio_nominal, - .flag_reliable = false, - .transfer_payload_size = 2, - .transfer_id = 10, - .sender_uid = remote_uid, - .topic_hash = port.topic_hash }; - now += 1000; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x01010101, .port = 0x1111 }, - make_frame_ptr(meta, mem_payload, "aa", 0, 2), - del_payload, - 0); - TEST_ASSERT_EQUAL(1, cb_result.message.count); - udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); - cb_result.message.history[0].payload = NULL; - - // Intern two transfers out of order. - meta.transfer_id = 12; - now += 100; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x02020202, .port = 0x2222 }, - make_frame_ptr(meta, mem_payload, "bb", 0, 2), - del_payload, - 1); - // Depending on implementation, the jump may be dropped or interned. - TEST_ASSERT(cb_result.message.count >= 1); - meta.transfer_id = 11; - now += 100; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x03030303, .port = 0x3333 }, - make_frame_ptr(meta, mem_payload, "cc", 0, 2), - del_payload, - 0); - TEST_ASSERT_EQUAL(3, cb_result.message.count); - TEST_ASSERT_EQUAL(12, cb_result.message.history[0].transfer_id); - TEST_ASSERT_EQUAL(11, cb_result.message.history[1].transfer_id); - udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); - cb_result.message.history[0].payload = NULL; - udpard_fragment_free_all(cb_result.message.history[1].payload, udpard_make_deleter(mem_frag)); - cb_result.message.history[1].payload = NULL; - - // Very old transfer is still accepted once the head has advanced. 
- meta.transfer_id = 5; - now += 100; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x04040404, .port = 0x4444 }, - make_frame_ptr(meta, mem_payload, "dd", 0, 2), - del_payload, - 2); - if ((cb_result.message.count > 0) && (cb_result.message.history[0].payload != NULL)) { - udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); - cb_result.message.history[0].payload = NULL; - } - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - - udpard_rx_port_free(&rx, &port); - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_session); - instrumented_allocator_reset(&alloc_payload); -} - -static void test_rx_session_ordered_zero_reordering_window(void) -{ - // Zero window ordered session should only accept strictly sequential IDs. - instrumented_allocator_t alloc_frag = { 0 }; - instrumented_allocator_new(&alloc_frag); - instrumented_allocator_t alloc_session = { 0 }; - instrumented_allocator_new(&alloc_session); - instrumented_allocator_t alloc_payload = { 0 }; - instrumented_allocator_new(&alloc_payload); - const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); - const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); - const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - - udpard_rx_t rx; - udpard_rx_new(&rx, NULL); - callback_result_t cb_result = { 0 }; - rx.user = &cb_result; - - udpard_rx_port_t port = { 0 }; - TEST_ASSERT(udpard_rx_port_new(&port, 0x0F0E0D0C0B0A0908ULL, 256, udpard_rx_ordered, 0, rx_mem, &callbacks)); - - udpard_us_t now = 0; - const uint64_t remote_uid = 0x0102030405060708ULL; - rx_session_factory_args_t fac_args = { - .owner = &port, - .sessions_by_animation = &rx.list_session_by_animation, - .remote_uid = remote_uid, - .now = now, - }; - rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, - &remote_uid, - &cavl_compare_rx_session_by_remote_uid, - &fac_args, - &cavl_factory_rx_session_by_remote_uid); - TEST_ASSERT_NOT_NULL(ses); - - meta_t meta = { .priority = udpard_prio_nominal, - .flag_reliable = false, - .transfer_payload_size = 2, - .transfer_id = 1, - .sender_uid = remote_uid, - .topic_hash = port.topic_hash }; - now += 1000; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0xAA000001, .port = 0x1111 }, - make_frame_ptr(meta, mem_payload, "x1", 0, 2), - del_payload, - 0); - TEST_ASSERT(cb_result.message.count >= 1); - udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); - cb_result.message.history[0].payload = NULL; - - // Jump is dropped with zero window. - meta.transfer_id = 3; - now += 10; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0xAA000001, .port = 0x1111 }, - make_frame_ptr(meta, mem_payload, "x3", 0, 2), - del_payload, - 1); - TEST_ASSERT(cb_result.message.count >= 1); - - // Next expected transfer is accepted. 
- meta.transfer_id = 2; - now += 10; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0xAA000001, .port = 0x1111 }, - make_frame_ptr(meta, mem_payload, "x2", 0, 2), - del_payload, - 0); - TEST_ASSERT(cb_result.message.count >= 1); - if (cb_result.message.history[0].payload != NULL) { - udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); - cb_result.message.history[0].payload = NULL; - } - if ((cb_result.message.count > 1) && (cb_result.message.history[1].payload != NULL)) { - udpard_fragment_free_all(cb_result.message.history[1].payload, udpard_make_deleter(mem_frag)); - cb_result.message.history[1].payload = NULL; - } - - udpard_rx_port_free(&rx, &port); - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_session); - instrumented_allocator_reset(&alloc_payload); -} - static void test_rx_port(void) { // P2P ports behave like ordinary ports for payload delivery. @@ -2504,20 +2126,18 @@ static void test_rx_port(void) callback_result_t cb_result = { 0 }; rx.user = &cb_result; - const uint64_t local_uid = 0xCAFED00DCAFED00DULL; - udpard_rx_port_t port = { 0 }; - TEST_ASSERT(udpard_rx_port_new(&port, local_uid, 64, udpard_rx_unordered, 0, rx_mem, &callbacks)); + udpard_rx_port_t port = { 0 }; + TEST_ASSERT(udpard_rx_port_new_p2p(&port, 64, rx_mem, &callbacks)); // Compose a P2P response datagram without a P2P header. const uint64_t resp_tid = 55; const uint8_t payload[3] = { 'a', 'b', 'c' }; meta_t meta = { .priority = udpard_prio_fast, - .flag_reliable = false, + .kind = frame_msg_best, .transfer_payload_size = sizeof(payload), .transfer_id = resp_tid, - .sender_uid = 0x0BADF00D0BADF00DULL, - .topic_hash = port.topic_hash }; + .sender_uid = 0x0BADF00D0BADF00DULL }; rx_frame_t* frame = make_frame_ptr(meta, mem_payload, payload, 0, sizeof(payload)); byte_t dgram[HEADER_SIZE_BYTES + sizeof(payload)]; header_serialize(dgram, meta, 0, 0, frame->base.crc); @@ -2573,15 +2193,13 @@ static void test_rx_port_timeouts(void) rx.user = &cb_result; udpard_rx_port_t port = { 0 }; - TEST_ASSERT( - udpard_rx_port_new(&port, 0xBADC0FFEE0DDF00DULL, 128, udpard_rx_ordered, 20 * KILO, rx_mem, &callbacks)); + TEST_ASSERT(udpard_rx_port_new(&port, 128, rx_mem, &callbacks)); meta_t meta = { .priority = udpard_prio_nominal, - .flag_reliable = false, + .kind = frame_msg_best, .transfer_payload_size = 4, .transfer_id = 1, - .sender_uid = 0x1111222233334444ULL, - .topic_hash = port.topic_hash }; + .sender_uid = 0x1111222233334444ULL }; rx_frame_t* frame = make_frame_ptr(meta, mem_payload, "ping", 0, 4); const byte_t payload_bytes[] = { 'p', 'i', 'n', 'g' }; byte_t dgram[HEADER_SIZE_BYTES + sizeof(payload_bytes)]; @@ -2637,14 +2255,13 @@ static void test_rx_port_oom(void) rx.user = &cb_result; udpard_rx_port_t port = { 0 }; - TEST_ASSERT(udpard_rx_port_new(&port, 0xCAFEBABECAFEBABEULL, 64, udpard_rx_unordered, 0, rx_mem, &callbacks)); + TEST_ASSERT(udpard_rx_port_new(&port, 64, rx_mem, &callbacks)); meta_t meta = { .priority = udpard_prio_nominal, - .flag_reliable = false, + .kind = frame_msg_best, .transfer_payload_size = 4, .transfer_id = 1, - .sender_uid = 0x0101010101010101ULL, - .topic_hash = port.topic_hash }; + .sender_uid = 0x0101010101010101ULL }; rx_frame_t* frame = make_frame_ptr(meta, mem_payload, "oom!", 0, 4); const byte_t payload_bytes[] = { 'o', 'o', 'm', '!' 
}; byte_t dgram[HEADER_SIZE_BYTES + sizeof(payload_bytes)]; @@ -2694,11 +2311,9 @@ static void test_rx_port_free_loop(void) rx.user = &cb_result; udpard_rx_port_t port_p2p = { 0 }; - TEST_ASSERT( - udpard_rx_port_new(&port_p2p, 0xCAFED00DCAFED00DULL, SIZE_MAX, udpard_rx_unordered, 0, rx_mem, &callbacks)); - udpard_rx_port_t port_extra = { 0 }; - const uint64_t topic_hash_extra = 0xDEADBEEFF00D1234ULL; - TEST_ASSERT(udpard_rx_port_new(&port_extra, topic_hash_extra, 1000, udpard_rx_ordered, 5000, rx_mem, &callbacks)); + TEST_ASSERT(udpard_rx_port_new_p2p(&port_p2p, SIZE_MAX, rx_mem, &callbacks)); + udpard_rx_port_t port_extra = { 0 }; + TEST_ASSERT(udpard_rx_port_new(&port_extra, 1000, rx_mem, &callbacks)); udpard_us_t now = 0; @@ -2706,11 +2321,10 @@ static void test_rx_port_free_loop(void) { const char* payload = "INCOMPLETE"; meta_t meta = { .priority = udpard_prio_slow, - .flag_reliable = false, + .kind = frame_msg_best, .transfer_payload_size = (uint32_t)strlen(payload), .transfer_id = 10, - .sender_uid = 0xAAAAULL, - .topic_hash = port_p2p.topic_hash }; + .sender_uid = 0xAAAAULL }; rx_frame_t* frame = make_frame_ptr(meta, mem_payload, payload, 0, 4); byte_t dgram[HEADER_SIZE_BYTES + 4]; header_serialize(dgram, meta, 0, 0, frame->base.crc); @@ -2732,11 +2346,10 @@ static void test_rx_port_free_loop(void) { const char* payload = "FRAGMENTS"; meta_t meta = { .priority = udpard_prio_fast, - .flag_reliable = false, + .kind = frame_msg_best, .transfer_payload_size = (uint32_t)strlen(payload), .transfer_id = 20, - .sender_uid = 0xBBBBULL, - .topic_hash = topic_hash_extra }; + .sender_uid = 0xBBBBULL }; rx_frame_t* frame = make_frame_ptr(meta, mem_payload, payload, 0, 3); byte_t dgram[HEADER_SIZE_BYTES + 3]; header_serialize(dgram, meta, 0, 0, frame->base.crc); @@ -2767,22 +2380,12 @@ static void test_rx_port_free_loop(void) instrumented_allocator_reset(&alloc_payload); } -static size_t g_collision_count = 0; // NOLINT(*-avoid-non-const-global-variables) - static void stub_on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) { (void)rx; udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); } -static void stub_on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_remote_t remote) -{ - (void)rx; - (void)port; - (void)remote; - g_collision_count++; -} - static udpard_udpip_ep_t make_ep(const uint32_t ip) { return (udpard_udpip_ep_t){ .ip = ip, .port = 1U }; } static void test_rx_additional_coverage(void) @@ -2808,88 +2411,31 @@ static void test_rx_additional_coverage(void) TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_mem)); // Session helpers and free paths. 
- udpard_rx_port_t port = { .memory = mem, - .vtable = &(udpard_rx_port_vtable_t){ .on_message = stub_on_message, - .on_collision = stub_on_collision }, - .mode = udpard_rx_ordered, - .reordering_window = 10, - .topic_hash = 1 }; - rx_session_t* ses = mem_res_alloc(mem.session, sizeof(rx_session_t)); + const udpard_rx_port_vtable_t vtb = { .on_message = stub_on_message }; + udpard_rx_port_t port = { 0 }; + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 8, mem, &vtb)); + udpard_list_t anim_list = { 0 }; + rx_session_factory_args_t fac_args = { + .owner = &port, .sessions_by_animation = &anim_list, .remote_uid = 77, .now = 0 + }; + rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, + &fac_args.remote_uid, + &cavl_compare_rx_session_by_remote_uid, + &fac_args, + &cavl_factory_rx_session_by_remote_uid); TEST_ASSERT_NOT_NULL(ses); - mem_zero(sizeof(*ses), ses); - ses->port = &port; - ses->remote.uid = 77; - ses->slots[0].state = rx_slot_done; - ses->slots[0].transfer_id = 5; - TEST_ASSERT_TRUE(rx_session_is_transfer_interned(ses, 5)); - ses->reordering_window_deadline = 5; - // Comparator smoke-test with stable key. - const rx_reordering_key_t dl_key = { .deadline = 5, .remote_uid = ses->remote.uid }; - (void)cavl_compare_rx_session_by_reordering_deadline(&dl_key, &ses->index_reordering_window); - // Comparator branches for UID and deadline ordering. + for (size_t i = 0; i < RX_TRANSFER_HISTORY_COUNT; i++) { + ses->history[i] = 1; + } + ses->history[0] = 5; + TEST_ASSERT_TRUE(rx_session_is_transfer_ejected(ses, 5)); + TEST_ASSERT_FALSE(rx_session_is_transfer_ejected(ses, 6)); TEST_ASSERT_EQUAL(-1, cavl_compare_rx_session_by_remote_uid(&(uint64_t){ 10 }, &ses->index_remote_uid)); TEST_ASSERT_EQUAL(1, cavl_compare_rx_session_by_remote_uid(&(uint64_t){ 100 }, &ses->index_remote_uid)); - rx_reordering_key_t dl_key_hi = { .deadline = 10, .remote_uid = ses->remote.uid + 1U }; - TEST_ASSERT_EQUAL(1, cavl_compare_rx_session_by_reordering_deadline(&dl_key_hi, &ses->index_reordering_window)); - rx_reordering_key_t dl_key_lo = { .deadline = 1, .remote_uid = ses->remote.uid - 1U }; - TEST_ASSERT_EQUAL(-1, cavl_compare_rx_session_by_reordering_deadline(&dl_key_lo, &ses->index_reordering_window)); - rx_reordering_key_t dl_key_uid_lo = { .deadline = 5, .remote_uid = ses->remote.uid - 1U }; - TEST_ASSERT_EQUAL(-1, - cavl_compare_rx_session_by_reordering_deadline(&dl_key_uid_lo, &ses->index_reordering_window)); - rx_reordering_key_t dl_key_uid_hi = { .deadline = 5, .remote_uid = ses->remote.uid + 1U }; - TEST_ASSERT_EQUAL(1, cavl_compare_rx_session_by_reordering_deadline(&dl_key_uid_hi, &ses->index_reordering_window)); - udpard_list_t anim_list = { 0 }; - udpard_tree_t* by_reorder = NULL; - cavl2_find_or_insert(&port.index_session_by_remote_uid, - &ses->remote.uid, - cavl_compare_rx_session_by_remote_uid, - &ses->index_remote_uid, - cavl2_trivial_factory); - ses->reordering_window_deadline = 3; - const rx_reordering_key_t reorder_key = { .deadline = ses->reordering_window_deadline, - .remote_uid = ses->remote.uid }; - const udpard_tree_t* const tree_reorder = cavl2_find_or_insert(&by_reorder, - &reorder_key, - cavl_compare_rx_session_by_reordering_deadline, - &ses->index_reordering_window, - cavl2_trivial_factory); - TEST_ASSERT_EQUAL_PTR(&ses->index_reordering_window, tree_reorder); - enlist_head(&anim_list, &ses->list_by_animation); - rx_session_free(ses, &anim_list, &by_reorder); - - // Ordered scan cleans late busy slots. 
- rx_session_t ses_busy; - mem_zero(sizeof(ses_busy), &ses_busy); - ses_busy.port = &port; - ses_busy.history[0] = 10; - ses_busy.slots[0].state = rx_slot_busy; - ses_busy.slots[0].transfer_id = 10; - ses_busy.slots[0].ts_min = 0; - ses_busy.slots[0].ts_max = 0; - udpard_rx_t rx = { 0 }; - rx_session_ordered_scan_slots(&ses_busy, &rx, 10, false); - - // Ordered scan resets late busy slots. - rx_session_t ses_late; - mem_zero(sizeof(ses_late), &ses_late); - ses_late.port = &port; - ses_late.history[0] = 42; - ses_late.slots[0].state = rx_slot_busy; - ses_late.slots[0].transfer_id = 42; - rx_session_ordered_scan_slots(&ses_late, &rx, 10, false); - TEST_ASSERT_EQUAL(rx_slot_idle, ses_late.slots[0].state); - - // Forced scan ejects a done slot. - rx_session_t ses_force; - mem_zero(sizeof(ses_force), &ses_force); - ses_force.port = &port; - ses_force.history[0] = 1; - ses_force.slots[0].state = rx_slot_done; - ses_force.slots[0].transfer_id = 100; - rx_session_ordered_scan_slots(&ses_force, &rx, 0, true); - TEST_ASSERT_EQUAL(rx_slot_idle, ses_force.slots[0].state); - - // Slot acquisition covers stale busy, busy eviction, and done eviction. + rx_session_free(ses, &anim_list); + + // Slot acquisition covers stale busy and eviction. + udpard_rx_t rx = { 0 }; rx_session_t ses_slots; mem_zero(sizeof(ses_slots), &ses_slots); ses_slots.port = &port; @@ -2897,59 +2443,21 @@ static void test_rx_additional_coverage(void) for (size_t i = 0; i < RX_TRANSFER_HISTORY_COUNT; i++) { ses_slots.history[i] = 1; } - ses_slots.slots[0].state = rx_slot_busy; + ses_slots.slots[0].busy = true; ses_slots.slots[0].ts_max = 0; ses_slots.slots[0].transfer_id = 1; - rx_slot_t* slot = rx_session_get_slot(&ses_slots, &rx, SESSION_LIFETIME + 1, 99); + rx_slot_t* slot = rx_session_get_slot(&ses_slots, SESSION_LIFETIME + 1, 99); TEST_ASSERT_NOT_NULL(slot); for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - ses_slots.slots[i].state = (i == 0) ? rx_slot_busy : rx_slot_done; + ses_slots.slots[i].busy = true; ses_slots.slots[i].ts_max = 10 + (udpard_us_t)i; } - slot = rx_session_get_slot(&ses_slots, &rx, 50, 2); - TEST_ASSERT_NOT_NULL(slot); - for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - ses_slots.slots[i].state = rx_slot_done; - ses_slots.slots[i].transfer_id = i + 1U; - ses_slots.slots[i].ts_min = (udpard_us_t)i; - ses_slots.slots[i].ts_max = (udpard_us_t)i; - } - port.vtable = &(udpard_rx_port_vtable_t){ .on_message = stub_on_message, .on_collision = stub_on_collision }; - slot = rx_session_get_slot(&ses_slots, &rx, 60, 3); + slot = rx_session_get_slot(&ses_slots, 50, 2); TEST_ASSERT_NOT_NULL(slot); - // Ordered update retransmits ACK for ejected transfers. 
- rx_session_t ses_ack; - mem_zero(sizeof(ses_ack), &ses_ack); - ses_ack.port = &port; - ses_ack.remote.uid = 55; - ses_ack.history[0] = 7; - ses_ack.initialized = true; - rx_frame_t ack_frame; - mem_zero(sizeof(ack_frame), &ack_frame); - void* ack_buf = mem_res_alloc(mem.fragment, ACK_SIZE_BYTES); - TEST_ASSERT_NOT_NULL(ack_buf); - memset(ack_buf, 0, ACK_SIZE_BYTES); - ack_frame.base.payload = (udpard_bytes_t){ .data = ack_buf, .size = ACK_SIZE_BYTES }; - ack_frame.base.origin = (udpard_bytes_mut_t){ .data = ack_buf, .size = ACK_SIZE_BYTES }; - ack_frame.base.offset = 0; - ack_frame.meta.priority = udpard_prio_nominal; - ack_frame.meta.flag_reliable = true; - ack_frame.meta.transfer_payload_size = ACK_SIZE_BYTES; - ack_frame.meta.transfer_id = 7; - ack_frame.meta.sender_uid = ses_ack.remote.uid; - ack_frame.meta.topic_hash = port.topic_hash; - rx.errors_ack_tx = 0; - rx.tx = NULL; - rx_session_update_ordered(&ses_ack, &rx, 0, &ack_frame, instrumented_allocator_make_deleter(&alloc_frag)); - TEST_ASSERT_EQUAL_UINT64(1U, rx.errors_ack_tx); - // Stateless accept success, OOM, malformed. - g_collision_count = 0; - port.vtable = &(udpard_rx_port_vtable_t){ .on_message = stub_on_message, .on_collision = stub_on_collision }; - port.extent = 8; - port.mode = udpard_rx_stateless; - port.reordering_window = 0; + udpard_rx_port_t port_stateless = { 0 }; + TEST_ASSERT_TRUE(udpard_rx_port_new_stateless(&port_stateless, 8, mem, &vtb)); rx_frame_t frame; byte_t payload[4] = { 1, 2, 3, 4 }; mem_zero(sizeof(frame), &frame); @@ -2958,19 +2466,23 @@ static void test_rx_additional_coverage(void) frame.base.payload = (udpard_bytes_t){ .data = payload_buf, .size = sizeof(payload) }; frame.base.origin = (udpard_bytes_mut_t){ .data = payload_buf, .size = sizeof(payload) }; frame.base.crc = crc_full(frame.base.payload.size, frame.base.payload.data); + frame.meta.priority = udpard_prio_nominal; frame.meta.transfer_payload_size = (uint32_t)frame.base.payload.size; frame.meta.sender_uid = 9; frame.meta.transfer_id = 11; - rx_port_accept_stateless(&rx, &port, 0, make_ep(1), &frame, instrumented_allocator_make_deleter(&alloc_frag), 0); + rx_port_accept_stateless( + &rx, &port_stateless, 0, make_ep(1), &frame, instrumented_allocator_make_deleter(&alloc_frag), 0); alloc_frag.limit_fragments = 0; frame.base.payload.data = payload; frame.base.payload.size = sizeof(payload); frame.base.origin = (udpard_bytes_mut_t){ 0 }; frame.base.crc = crc_full(frame.base.payload.size, frame.base.payload.data); - rx_port_accept_stateless(&rx, &port, 0, make_ep(1), &frame, instrumented_allocator_make_deleter(&alloc_frag), 0); + rx_port_accept_stateless( + &rx, &port_stateless, 0, make_ep(1), &frame, instrumented_allocator_make_deleter(&alloc_frag), 0); frame.base.payload.size = 0; frame.meta.transfer_payload_size = 8; - rx_port_accept_stateless(&rx, &port, 0, make_ep(1), &frame, instrumented_allocator_make_deleter(&alloc_frag), 0); + rx_port_accept_stateless( + &rx, &port_stateless, 0, make_ep(1), &frame, instrumented_allocator_make_deleter(&alloc_frag), 0); // Stateless accept rejects nonzero offsets. 
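// A hedged sketch of the rule this check probes: a stateless port keeps no
// per-remote reassembly state, so it can only deliver transfers that arrive
// as one self-contained frame. The predicate below is illustrative only; the
// real rx_port_accept_stateless() may differ in shape and error accounting.
static bool sketch_stateless_acceptable(const rx_frame_t* const frame)
{
    // The payload must start at offset zero and carry the whole transfer.
    return (frame->base.offset == 0U) &&
           (frame->base.payload.size >= frame->meta.transfer_payload_size);
}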
alloc_frag.limit_fragments = SIZE_MAX; void* payload_buf2 = mem_res_alloc(mem.fragment, sizeof(payload)); @@ -2980,40 +2492,30 @@ static void test_rx_additional_coverage(void) frame.base.origin = (udpard_bytes_mut_t){ .data = payload_buf2, .size = sizeof(payload) }; frame.base.offset = 1U; frame.meta.transfer_payload_size = (uint32_t)sizeof(payload); - rx_port_accept_stateless(&rx, &port, 0, make_ep(1), &frame, instrumented_allocator_make_deleter(&alloc_frag), 0); - frame.base.offset = 0; - udpard_rx_port_t port_stateless_new = { 0 }; - TEST_ASSERT_TRUE(udpard_rx_port_new(&port_stateless_new, 22, 8, udpard_rx_stateless, 0, mem, port.vtable)); - TEST_ASSERT_NOT_NULL(port_stateless_new.vtable_private); - udpard_rx_port_free(&rx, &port_stateless_new); - instrumented_allocator_reset(&alloc_frag); + rx_port_accept_stateless( + &rx, &port_stateless, 0, make_ep(1), &frame, instrumented_allocator_make_deleter(&alloc_frag), 0); + frame.base.offset = 0; + udpard_rx_port_free(&rx, &port_stateless); - // Port push collision and malformed header. + // ACK frames are rejected on non-P2P ports. udpard_rx_port_t port_normal = { 0 }; - TEST_ASSERT_TRUE(udpard_rx_port_new(&port_normal, 1, 8, udpard_rx_ordered, 10, mem, port.vtable)); - udpard_bytes_mut_t bad_payload = { .data = mem_res_alloc(mem.fragment, 4), .size = 4 }; + TEST_ASSERT_TRUE(udpard_rx_port_new(&port_normal, 8, mem, &vtb)); + byte_t ack_dgram[HEADER_SIZE_BYTES] = { 0 }; + meta_t ack_meta = { .priority = udpard_prio_nominal, + .kind = frame_ack, + .transfer_payload_size = 0, + .transfer_id = 1, + .sender_uid = 2 }; + header_serialize(ack_dgram, ack_meta, 0, 0, crc_full(0, NULL)); + udpard_bytes_mut_t ack_payload = { .data = mem_res_alloc(mem.fragment, sizeof(ack_dgram)), + .size = sizeof(ack_dgram) }; + memcpy(ack_payload.data, ack_dgram, sizeof(ack_dgram)); + const uint64_t malformed_before = rx.errors_frame_malformed; TEST_ASSERT(udpard_rx_port_push( - &rx, &port_normal, 0, make_ep(2), bad_payload, instrumented_allocator_make_deleter(&alloc_frag), 0)); - byte_t good_dgram[HEADER_SIZE_BYTES + 1] = { 0 }; - meta_t meta = { .priority = udpard_prio_nominal, - .flag_reliable = false, - .transfer_payload_size = 1, - .transfer_id = 1, - .sender_uid = 2, - .topic_hash = 99 }; - good_dgram[HEADER_SIZE_BYTES] = 0xAA; - header_serialize(good_dgram, meta, 0, 0, crc_full(1, &good_dgram[HEADER_SIZE_BYTES])); - udpard_bytes_mut_t good_payload = { .data = mem_res_alloc(mem.fragment, sizeof(good_dgram)), - .size = sizeof(good_dgram) }; - memcpy(good_payload.data, good_dgram, sizeof(good_dgram)); - TEST_ASSERT(udpard_rx_port_push( - &rx, &port_normal, 0, make_ep(3), good_payload, instrumented_allocator_make_deleter(&alloc_frag), 1)); - TEST_ASSERT_GREATER_THAN_UINT64(0, g_collision_count); + &rx, &port_normal, 0, make_ep(3), ack_payload, instrumented_allocator_make_deleter(&alloc_frag), 0)); + TEST_ASSERT_EQUAL_UINT64(malformed_before + 1U, rx.errors_frame_malformed); udpard_rx_port_free(&rx, &port_normal); - // Short ACK messages are ignored. 
- rx.errors_ack_tx = 0; - rx_accept_ack(&rx, (udpard_bytes_t){ .data = payload, .size = 1U }); - TEST_ASSERT_EQUAL_UINT64(0, rx.errors_ack_tx); + instrumented_allocator_reset(&alloc_frag); instrumented_allocator_reset(&alloc_ses); } @@ -3032,15 +2534,11 @@ int main(void) RUN_TEST(test_rx_slot_update); - RUN_TEST(test_rx_transfer_id_forward_distance); RUN_TEST(test_rx_ack_enqueued); - RUN_TEST(test_rx_session_ordered); RUN_TEST(test_rx_session_unordered); RUN_TEST(test_rx_session_unordered_reject_old); - RUN_TEST(test_rx_session_ordered_reject_stale_after_jump); RUN_TEST(test_rx_session_unordered_duplicates); - RUN_TEST(test_rx_session_ordered_zero_reordering_window); RUN_TEST(test_rx_port); RUN_TEST(test_rx_port_timeouts); diff --git a/tests/src/test_intrusive_tx.c b/tests/src/test_intrusive_tx.c index c513de2..34ad3d1 100644 --- a/tests/src/test_intrusive_tx.c +++ b/tests/src/test_intrusive_tx.c @@ -102,6 +102,18 @@ static tx_transfer_t* latest_transfer(udpard_tx_t* const tx) return LIST_MEMBER(tx->agewise.head, tx_transfer_t, agewise); } +// Looks up a transfer by transfer-ID. +static tx_transfer_t* find_transfer_by_id(udpard_tx_t* const tx, const uint64_t transfer_id) +{ + if (tx == NULL) { + return NULL; + } + const tx_key_transfer_id_t key = { .transfer_id = transfer_id, .seq_no = 0 }; + tx_transfer_t* const tr = CAVL2_TO_OWNER( + cavl2_lower_bound(tx->index_transfer_id, &key, &tx_cavl_compare_transfer_id), tx_transfer_t, index_transfer_id); + return ((tr != NULL) && (tr->transfer_id == transfer_id)) ? tr : NULL; +} + static void test_bytes_scattered_read(void) { // Skips empty fragments and spans boundaries. @@ -157,49 +169,44 @@ static void test_tx_serialize_header(void) header_buffer_t buffer; const meta_t meta = { .priority = udpard_prio_fast, - .flag_reliable = false, - .flag_acknowledgement = false, + .kind = frame_msg_best, .transfer_payload_size = 12345, .transfer_id = 0xBADC0FFEE0DDF00DULL, .sender_uid = 0x0123456789ABCDEFULL, - .topic_hash = 0xFEDCBA9876543210ULL, }; (void)header_serialize(buffer.data, meta, 12345, 0, 0); TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES, sizeof(buffer.data)); // Verify version and priority in first byte TEST_ASSERT_EQUAL((HEADER_VERSION | ((unsigned)udpard_prio_fast << 5U)), buffer.data[0]); + TEST_ASSERT_EQUAL_UINT8(frame_msg_best, buffer.data[1]); } // Test case 2: Reliable flag { header_buffer_t buffer; const meta_t meta = { .priority = udpard_prio_nominal, - .flag_reliable = true, - .flag_acknowledgement = false, + .kind = frame_msg_reliable, .transfer_payload_size = 5000, .transfer_id = 0xAAAAAAAAAAAAAAAAULL, .sender_uid = 0xBBBBBBBBBBBBBBBBULL, - .topic_hash = 0xCCCCCCCCCCCCCCCCULL, }; (void)header_serialize(buffer.data, meta, 100, 200, 0); TEST_ASSERT_EQUAL((HEADER_VERSION | ((unsigned)udpard_prio_nominal << 5U)), buffer.data[0]); - TEST_ASSERT_EQUAL(HEADER_FLAG_RELIABLE, buffer.data[1]); + TEST_ASSERT_EQUAL_UINT8(frame_msg_reliable, buffer.data[1]); } // Test case 3: ACK flag { header_buffer_t buffer; const meta_t meta = { .priority = udpard_prio_nominal, - .flag_reliable = false, - .flag_acknowledgement = true, - .transfer_payload_size = 16, + .kind = frame_ack, + .transfer_payload_size = 0, .transfer_id = 0x1111111111111111ULL, .sender_uid = 0x2222222222222222ULL, - .topic_hash = 0x3333333333333333ULL, }; (void)header_serialize(buffer.data, meta, 0, 0, 0); TEST_ASSERT_EQUAL((HEADER_VERSION | ((unsigned)udpard_prio_nominal << 5U)), buffer.data[0]); - TEST_ASSERT_EQUAL(HEADER_FLAG_ACKNOWLEDGEMENT, buffer.data[1]); + 
TEST_ASSERT_EQUAL_UINT8(frame_ack, buffer.data[1]); } } @@ -251,31 +258,24 @@ static void test_tx_validation_and_free(void) &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); tx_transfer_t* const tr = mem_alloc(mem.transfer, sizeof(tx_transfer_t)); mem_zero(sizeof(*tr), tr); - tr->priority = udpard_prio_fast; - tr->deadline = 10; - tr->staged_until = 1; - tr->remote_topic_hash = 99; - tr->remote_transfer_id = 100; - tx_transfer_key_t key = { .topic_hash = 5, .transfer_id = 7 }; + tr->priority = udpard_prio_fast; + tr->deadline = 10; + tr->staged_until = 1; + tr->seq_no = 1; + tr->transfer_id = 7; + tr->kind = frame_msg_best; // Insert with stable ordering keys. - const tx_time_key_t staged_key = { .time = tr->staged_until, - .topic_hash = tr->topic_hash, - .transfer_id = tr->transfer_id }; - const tx_time_key_t deadline_key = { .time = tr->deadline, - .topic_hash = tr->topic_hash, - .transfer_id = tr->transfer_id }; + (void)cavl2_find_or_insert(&tx.index_staged, tr, tx_cavl_compare_staged, &tr->index_staged, cavl2_trivial_factory); (void)cavl2_find_or_insert( - &tx.index_staged, &staged_key, tx_cavl_compare_staged, &tr->index_staged, cavl2_trivial_factory); + &tx.index_deadline, tr, tx_cavl_compare_deadline, &tr->index_deadline, cavl2_trivial_factory); + const tx_key_transfer_id_t key_id = { .transfer_id = tr->transfer_id, .seq_no = tr->seq_no }; (void)cavl2_find_or_insert( - &tx.index_deadline, &deadline_key, tx_cavl_compare_deadline, &tr->index_deadline, cavl2_trivial_factory); - (void)cavl2_find_or_insert( - &tx.index_transfer, &key, tx_cavl_compare_transfer, &tr->index_transfer, cavl2_trivial_factory); - (void)cavl2_find_or_insert( - &tx.index_transfer_ack, &key, tx_cavl_compare_transfer_remote, &tr->index_transfer_ack, cavl2_trivial_factory); + &tx.index_transfer_id, &key_id, tx_cavl_compare_transfer_id, &tr->index_transfer_id, cavl2_trivial_factory); enlist_head(&tx.agewise, &tr->agewise); tx_transfer_retire(&tx, tr, true); TEST_ASSERT_NULL(tx.index_staged); - TEST_ASSERT_NULL(tx.index_transfer_ack); + TEST_ASSERT_NULL(tx.index_transfer_id); + TEST_ASSERT_NULL(tx.index_deadline); instrumented_allocator_reset(&alloc_transfer); instrumented_allocator_reset(&alloc_payload); } @@ -284,68 +284,46 @@ static void test_tx_comparators_and_feedback(void) { tx_transfer_t tr; mem_zero(sizeof(tr), &tr); - tr.staged_until = 5; - tr.deadline = 7; - tr.topic_hash = 10; - tr.transfer_id = 20; - tr.remote_topic_hash = 3; - tr.remote_transfer_id = 4; + tr.staged_until = 5; + tr.deadline = 7; + tr.transfer_id = 20; + tr.seq_no = 9; // Staged/deadline comparisons both ways. - tx_time_key_t tkey = { .time = 6, .topic_hash = tr.topic_hash, .transfer_id = tr.transfer_id }; - TEST_ASSERT_EQUAL(1, tx_cavl_compare_staged(&tkey, &tr.index_staged)); - tkey.time = 4; - TEST_ASSERT_EQUAL(-1, tx_cavl_compare_staged(&tkey, &tr.index_staged)); - tkey.time = 8; - TEST_ASSERT_EQUAL(1, tx_cavl_compare_deadline(&tkey, &tr.index_deadline)); - tkey.time = 6; - TEST_ASSERT_EQUAL(-1, tx_cavl_compare_deadline(&tkey, &tr.index_deadline)); - // Staged comparator covers topic_hash/transfer_id branches. 
- tkey = (tx_time_key_t){ .time = tr.staged_until, .topic_hash = tr.topic_hash - 1, .transfer_id = tr.transfer_id }; - TEST_ASSERT_EQUAL(-1, tx_cavl_compare_staged(&tkey, &tr.index_staged)); - tkey.topic_hash = tr.topic_hash + 1; - TEST_ASSERT_EQUAL(1, tx_cavl_compare_staged(&tkey, &tr.index_staged)); - tkey.topic_hash = tr.topic_hash; - tkey.transfer_id = tr.transfer_id - 1; - TEST_ASSERT_EQUAL(-1, tx_cavl_compare_staged(&tkey, &tr.index_staged)); - tkey.transfer_id = tr.transfer_id + 1; - TEST_ASSERT_EQUAL(1, tx_cavl_compare_staged(&tkey, &tr.index_staged)); - // Deadline comparator covers topic_hash/transfer_id branches. - tkey = (tx_time_key_t){ .time = tr.deadline, .topic_hash = tr.topic_hash - 1, .transfer_id = tr.transfer_id }; - TEST_ASSERT_EQUAL(-1, tx_cavl_compare_deadline(&tkey, &tr.index_deadline)); - tkey.topic_hash = tr.topic_hash + 1; - TEST_ASSERT_EQUAL(1, tx_cavl_compare_deadline(&tkey, &tr.index_deadline)); - tkey.topic_hash = tr.topic_hash; - tkey.transfer_id = tr.transfer_id - 1; - TEST_ASSERT_EQUAL(-1, tx_cavl_compare_deadline(&tkey, &tr.index_deadline)); - tkey.transfer_id = tr.transfer_id + 1; - TEST_ASSERT_EQUAL(1, tx_cavl_compare_deadline(&tkey, &tr.index_deadline)); - - // Transfer comparator covers all branches. - tx_transfer_key_t key = { .topic_hash = 5, .transfer_id = 1 }; - TEST_ASSERT_EQUAL(-1, tx_cavl_compare_transfer(&key, &tr.index_transfer)); - key.topic_hash = 15; - TEST_ASSERT_EQUAL(1, tx_cavl_compare_transfer(&key, &tr.index_transfer)); - key.topic_hash = tr.topic_hash; - key.transfer_id = 15; - TEST_ASSERT_EQUAL(-1, tx_cavl_compare_transfer(&key, &tr.index_transfer)); - key.transfer_id = 25; - TEST_ASSERT_EQUAL(1, tx_cavl_compare_transfer(&key, &tr.index_transfer)); - key.transfer_id = tr.transfer_id; - TEST_ASSERT_EQUAL(0, tx_cavl_compare_transfer(&key, &tr.index_transfer)); - - // Remote comparator mirrors the above. - tx_transfer_key_t rkey = { .topic_hash = 2, .transfer_id = 1 }; - TEST_ASSERT_EQUAL(-1, tx_cavl_compare_transfer_remote(&rkey, &tr.index_transfer_ack)); - rkey.topic_hash = 5; - TEST_ASSERT_EQUAL(1, tx_cavl_compare_transfer_remote(&rkey, &tr.index_transfer_ack)); - rkey.topic_hash = tr.remote_topic_hash; - rkey.transfer_id = 2; - TEST_ASSERT_EQUAL(-1, tx_cavl_compare_transfer_remote(&rkey, &tr.index_transfer_ack)); - rkey.transfer_id = 6; - TEST_ASSERT_EQUAL(1, tx_cavl_compare_transfer_remote(&rkey, &tr.index_transfer_ack)); - rkey.transfer_id = tr.remote_transfer_id; - TEST_ASSERT_EQUAL(0, tx_cavl_compare_transfer_remote(&rkey, &tr.index_transfer_ack)); + tx_transfer_t key = tr; + key.staged_until = 6; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_staged(&key, &tr.index_staged)); + key.staged_until = 4; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_staged(&key, &tr.index_staged)); + key.deadline = 8; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_deadline(&key, &tr.index_deadline)); + key.deadline = 6; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_deadline(&key, &tr.index_deadline)); + // Staged comparator covers seq_no branches. + key.staged_until = tr.staged_until; + key.seq_no = tr.seq_no - 1; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_staged(&key, &tr.index_staged)); + key.seq_no = tr.seq_no + 1; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_staged(&key, &tr.index_staged)); + // Deadline comparator covers seq_no branches. 
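// One plausible shape of the comparator whose branches are exercised below,
// assuming it orders by (deadline, seq_no) so transfers with equal deadlines
// stay totally ordered; the sketch_ name marks it as illustrative, not the
// library's tx_cavl_compare_deadline().
static int8_t sketch_compare_deadline(const void* const key_ptr, udpard_tree_t* const node)
{
    const tx_transfer_t* const key   = (const tx_transfer_t*)key_ptr;
    const tx_transfer_t* const other = CAVL2_TO_OWNER(node, tx_transfer_t, index_deadline);
    if (key->deadline != other->deadline) {
        return (key->deadline < other->deadline) ? -1 : +1;
    }
    return (key->seq_no == other->seq_no) ? 0 : ((key->seq_no < other->seq_no) ? -1 : +1);
}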
+ key.deadline = tr.deadline; + key.seq_no = tr.seq_no - 1; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_deadline(&key, &tr.index_deadline)); + key.seq_no = tr.seq_no + 1; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_deadline(&key, &tr.index_deadline)); + + // Transfer-ID comparator covers all branches. + tx_key_transfer_id_t key_id = { .transfer_id = 10, .seq_no = tr.seq_no }; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_transfer_id(&key_id, &tr.index_transfer_id)); + key_id.transfer_id = 30; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_transfer_id(&key_id, &tr.index_transfer_id)); + key_id.transfer_id = tr.transfer_id; + key_id.seq_no = tr.seq_no - 1; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_transfer_id(&key_id, &tr.index_transfer_id)); + key_id.seq_no = tr.seq_no + 1; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_transfer_id(&key_id, &tr.index_transfer_id)); + key_id.seq_no = tr.seq_no; + TEST_ASSERT_EQUAL(0, tx_cavl_compare_transfer_id(&key_id, &tr.index_transfer_id)); } static void test_tx_spool_and_queue_errors(void) @@ -360,12 +338,10 @@ static void test_tx_spool_and_queue_errors(void) const udpard_bytes_scattered_t payload = make_scattered(buffer, sizeof(buffer)); const meta_t meta = { .priority = udpard_prio_fast, - .flag_reliable = false, - .flag_acknowledgement = false, + .kind = frame_msg_best, .transfer_payload_size = (uint32_t)payload.bytes.size, .transfer_id = 1, .sender_uid = 1, - .topic_hash = 1, }; TEST_ASSERT_NULL(tx_spool(&tx, tx.memory.payload[0], 32, meta, payload)); TEST_ASSERT_EQUAL_size_t(0, tx.enqueued_frames_count); @@ -388,8 +364,8 @@ static void test_tx_spool_and_queue_errors(void) byte_t big_buf[2000] = { 0 }; const udpard_bytes_scattered_t big_payload = make_scattered(big_buf, sizeof(big_buf)); const uint16_t iface_bitmap_01 = (1U << 0U); - TEST_ASSERT_FALSE(udpard_tx_push( - &tx, 0, 1000, iface_bitmap_01, udpard_prio_fast, 11, 1, big_payload, NULL, UDPARD_USER_CONTEXT_NULL)); + TEST_ASSERT_FALSE( + udpard_tx_push(&tx, 0, 1000, iface_bitmap_01, udpard_prio_fast, 11, big_payload, NULL, UDPARD_USER_CONTEXT_NULL)); TEST_ASSERT_EQUAL_size_t(1, tx.errors_capacity); // Immediate rejection when the request exceeds limits. @@ -411,20 +387,17 @@ static void test_tx_spool_and_queue_errors(void) mem_zero(sizeof(victim), &victim); victim.priority = udpard_prio_fast; victim.deadline = 1; - victim.topic_hash = 7; victim.transfer_id = 9; + victim.seq_no = 1; + victim.kind = frame_msg_best; // Insert into deadline index with stable key. 
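// The find-or-insert idiom used for these fixtures, under the assumed
// contract that cavl2_trivial_factory simply hands back its context pointer:
// the caller pre-allocates the node, so a search miss degenerates into plain
// insertion of that node. The factory sketched here is an assumption, not
// the library's definition.
static udpard_tree_t* sketch_trivial_factory(void* const context)
{
    return (udpard_tree_t*)context;  // Caller-owned node; nothing is allocated.
}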
- const tx_time_key_t deadline_key = { .time = victim.deadline, - .topic_hash = victim.topic_hash, - .transfer_id = victim.transfer_id }; - (void)cavl2_find_or_insert( - &tx_sac.index_deadline, &deadline_key, tx_cavl_compare_deadline, &victim.index_deadline, cavl2_trivial_factory); (void)cavl2_find_or_insert( - &tx_sac.index_transfer, - &(tx_transfer_key_t){ .topic_hash = victim.topic_hash, .transfer_id = victim.transfer_id }, - tx_cavl_compare_transfer, - &victim.index_transfer, - cavl2_trivial_factory); + &tx_sac.index_deadline, &victim, tx_cavl_compare_deadline, &victim.index_deadline, cavl2_trivial_factory); + (void)cavl2_find_or_insert(&tx_sac.index_transfer_id, + &(tx_key_transfer_id_t){ .transfer_id = victim.transfer_id, .seq_no = victim.seq_no }, + tx_cavl_compare_transfer_id, + &victim.index_transfer_id, + cavl2_trivial_factory); enlist_head(&tx_sac.agewise, &victim.agewise); TEST_ASSERT_FALSE(tx_ensure_queue_space(&tx_sac, 1)); TEST_ASSERT_EQUAL_size_t(1, tx_sac.errors_sacrifice); @@ -440,7 +413,7 @@ static void test_tx_spool_and_queue_errors(void) mem, &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); TEST_ASSERT_FALSE(udpard_tx_push( - &tx, 0, 1000, iface_bitmap_01, udpard_prio_fast, 12, 2, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL)); + &tx, 0, 1000, iface_bitmap_01, udpard_prio_fast, 12, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL)); TEST_ASSERT_EQUAL_size_t(1, tx.errors_oom); // Spool OOM inside tx_push. @@ -452,8 +425,8 @@ static void test_tx_spool_and_queue_errors(void) 4U, mem, &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - TEST_ASSERT_FALSE(udpard_tx_push( - &tx, 0, 1000, iface_bitmap_01, udpard_prio_fast, 13, 3, big_payload, NULL, UDPARD_USER_CONTEXT_NULL)); + TEST_ASSERT_FALSE( + udpard_tx_push(&tx, 0, 1000, iface_bitmap_01, udpard_prio_fast, 13, big_payload, NULL, UDPARD_USER_CONTEXT_NULL)); TEST_ASSERT_EQUAL_size_t(1, tx.errors_oom); // Reliable transfer gets staged. @@ -473,7 +446,6 @@ static void test_tx_spool_and_queue_errors(void) iface_bitmap_01, udpard_prio_nominal, 14, - 4, make_scattered(NULL, 0), record_feedback, make_user_context(&fstate))); @@ -507,7 +479,6 @@ static void test_tx_ack_and_scheduler(void) 1000, iface_bitmap_01, udpard_prio_fast, - 21, 42, make_scattered(NULL, 0), record_feedback, @@ -531,19 +502,11 @@ static void test_tx_ack_and_scheduler(void) 8U, mem, &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - TEST_ASSERT_TRUE(udpard_tx_push(&tx_be, - 0, - 1000, - iface_bitmap_01, - udpard_prio_fast, - 22, - 43, - make_scattered(NULL, 0), - NULL, - UDPARD_USER_CONTEXT_NULL)); + TEST_ASSERT_TRUE(udpard_tx_push( + &tx_be, 0, 1000, iface_bitmap_01, udpard_prio_fast, 43, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL)); udpard_rx_t rx_be = { .tx = &tx_be }; tx_receive_ack(&rx_be, 22, 43); - TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx_be, 22, 43)); + TEST_ASSERT_NOT_NULL(find_transfer_by_id(&tx_be, 43)); udpard_tx_free(&tx_be); // Ack suppressed when coverage not improved. 
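// The suppression rule this block relies on, as a hedged sketch: before
// enqueueing a new ACK, the library presumably checks whether a pending ACK
// for the same remote and transfer-ID already reaches every endpoint the new
// one would, treating a zero IP as an unset endpoint as the fixtures suggest.
// The helper itself is hypothetical.
static bool sketch_ack_improves_coverage(const tx_transfer_t* const pending, const udpard_remote_t remote)
{
    for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
        const bool known = pending->p2p_remote.endpoints[i].ip != 0U;
        const bool fresh = remote.endpoints[i].ip != 0U;
        if (fresh && !known) {
            return true;  // The new ACK reaches an interface the pending one misses.
        }
    }
    return false;  // Full overlap: pushing another ACK would add nothing.
}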
@@ -555,22 +518,30 @@ static void test_tx_ack_and_scheduler(void) 4U, mem, &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - tx_transfer_t prior; - mem_zero(sizeof(prior), &prior); - prior.p2p_destination[0] = make_ep(3); - prior.iface_bitmap = 1U; // matches p2p_destination[0] being valid - prior.remote_topic_hash = 7; - prior.remote_transfer_id = 8; - cavl2_find_or_insert(&tx2.index_transfer_ack, - &(tx_transfer_key_t){ .topic_hash = 7, .transfer_id = 8 }, - tx_cavl_compare_transfer_remote, - &prior.index_transfer_ack, + tx_transfer_t* prior = mem_alloc(mem.transfer, sizeof(tx_transfer_t)); + mem_zero(sizeof(*prior), prior); + prior->kind = frame_ack; + prior->is_p2p = true; + prior->transfer_id = 8; + prior->seq_no = 1; + prior->deadline = 100; + prior->priority = udpard_prio_fast; + prior->p2p_remote.uid = 9; + prior->p2p_remote.endpoints[0] = make_ep(3); + cavl2_find_or_insert( + &tx2.index_deadline, prior, tx_cavl_compare_deadline, &prior->index_deadline, cavl2_trivial_factory); + cavl2_find_or_insert(&tx2.index_transfer_id, + &(tx_key_transfer_id_t){ .transfer_id = prior->transfer_id, .seq_no = prior->seq_no }, + tx_cavl_compare_transfer_id, + &prior->index_transfer_id, cavl2_trivial_factory); + enlist_head(&tx2.agewise, &prior->agewise); rx.errors_ack_tx = 0; rx.tx = &tx2; - tx_send_ack(&rx, 0, udpard_prio_fast, 7, 8, (udpard_remote_t){ .uid = 9, .endpoints = { make_ep(3) } }); + tx_send_ack(&rx, 0, udpard_prio_fast, 8, (udpard_remote_t){ .uid = 9, .endpoints = { make_ep(3) } }); TEST_ASSERT_EQUAL_UINT64(0, rx.errors_ack_tx); TEST_ASSERT_EQUAL_UINT32(0U, udpard_tx_pending_ifaces(&tx2)); + tx_transfer_retire(&tx2, prior, false); udpard_tx_free(&tx2); // Ack replaced with broader coverage. @@ -583,9 +554,8 @@ static void test_tx_ack_and_scheduler(void) mem, &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); rx.tx = &tx3; - tx_send_ack(&rx, 0, udpard_prio_fast, 9, 9, (udpard_remote_t){ .uid = 11, .endpoints = { make_ep(4) } }); - tx_send_ack( - &rx, 0, udpard_prio_fast, 9, 9, (udpard_remote_t){ .uid = 11, .endpoints = { make_ep(4), make_ep(5) } }); + tx_send_ack(&rx, 0, udpard_prio_fast, 9, (udpard_remote_t){ .uid = 11, .endpoints = { make_ep(4) } }); + tx_send_ack(&rx, 0, udpard_prio_fast, 9, (udpard_remote_t){ .uid = 11, .endpoints = { make_ep(4), make_ep(5) } }); TEST_ASSERT_NOT_EQUAL(0U, udpard_tx_pending_ifaces(&tx3)); udpard_tx_free(&tx3); @@ -604,13 +574,13 @@ static void test_tx_ack_and_scheduler(void) &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); rx.errors_ack_tx = 0; rx.tx = &tx6; - tx_send_ack(&rx, 0, udpard_prio_fast, 2, 2, (udpard_remote_t){ .uid = 1, .endpoints = { make_ep(6) } }); + tx_send_ack(&rx, 0, udpard_prio_fast, 2, (udpard_remote_t){ .uid = 1, .endpoints = { make_ep(6) } }); TEST_ASSERT_GREATER_THAN_UINT64(0, rx.errors_ack_tx); udpard_tx_free(&tx6); // Ack push failure increments error. udpard_rx_t rx_fail = { .tx = NULL }; - tx_send_ack(&rx_fail, 0, udpard_prio_fast, 1, 1, (udpard_remote_t){ 0 }); + tx_send_ack(&rx_fail, 0, udpard_prio_fast, 1, (udpard_remote_t){ 0 }); TEST_ASSERT_GREATER_THAN_UINT64(0, rx_fail.errors_ack_tx); // Expired transfer purge with feedback. 
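// An illustrative reading of the purge that the next hunk drives: repeatedly
// take the transfer with the smallest deadline and retire it while expired,
// counting each expiration. sketch_min_by_deadline() is hypothetical; the
// final argument of tx_transfer_retire() requests feedback delivery, which
// reports zero acknowledgements for an expired reliable transfer.
static void sketch_purge_expired(udpard_tx_t* const tx, const udpard_us_t now)
{
    tx_transfer_t* tr = sketch_min_by_deadline(tx);  // hypothetical index lookup
    while ((tr != NULL) && (tr->deadline < now)) {
        tx->errors_expiration++;
        tx_transfer_retire(tx, tr, true);
        tr = sketch_min_by_deadline(tx);
    }
}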
@@ -627,21 +597,18 @@ static void test_tx_ack_and_scheduler(void) mem_zero(sizeof(*exp), exp); exp->deadline = 1; exp->priority = udpard_prio_slow; - exp->topic_hash = 55; exp->transfer_id = 66; + exp->seq_no = 1; + exp->kind = frame_msg_reliable; exp->user = make_user_context(&fstate); - exp->reliable = true; exp->feedback = record_feedback; // Insert into deadline index with stable key. - const tx_time_key_t tx4_deadline_key = { .time = exp->deadline, - .topic_hash = exp->topic_hash, - .transfer_id = exp->transfer_id }; (void)cavl2_find_or_insert( - &tx4.index_deadline, &tx4_deadline_key, tx_cavl_compare_deadline, &exp->index_deadline, cavl2_trivial_factory); - (void)cavl2_find_or_insert(&tx4.index_transfer, - &(tx_transfer_key_t){ .topic_hash = 55, .transfer_id = 66 }, - tx_cavl_compare_transfer, - &exp->index_transfer, + &tx4.index_deadline, exp, tx_cavl_compare_deadline, &exp->index_deadline, cavl2_trivial_factory); + (void)cavl2_find_or_insert(&tx4.index_transfer_id, + &(tx_key_transfer_id_t){ .transfer_id = exp->transfer_id, .seq_no = exp->seq_no }, + tx_cavl_compare_transfer_id, + &exp->index_transfer_id, cavl2_trivial_factory); tx_purge_expired_transfers(&tx4, 2); TEST_ASSERT_GREATER_THAN_UINT64(0, tx4.errors_expiration); @@ -658,19 +625,17 @@ static void test_tx_ack_and_scheduler(void) &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); tx_transfer_t staged; mem_zero(sizeof(staged), &staged); - staged.staged_until = 0; - staged.deadline = 100; - staged.priority = udpard_prio_fast; - staged.iface_bitmap = (1U << 0U); - staged.p2p_destination[0] = make_ep(7); - tx_frame_t dummy_frame = { 0 }; + staged.staged_until = 0; + staged.deadline = 100; + staged.priority = udpard_prio_fast; + staged.seq_no = 1; + staged.transfer_id = 7; + staged.kind = frame_msg_reliable; + tx_frame_t dummy_frame = { 0 }; staged.head[0] = staged.cursor[0] = &dummy_frame; // Insert into staged index with stable key. 
- const tx_time_key_t tx5_staged_key = { .time = staged.staged_until, - .topic_hash = staged.topic_hash, - .transfer_id = staged.transfer_id }; cavl2_find_or_insert( - &tx5.index_staged, &tx5_staged_key, tx_cavl_compare_staged, &staged.index_staged, cavl2_trivial_factory); + &tx5.index_staged, &staged, tx_cavl_compare_staged, &staged.index_staged, cavl2_trivial_factory); tx5.ack_baseline_timeout = 1; tx_promote_staged_transfers(&tx5, 1); TEST_ASSERT_NOT_NULL(tx5.queue[0][staged.priority].head); @@ -706,6 +671,7 @@ static void test_tx_stage_if(void) tr.priority = udpard_prio_nominal; tr.deadline = 1000; tr.staged_until = 100; + tr.kind = frame_msg_reliable; udpard_us_t expected = tr.staged_until; @@ -755,7 +721,6 @@ static void test_tx_stage_if_via_tx_push(void) iface_bitmap_12, udpard_prio_nominal, 77, - 1, make_scattered(NULL, 0), record_feedback, make_user_context(&fb))); @@ -801,7 +766,6 @@ static void test_tx_stage_if_short_deadline(void) iface_bitmap_1, udpard_prio_nominal, 78, - 1, make_scattered(NULL, 0), record_feedback, make_user_context(&fb))); @@ -842,165 +806,23 @@ static void test_tx_cancel(void) iface_bitmap_1, udpard_prio_fast, 200, - 1, make_scattered(NULL, 0), record_feedback, make_user_context(&fstate))); - TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 200, 1)); - TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, 200, 1)); - TEST_ASSERT_NULL(tx_transfer_find(&tx, 200, 1)); + TEST_ASSERT_NOT_NULL(find_transfer_by_id(&tx, 200)); + TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, 200, true)); + TEST_ASSERT_NULL(find_transfer_by_id(&tx, 200)); TEST_ASSERT_EQUAL_size_t(1, fstate.count); TEST_ASSERT_EQUAL_UINT32(0, fstate.last.acknowledgements); TEST_ASSERT_EQUAL_size_t(0, tx.enqueued_frames_count); - TEST_ASSERT_FALSE(udpard_tx_cancel(&tx, 200, 1)); + TEST_ASSERT_FALSE(udpard_tx_cancel(&tx, 200, true)); // Best-effort transfer cancels quietly. - TEST_ASSERT_GREATER_THAN_UINT32(0, - udpard_tx_push(&tx, - 0, - 100, - iface_bitmap_1, - udpard_prio_fast, - 201, - 2, - make_scattered(NULL, 0), - NULL, - UDPARD_USER_CONTEXT_NULL)); - TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, 201, 2)); - TEST_ASSERT_EQUAL_size_t(0, tx.enqueued_frames_count); - - udpard_tx_free(&tx); - instrumented_allocator_reset(&alloc); -} - -// Cancels all transfers matching a topic hash. -static void test_tx_cancel_all(void) -{ - // NULL self returns zero. - TEST_ASSERT_EQUAL_size_t(0, udpard_tx_cancel_all(NULL, 0)); - - instrumented_allocator_t alloc = { 0 }; - instrumented_allocator_new(&alloc); - udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc) }; - for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - mem.payload[i] = instrumented_allocator_make_resource(&alloc); - } - - udpard_tx_t tx = { 0 }; - feedback_state_t fstate = { 0 }; - eject_state_t eject = { .count = 0, .allow = false }; // Block ejection to retain frames. - const uint16_t iface_bitmap_1 = (1U << 0U); - udpard_tx_vtable_t vt = { .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag }; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 40U, 1U, 16U, mem, &vt)); - tx.user = &eject; - - // Cancel with no matching transfers returns zero. - TEST_ASSERT_EQUAL_size_t(0, udpard_tx_cancel_all(&tx, 999)); - - // Push multiple transfers with different topic hashes. 
- // Topic 100: transfers 1, 2, 3 (reliable) - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 1000, - iface_bitmap_1, - udpard_prio_fast, - 100, - 1, - make_scattered(NULL, 0), - record_feedback, - make_user_context(&fstate))); - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 1000, - iface_bitmap_1, - udpard_prio_fast, - 100, - 2, - make_scattered(NULL, 0), - record_feedback, - make_user_context(&fstate))); - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 1000, - iface_bitmap_1, - udpard_prio_fast, - 100, - 3, - make_scattered(NULL, 0), - record_feedback, - make_user_context(&fstate))); - // Topic 200: transfers 1, 2 (best-effort) - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 1000, - iface_bitmap_1, - udpard_prio_nominal, - 200, - 1, - make_scattered(NULL, 0), - NULL, - UDPARD_USER_CONTEXT_NULL)); - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 1000, - iface_bitmap_1, - udpard_prio_nominal, - 200, - 2, - make_scattered(NULL, 0), - NULL, - UDPARD_USER_CONTEXT_NULL)); - // Topic 300: transfer 1 (reliable) - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 1000, - iface_bitmap_1, - udpard_prio_low, - 300, - 1, - make_scattered(NULL, 0), - record_feedback, - make_user_context(&fstate))); - - TEST_ASSERT_EQUAL_size_t(6, tx.enqueued_frames_count); - TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 100, 1)); - TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 100, 2)); - TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 100, 3)); - TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 200, 1)); - TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 200, 2)); - TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 300, 1)); - - // Cancel all topic 100 transfers; feedback invoked for each reliable transfer. - fstate.count = 0; - TEST_ASSERT_EQUAL_size_t(3, udpard_tx_cancel_all(&tx, 100)); - TEST_ASSERT_EQUAL_size_t(3, fstate.count); - TEST_ASSERT_EQUAL_UINT32(0, fstate.last.acknowledgements); - TEST_ASSERT_NULL(tx_transfer_find(&tx, 100, 1)); - TEST_ASSERT_NULL(tx_transfer_find(&tx, 100, 2)); - TEST_ASSERT_NULL(tx_transfer_find(&tx, 100, 3)); - // Other topics remain. - TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 200, 1)); - TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 200, 2)); - TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 300, 1)); - TEST_ASSERT_EQUAL_size_t(3, tx.enqueued_frames_count); - - // Cancel topic 200 (best-effort, no feedback). - fstate.count = 0; - TEST_ASSERT_EQUAL_size_t(2, udpard_tx_cancel_all(&tx, 200)); - TEST_ASSERT_EQUAL_size_t(0, fstate.count); - TEST_ASSERT_NULL(tx_transfer_find(&tx, 200, 1)); - TEST_ASSERT_NULL(tx_transfer_find(&tx, 200, 2)); - TEST_ASSERT_NOT_NULL(tx_transfer_find(&tx, 300, 1)); - TEST_ASSERT_EQUAL_size_t(1, tx.enqueued_frames_count); - - // Cancel already-cancelled topic returns zero. - TEST_ASSERT_EQUAL_size_t(0, udpard_tx_cancel_all(&tx, 100)); - - // Cancel last remaining topic. 
- fstate.count = 0; - TEST_ASSERT_EQUAL_size_t(1, udpard_tx_cancel_all(&tx, 300)); - TEST_ASSERT_EQUAL_size_t(1, fstate.count); - TEST_ASSERT_NULL(tx_transfer_find(&tx, 300, 1)); + TEST_ASSERT_GREATER_THAN_UINT32( + 0, + udpard_tx_push( + &tx, 0, 100, iface_bitmap_1, udpard_prio_fast, 201, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL)); + TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, 201, false)); TEST_ASSERT_EQUAL_size_t(0, tx.enqueued_frames_count); udpard_tx_free(&tx); @@ -1037,7 +859,6 @@ static void test_tx_spool_deduplication(void) iface_bitmap_12, udpard_prio_nominal, 1, - 1, make_scattered(payload_big, sizeof(payload_big)), NULL, UDPARD_USER_CONTEXT_NULL)); @@ -1066,7 +887,6 @@ static void test_tx_spool_deduplication(void) iface_bitmap_12, udpard_prio_nominal, 2, - 2, make_scattered(payload_small, sizeof(payload_small)), NULL, UDPARD_USER_CONTEXT_NULL)); @@ -1093,7 +913,6 @@ static void test_tx_spool_deduplication(void) iface_bitmap_12, udpard_prio_nominal, 3, - 3, make_scattered(payload_split, sizeof(payload_split)), NULL, UDPARD_USER_CONTEXT_NULL)); @@ -1127,7 +946,6 @@ static void test_tx_spool_deduplication(void) iface_bitmap_12, udpard_prio_nominal, 4, - 4, make_scattered(payload_one, sizeof(payload_one)), NULL, UDPARD_USER_CONTEXT_NULL)); @@ -1161,7 +979,7 @@ static void test_tx_eject_only_from_poll(void) // Push a subject transfer; eject must NOT be called. eject.count = 0; TEST_ASSERT_TRUE(udpard_tx_push( - &tx, 0, 1000, iface_bitmap_1, udpard_prio_fast, 100, 1, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL)); + &tx, 0, 1000, iface_bitmap_1, udpard_prio_fast, 100, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL)); TEST_ASSERT_EQUAL_size_t(0, eject.count); // eject NOT called from push // Push a P2P transfer; eject must NOT be called. @@ -1177,16 +995,8 @@ static void test_tx_eject_only_from_poll(void) // Push more transfers while frames are pending; eject still must NOT be called. const size_t eject_count_before = eject.count; eject.allow = false; // block ejection to keep frames pending - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 1000, - iface_bitmap_1, - udpard_prio_nominal, - 200, - 2, - make_scattered(NULL, 0), - NULL, - UDPARD_USER_CONTEXT_NULL)); + TEST_ASSERT_TRUE(udpard_tx_push( + &tx, 0, 1000, iface_bitmap_1, udpard_prio_nominal, 200, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL)); TEST_ASSERT_EQUAL_size_t(eject_count_before, eject.count); // eject NOT called from push TEST_ASSERT_TRUE(udpard_tx_push_p2p( @@ -1217,7 +1027,6 @@ int main(void) RUN_TEST(test_tx_stage_if_via_tx_push); RUN_TEST(test_tx_stage_if_short_deadline); RUN_TEST(test_tx_cancel); - RUN_TEST(test_tx_cancel_all); RUN_TEST(test_tx_spool_deduplication); RUN_TEST(test_tx_eject_only_from_poll); RUN_TEST(test_tx_ack_and_scheduler); From 2110818eb47ea06b54fbb0d2bd4e497238ecb100 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Mon, 2 Feb 2026 23:51:41 +0200 Subject: [PATCH 08/13] add tests --- tests/src/test_intrusive_tx.c | 167 ++++++++++++++++++++++++++++++++++ 1 file changed, 167 insertions(+) diff --git a/tests/src/test_intrusive_tx.c b/tests/src/test_intrusive_tx.c index 34ad3d1..43ab553 100644 --- a/tests/src/test_intrusive_tx.c +++ b/tests/src/test_intrusive_tx.c @@ -114,6 +114,27 @@ static tx_transfer_t* find_transfer_by_id(udpard_tx_t* const tx, const uint64_t return ((tr != NULL) && (tr->transfer_id == transfer_id)) ? tr : NULL; } +// Counts transfers by transfer-ID and kind. 
+static size_t count_transfers_by_id_and_kind(udpard_tx_t* const tx, const uint64_t transfer_id, const frame_kind_t kind) +{ + if (tx == NULL) { + return 0; + } + size_t count = 0; + const tx_key_transfer_id_t key = { .transfer_id = transfer_id, .seq_no = 0 }; + for (tx_transfer_t* tr = + CAVL2_TO_OWNER(cavl2_lower_bound(tx->index_transfer_id, &key, &tx_cavl_compare_transfer_id), + tx_transfer_t, + index_transfer_id); + (tr != NULL) && (tr->transfer_id == transfer_id); + tr = CAVL2_TO_OWNER(cavl2_next_greater(&tr->index_transfer_id), tx_transfer_t, index_transfer_id)) { + if (tr->kind == kind) { + count++; + } + } + return count; +} + static void test_bytes_scattered_read(void) { // Skips empty fragments and spans boundaries. @@ -509,6 +530,70 @@ static void test_tx_ack_and_scheduler(void) TEST_ASSERT_NOT_NULL(find_transfer_by_id(&tx_be, 43)); udpard_tx_free(&tx_be); + // ACK acceptance skips colliding P2P transfers from other remotes. + udpard_tx_t tx_coll_rx = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx_coll_rx, + 10U, + 1U, + 8U, + mem, + &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + udpard_rx_t rx_coll = { .tx = &tx_coll_rx }; + feedback_state_t fb_a = { 0 }; + feedback_state_t fb_b = { 0 }; + const uint64_t coll_id = 55; + // Insert first colliding transfer. + tx_transfer_t* tr_a = mem_alloc(mem.transfer, sizeof(tx_transfer_t)); + mem_zero(sizeof(*tr_a), tr_a); + tr_a->kind = frame_msg_reliable; + tr_a->is_p2p = true; + tr_a->transfer_id = coll_id; + tr_a->seq_no = 1; + tr_a->deadline = 10; + tr_a->priority = udpard_prio_fast; + tr_a->p2p_remote.uid = 1001; + tr_a->user = make_user_context(&fb_a); + tr_a->feedback = record_feedback; + cavl2_find_or_insert( + &tx_coll_rx.index_deadline, tr_a, tx_cavl_compare_deadline, &tr_a->index_deadline, cavl2_trivial_factory); + cavl2_find_or_insert(&tx_coll_rx.index_transfer_id, + &(tx_key_transfer_id_t){ .transfer_id = tr_a->transfer_id, .seq_no = tr_a->seq_no }, + tx_cavl_compare_transfer_id, + &tr_a->index_transfer_id, + cavl2_trivial_factory); + enlist_head(&tx_coll_rx.agewise, &tr_a->agewise); + // Insert second colliding transfer with different remote UID. + tx_transfer_t* tr_b = mem_alloc(mem.transfer, sizeof(tx_transfer_t)); + mem_zero(sizeof(*tr_b), tr_b); + tr_b->kind = frame_msg_reliable; + tr_b->is_p2p = true; + tr_b->transfer_id = coll_id; + tr_b->seq_no = 2; + tr_b->deadline = 10; + tr_b->priority = udpard_prio_fast; + tr_b->p2p_remote.uid = 1002; + tr_b->user = make_user_context(&fb_b); + tr_b->feedback = record_feedback; + cavl2_find_or_insert( + &tx_coll_rx.index_deadline, tr_b, tx_cavl_compare_deadline, &tr_b->index_deadline, cavl2_trivial_factory); + cavl2_find_or_insert(&tx_coll_rx.index_transfer_id, + &(tx_key_transfer_id_t){ .transfer_id = tr_b->transfer_id, .seq_no = tr_b->seq_no }, + tx_cavl_compare_transfer_id, + &tr_b->index_transfer_id, + cavl2_trivial_factory); + enlist_head(&tx_coll_rx.agewise, &tr_b->agewise); + // Accept ack for the second transfer only. + tx_receive_ack(&rx_coll, tr_b->p2p_remote.uid, coll_id); + TEST_ASSERT_EQUAL_size_t(0, fb_a.count); + TEST_ASSERT_EQUAL_size_t(1, fb_b.count); + TEST_ASSERT_EQUAL_size_t(1, count_transfers_by_id_and_kind(&tx_coll_rx, coll_id, frame_msg_reliable)); + // Accept ack for the first transfer. 
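// How tx_receive_ack() presumably resolves the right transfer when several
// P2P transfers collide on one transfer-ID, judging by what is asserted
// here; the sketch reuses the iteration idiom of find_transfer_by_id() and
// count_transfers_by_id_and_kind() above.
static tx_transfer_t* sketch_find_acked(udpard_tx_t* const tx, const uint64_t remote_uid, const uint64_t transfer_id)
{
    const tx_key_transfer_id_t key = { .transfer_id = transfer_id, .seq_no = 0 };
    for (tx_transfer_t* tr =
             CAVL2_TO_OWNER(cavl2_lower_bound(tx->index_transfer_id, &key, &tx_cavl_compare_transfer_id),
                            tx_transfer_t,
                            index_transfer_id);
         (tr != NULL) && (tr->transfer_id == transfer_id);
         tr = CAVL2_TO_OWNER(cavl2_next_greater(&tr->index_transfer_id), tx_transfer_t, index_transfer_id)) {
        if (tr->is_p2p && (tr->kind == frame_msg_reliable) && (tr->p2p_remote.uid == remote_uid)) {
            return tr;  // Colliding transfers addressed to other remotes are skipped.
        }
    }
    return NULL;
}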
+ tx_receive_ack(&rx_coll, tr_a->p2p_remote.uid, coll_id); + TEST_ASSERT_EQUAL_size_t(1, fb_a.count); + TEST_ASSERT_EQUAL_size_t(0, count_transfers_by_id_and_kind(&tx_coll_rx, coll_id, frame_msg_reliable)); + udpard_tx_free(&tx_coll_rx); + // Ack suppressed when coverage not improved. udpard_tx_t tx2 = { 0 }; TEST_ASSERT_TRUE(udpard_tx_new( @@ -559,6 +644,33 @@ static void test_tx_ack_and_scheduler(void) TEST_ASSERT_NOT_EQUAL(0U, udpard_tx_pending_ifaces(&tx3)); udpard_tx_free(&tx3); + // Ack emission ignores colliding non-ack transfers. + udpard_tx_t tx_coll_ack = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx_coll_ack, + 12U, + 3U, + 4U, + mem, + &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + rx.tx = &tx_coll_ack; + rx.errors_ack_tx = 0; + TEST_ASSERT_TRUE(udpard_tx_push(&tx_coll_ack, + 0, + 1000, + iface_bitmap_01, + udpard_prio_fast, + 60, + make_scattered(NULL, 0), + record_feedback, + make_user_context(&fstate))); + TEST_ASSERT_EQUAL_size_t(1, count_transfers_by_id_and_kind(&tx_coll_ack, 60, frame_msg_reliable)); + tx_send_ack(&rx, 0, udpard_prio_fast, 60, (udpard_remote_t){ .uid = 77, .endpoints = { make_ep(7) } }); + TEST_ASSERT_EQUAL_UINT64(0, rx.errors_ack_tx); + TEST_ASSERT_EQUAL_size_t(1, count_transfers_by_id_and_kind(&tx_coll_ack, 60, frame_msg_reliable)); + TEST_ASSERT_EQUAL_size_t(1, count_transfers_by_id_and_kind(&tx_coll_ack, 60, frame_ack)); + udpard_tx_free(&tx_coll_ack); + // Ack push failure with TX present. udpard_tx_mem_resources_t fail_mem = { .transfer = { .vtable = &mem_vtable_noop_alloc, .context = NULL } }; for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { @@ -825,6 +937,61 @@ static void test_tx_cancel(void) TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, 201, false)); TEST_ASSERT_EQUAL_size_t(0, tx.enqueued_frames_count); + // Collisions cancel all reliable transfers with the same ID. + fstate.count = 0; + const uint64_t coll_id = 300; + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 100, + iface_bitmap_1, + udpard_prio_fast, + coll_id, + make_scattered(NULL, 0), + record_feedback, + make_user_context(&fstate))); + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 100, + iface_bitmap_1, + udpard_prio_fast, + coll_id, + make_scattered(NULL, 0), + record_feedback, + make_user_context(&fstate))); + TEST_ASSERT_EQUAL_size_t(2, count_transfers_by_id_and_kind(&tx, coll_id, frame_msg_reliable)); + TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, coll_id, true)); + TEST_ASSERT_EQUAL_size_t(0, count_transfers_by_id_and_kind(&tx, coll_id, frame_msg_reliable)); + TEST_ASSERT_EQUAL_size_t(2, fstate.count); + + // Best-effort collisions do not cancel reliable transfers. 
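// The semantics asserted below, as a hedged sketch: when IDs collide,
// udpard_tx_cancel(tx, id, reliable) presumably retires every transfer of
// the requested kind and leaves the other kind untouched.
// sketch_find_by_id_and_kind() is hypothetical; count_transfers_by_id_and_kind()
// above shows one way such a lookup can walk the index.
static bool sketch_cancel_kind(udpard_tx_t* const tx, const uint64_t transfer_id, const bool reliable)
{
    const frame_kind_t wanted = reliable ? frame_msg_reliable : frame_msg_best;
    bool any = false;
    tx_transfer_t* tr = NULL;
    while ((tr = sketch_find_by_id_and_kind(tx, transfer_id, wanted)) != NULL) {
        tx_transfer_retire(tx, tr, true);  // Reliable victims get feedback with zero acks.
        any = true;
    }
    return any;
}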
+ fstate.count = 0; + const uint64_t coll_id2 = 301; + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 100, + iface_bitmap_1, + udpard_prio_fast, + coll_id2, + make_scattered(NULL, 0), + record_feedback, + make_user_context(&fstate))); + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 0, + 100, + iface_bitmap_1, + udpard_prio_fast, + coll_id2, + make_scattered(NULL, 0), + NULL, + UDPARD_USER_CONTEXT_NULL)); + TEST_ASSERT_EQUAL_size_t(1, count_transfers_by_id_and_kind(&tx, coll_id2, frame_msg_reliable)); + TEST_ASSERT_EQUAL_size_t(1, count_transfers_by_id_and_kind(&tx, coll_id2, frame_msg_best)); + TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, coll_id2, false)); + TEST_ASSERT_EQUAL_size_t(1, count_transfers_by_id_and_kind(&tx, coll_id2, frame_msg_reliable)); + TEST_ASSERT_EQUAL_size_t(0, count_transfers_by_id_and_kind(&tx, coll_id2, frame_msg_best)); + TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, coll_id2, true)); + TEST_ASSERT_EQUAL_size_t(0, count_transfers_by_id_and_kind(&tx, coll_id2, frame_msg_reliable)); + udpard_tx_free(&tx); instrumented_allocator_reset(&alloc); } From c425dd1db11c9385b57a25540e38fc2eba9231af Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Tue, 3 Feb 2026 00:36:03 +0200 Subject: [PATCH 09/13] tests --- tests/src/test_fragment.cpp | 13 +++ tests/src/test_intrusive_guards.c | 112 ++++++++++++++++++++++- tests/src/test_intrusive_rx.c | 54 +++++++++++ tests/src/test_intrusive_tx.c | 147 ++++++++++++++++++++++++++++++ 4 files changed, 324 insertions(+), 2 deletions(-) diff --git a/tests/src/test_fragment.cpp b/tests/src/test_fragment.cpp index f416f04..a870d13 100644 --- a/tests/src/test_fragment.cpp +++ b/tests/src/test_fragment.cpp @@ -86,6 +86,19 @@ void test_udpard_fragment_seek() instrumented_allocator_reset(&alloc_frag); instrumented_allocator_reset(&alloc_payload); + // Test 1b: Seek before the first fragment returns NULL. + udpard_fragment_t* single_hi = make_test_fragment(mem_frag, mem_payload, del_payload, 5, 2, "hi"); + TEST_ASSERT_NOT_NULL(single_hi); + single_hi->index_offset.up = nullptr; + single_hi->index_offset.lr[0] = nullptr; + single_hi->index_offset.lr[1] = nullptr; + single_hi->index_offset.bf = 0; + TEST_ASSERT_NULL(udpard_fragment_seek(single_hi, 1)); + mem_res_free(mem_payload, single_hi->origin.size, single_hi->origin.data); + mem_res_free(mem_frag, sizeof(udpard_fragment_t), single_hi); + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_payload); + // Test 2: Tree with root and child - to test the root-finding loop. // Create a simple tree: root at offset 5, left child at offset 0, right child at offset 10 udpard_fragment_t* root = make_test_fragment(mem_frag, mem_payload, del_payload, 5, 3, "mid"); diff --git a/tests/src/test_intrusive_guards.c b/tests/src/test_intrusive_guards.c index d51620a..34bdb63 100644 --- a/tests/src/test_intrusive_guards.c +++ b/tests/src/test_intrusive_guards.c @@ -87,6 +87,8 @@ static void test_mem_endpoint_list_guards(void) udpard_listed_t tail = { 0 }; enlist_head(&list, &tail); TEST_ASSERT_TRUE(is_listed(&list, &member)); + // is_listed returns true when next is populated. + TEST_ASSERT_TRUE(is_listed(&list, &tail)); // NULL endpoint list yields empty bitmap. TEST_ASSERT_EQUAL_UINT16(0U, valid_ep_bitmap(NULL)); @@ -158,12 +160,19 @@ static void test_tx_guards(void) // Push helpers reject invalid timing and null handles. 
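// The rejection terms the assertions below walk through, collapsed into one
// illustrative predicate (ordering and the exact comparisons are assumptions;
// the real udpard_tx_push() guard may differ):
static bool sketch_tx_push_args_ok(const udpard_tx_t* const self,
                                   const udpard_us_t now,
                                   const udpard_us_t deadline,
                                   const uint16_t iface_bitmap,
                                   const udpard_prio_t priority,
                                   const udpard_bytes_scattered_t payload)
{
    return (self != NULL) && (now >= 0) && (deadline > now) &&            // timing must be sane
           (iface_bitmap != 0U) &&                                        // at least one interface
           ((unsigned)priority < UDPARD_PRIORITY_COUNT) &&                // priority in range
           (self->local_uid != 0U) &&                                     // node identity configured
           ((payload.bytes.data != NULL) || (payload.bytes.size == 0U));  // no dangling payload
}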
const uint16_t iface_bitmap_1 = (1U << 0U); const udpard_bytes_scattered_t empty_payload = { .bytes = { .size = 0U, .data = NULL }, .next = NULL }; + const udpard_remote_t remote_ok = { .uid = 1, .endpoints = { { .ip = 1U, .port = UDP_PORT } } }; TEST_ASSERT_FALSE( udpard_tx_push(&tx, 10, 5, iface_bitmap_1, udpard_prio_fast, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); TEST_ASSERT_FALSE( udpard_tx_push(NULL, 0, 0, iface_bitmap_1, udpard_prio_fast, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); - TEST_ASSERT_FALSE(udpard_tx_push_p2p( - NULL, 0, 0, udpard_prio_fast, (udpard_remote_t){ 0 }, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL, NULL)); + TEST_ASSERT_FALSE( + udpard_tx_push_p2p(NULL, 0, 0, udpard_prio_fast, remote_ok, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL, NULL)); + // P2P pushes reject expired deadlines. + TEST_ASSERT_FALSE( + udpard_tx_push_p2p(&tx, 2, 1, udpard_prio_fast, remote_ok, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL, NULL)); + // P2P pushes reject negative timestamps. + TEST_ASSERT_FALSE( + udpard_tx_push_p2p(&tx, -1, 0, udpard_prio_fast, remote_ok, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL, NULL)); // Reject invalid payload pointer and empty interface bitmap. const udpard_bytes_scattered_t bad_payload = { .bytes = { .size = 1U, .data = NULL }, .next = NULL }; TEST_ASSERT_FALSE( @@ -174,6 +183,34 @@ static void test_tx_guards(void) TEST_ASSERT_FALSE( udpard_tx_push_p2p(&tx, 0, 1, udpard_prio_fast, remote_bad, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL, NULL)); + // Reject invalid timestamps and priority. + TEST_ASSERT_FALSE( + udpard_tx_push(&tx, -1, 0, iface_bitmap_1, udpard_prio_fast, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); + // Use an out-of-range priority without a constant enum cast. + udpard_prio_t bad_prio = udpard_prio_optional; + const unsigned bad_prio_raw = UDPARD_PRIORITY_COUNT; + memcpy(&bad_prio, &bad_prio_raw, sizeof(bad_prio)); + TEST_ASSERT_FALSE( + udpard_tx_push(&tx, 0, 1, iface_bitmap_1, bad_prio, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); + + // Reject zero local UID. + const uint64_t saved_uid = tx.local_uid; + tx.local_uid = 0U; + TEST_ASSERT_FALSE( + udpard_tx_push(&tx, 0, 1, iface_bitmap_1, udpard_prio_fast, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); + tx.local_uid = saved_uid; + + // P2P guard paths cover local UID, priority, and payload pointer. + uint64_t out_tid = 0; + tx.local_uid = 0U; + TEST_ASSERT_FALSE(udpard_tx_push_p2p( + &tx, 0, 1, udpard_prio_fast, remote_ok, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL, &out_tid)); + tx.local_uid = saved_uid; + TEST_ASSERT_FALSE( + udpard_tx_push_p2p(&tx, 0, 1, bad_prio, remote_ok, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL, &out_tid)); + TEST_ASSERT_FALSE(udpard_tx_push_p2p( + &tx, 0, 1, udpard_prio_fast, remote_ok, bad_payload, NULL, UDPARD_USER_CONTEXT_NULL, &out_tid)); + // Poll and refcount no-ops on null data. udpard_tx_poll(NULL, 0, 0); udpard_tx_poll(&tx, (udpard_us_t)-1, 0); @@ -199,6 +236,20 @@ static void test_tx_predictor_sharing(void) make_mem(&shared_tag[1]), make_mem(&shared_tag[1]) }; TEST_ASSERT_EQUAL_size_t(2U, tx_predict_frame_count(mtu, mem_arr_split, iface_bitmap_12, 16U)); + + // Shared spool when payload fits smaller MTU despite mismatch. 
+ const size_t mtu_mixed[UDPARD_IFACE_COUNT_MAX] = { 64U, 128U, 128U }; + const uint16_t iface_bitmap_01 = (1U << 0U) | (1U << 1U); + TEST_ASSERT_EQUAL_size_t(1U, tx_predict_frame_count(mtu_mixed, mem_arr, iface_bitmap_01, 32U)); + + // Gapped bitmap exercises the unset-bit branch. + static char gap_tag[3]; + const udpard_mem_t mem_gap[UDPARD_IFACE_COUNT_MAX] = { make_mem(&gap_tag[0]), + make_mem(&gap_tag[1]), + make_mem(&gap_tag[2]) }; + const size_t mtu_gap[UDPARD_IFACE_COUNT_MAX] = { 64U, 64U, 64U }; + const uint16_t iface_bitmap_02 = (1U << 0U) | (1U << 2U); + TEST_ASSERT_EQUAL_size_t(2U, tx_predict_frame_count(mtu_gap, mem_gap, iface_bitmap_02, 16U)); } static void test_rx_guards(void) @@ -229,7 +280,11 @@ static void test_rx_guards(void) TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_fragment)); bad_fragment.fragment.vtable = &vtable_no_alloc; TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_fragment)); + bad_fragment.fragment.vtable = NULL; + TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_fragment)); TEST_ASSERT_TRUE(udpard_rx_port_new_stateless(&port, 8U, rx_mem, &rx_vtb)); + TEST_ASSERT_FALSE(udpard_rx_port_new_stateless(&port, 8U, bad_fragment, &rx_vtb)); + TEST_ASSERT_FALSE(udpard_rx_port_new_p2p(&port, 8U, bad_fragment, &rx_vtb)); // Invalid datagram inputs are rejected without processing. udpard_rx_t rx; @@ -250,6 +305,59 @@ static void test_rx_guards(void) small_payload, (udpard_deleter_t){ .vtable = &(udpard_deleter_vtable_t){ .free = NULL }, .context = NULL }, 0)); + // Cover each guard term with a valid baseline payload. + const udpard_deleter_t deleter_ok = { .vtable = &deleter_vtable, .context = NULL }; + byte_t dgram[HEADER_SIZE_BYTES]; + const meta_t meta = { .priority = udpard_prio_nominal, + .kind = frame_msg_best, + .transfer_payload_size = 0, + .transfer_id = 1, + .sender_uid = 2 }; + header_serialize(dgram, meta, 0, 0, crc_full(0, NULL)); + const udpard_bytes_mut_t dgram_view = { .size = sizeof(dgram), .data = dgram }; + const udpard_udpip_ep_t ep_ok = { .ip = 1U, .port = UDP_PORT }; + TEST_ASSERT_FALSE(udpard_rx_port_push(NULL, &port, 0, ep_ok, dgram_view, deleter_ok, 0)); + TEST_ASSERT_FALSE(udpard_rx_port_push(&rx, NULL, 0, ep_ok, dgram_view, deleter_ok, 0)); + TEST_ASSERT_FALSE(udpard_rx_port_push(&rx, &port, -1, ep_ok, dgram_view, deleter_ok, 0)); + TEST_ASSERT_FALSE( + udpard_rx_port_push(&rx, &port, 0, (udpard_udpip_ep_t){ .ip = 0U, .port = UDP_PORT }, dgram_view, deleter_ok, 0)); + TEST_ASSERT_FALSE( + udpard_rx_port_push(&rx, &port, 0, ep_ok, (udpard_bytes_mut_t){ .size = 1U, .data = NULL }, deleter_ok, 0)); + TEST_ASSERT_FALSE(udpard_rx_port_push(&rx, &port, 0, ep_ok, dgram_view, deleter_ok, UDPARD_IFACE_COUNT_MAX)); + TEST_ASSERT_FALSE( + udpard_rx_port_push(&rx, &port, 0, ep_ok, dgram_view, (udpard_deleter_t){ .vtable = NULL, .context = NULL }, 0)); + TEST_ASSERT_FALSE( + udpard_rx_port_push(&rx, + &port, + 0, + ep_ok, + dgram_view, + (udpard_deleter_t){ .vtable = &(udpard_deleter_vtable_t){ .free = NULL }, .context = NULL }, + 0)); + + // ACK frames are accepted on P2P ports. + udpard_rx_port_t port_p2p; + TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&port_p2p, 8U, rx_mem, &rx_vtb)); + const meta_t ack_meta = { .priority = udpard_prio_nominal, + .kind = frame_ack, + .transfer_payload_size = 0, + .transfer_id = 2, + .sender_uid = 3 }; + header_serialize(dgram, ack_meta, 0, 0, crc_full(0, NULL)); + TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &port_p2p, 0, ep_ok, dgram_view, deleter_ok, 0)); + + // ACK frames are rejected on non-P2P ports. 
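+    // Note the asymmetry verified below: the push itself still returns true because the datagram was
+    // consumed, while the offending frame is dropped internally and counted in errors_frame_malformed.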
+ const uint64_t errors_before_ack = rx.errors_frame_malformed; + header_serialize(dgram, ack_meta, 0, 0, crc_full(0, NULL)); + TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &port, 0, ep_ok, dgram_view, deleter_ok, 0)); + TEST_ASSERT_EQUAL_UINT64(errors_before_ack + 1U, rx.errors_frame_malformed); + + // Malformed frames are rejected after parsing. + const uint64_t errors_before_bad = rx.errors_frame_malformed; + header_serialize(dgram, meta, 0, 0, crc_full(0, NULL)); + dgram[HEADER_SIZE_BYTES - 1] ^= 0xFFU; + TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &port, 0, ep_ok, dgram_view, deleter_ok, 0)); + TEST_ASSERT_EQUAL_UINT64(errors_before_bad + 1U, rx.errors_frame_malformed); // Port freeing should tolerate null rx. udpard_rx_port_free(NULL, &port); diff --git a/tests/src/test_intrusive_rx.c b/tests/src/test_intrusive_rx.c index 1dec3bc..afc7b35 100644 --- a/tests/src/test_intrusive_rx.c +++ b/tests/src/test_intrusive_rx.c @@ -202,6 +202,60 @@ static void test_rx_fragment_tree_update_a(void) instrumented_allocator_reset(&alloc_frag); instrumented_allocator_reset(&alloc_payload); + // Redundant fragment removal when a larger fragment bridges neighbors. + { + udpard_tree_t* root = NULL; + size_t cov = 0; + rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; + const char payload[] = "abcdefghij"; + + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 0, 2, payload), + 10, + 10, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 2, 2, payload + 2), + 10, + 10, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 6, 2, payload + 6), + 10, + 10, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); + TEST_ASSERT_EQUAL_size_t(3, tree_count(root)); + + res = rx_fragment_tree_update(&root, // + mem_frag, + del_payload, + make_frame_base(mem_payload, 1, 6, payload + 1), + 10, + 10, + &cov); + TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); + TEST_ASSERT_EQUAL_size_t(3, tree_count(root)); + TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->offset); + TEST_ASSERT_EQUAL_size_t(1, fragment_at(root, 1)->offset); + TEST_ASSERT_EQUAL_size_t(6, fragment_at(root, 2)->offset); + + // Cleanup. + udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag)); + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + } + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_payload); + // Non-empty payload test with zero extent. { udpard_tree_t* root = NULL; diff --git a/tests/src/test_intrusive_tx.c b/tests/src/test_intrusive_tx.c index 43ab553..a92410e 100644 --- a/tests/src/test_intrusive_tx.c +++ b/tests/src/test_intrusive_tx.c @@ -258,6 +258,9 @@ static void test_tx_validation_and_free(void) TEST_ASSERT_FALSE(tx_validate_mem_resources(bad_transfer)); bad_transfer.transfer = (udpard_mem_t){ .vtable = &vtable_no_alloc, .context = NULL }; TEST_ASSERT_FALSE(tx_validate_mem_resources(bad_transfer)); + // Reject null transfer vtable. 
+ bad_transfer.transfer = (udpard_mem_t){ .vtable = NULL, .context = NULL }; + TEST_ASSERT_FALSE(tx_validate_mem_resources(bad_transfer)); instrumented_allocator_t alloc_transfer = { 0 }; instrumented_allocator_t alloc_payload = { 0 }; @@ -320,6 +323,14 @@ static void test_tx_comparators_and_feedback(void) TEST_ASSERT_EQUAL(1, tx_cavl_compare_deadline(&key, &tr.index_deadline)); key.deadline = 6; TEST_ASSERT_EQUAL(-1, tx_cavl_compare_deadline(&key, &tr.index_deadline)); + + // Equality returns zero for staged and deadline comparators. + key.staged_until = tr.staged_until; + key.seq_no = tr.seq_no; + TEST_ASSERT_EQUAL(0, tx_cavl_compare_staged(&key, &tr.index_staged)); + key.deadline = tr.deadline; + key.seq_no = tr.seq_no; + TEST_ASSERT_EQUAL(0, tx_cavl_compare_deadline(&key, &tr.index_deadline)); // Staged comparator covers seq_no branches. key.staged_until = tr.staged_until; key.seq_no = tr.seq_no - 1; @@ -530,6 +541,35 @@ static void test_tx_ack_and_scheduler(void) TEST_ASSERT_NOT_NULL(find_transfer_by_id(&tx_be, 43)); udpard_tx_free(&tx_be); + // Ack lookup misses when the lower bound has a different transfer-ID. + udpard_tx_t tx_miss = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx_miss, + 10U, + 1U, + 8U, + mem, + &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + tx_transfer_t* miss = mem_alloc(mem.transfer, sizeof(tx_transfer_t)); + mem_zero(sizeof(*miss), miss); + miss->kind = frame_msg_best; + miss->transfer_id = 100; + miss->seq_no = 1; + miss->deadline = 50; + miss->priority = udpard_prio_fast; + cavl2_find_or_insert( + &tx_miss.index_deadline, miss, tx_cavl_compare_deadline, &miss->index_deadline, cavl2_trivial_factory); + cavl2_find_or_insert(&tx_miss.index_transfer_id, + &(tx_key_transfer_id_t){ .transfer_id = miss->transfer_id, .seq_no = miss->seq_no }, + tx_cavl_compare_transfer_id, + &miss->index_transfer_id, + cavl2_trivial_factory); + enlist_head(&tx_miss.agewise, &miss->agewise); + udpard_rx_t rx_miss = { .tx = &tx_miss }; + tx_receive_ack(&rx_miss, 21, 99); + TEST_ASSERT_NOT_NULL(find_transfer_by_id(&tx_miss, 100)); + udpard_tx_free(&tx_miss); + // ACK acceptance skips colliding P2P transfers from other remotes. udpard_tx_t tx_coll_rx = { 0 }; TEST_ASSERT_TRUE(udpard_tx_new( @@ -629,6 +669,39 @@ static void test_tx_ack_and_scheduler(void) tx_transfer_retire(&tx2, prior, false); udpard_tx_free(&tx2); + // Ack search skips prior with the same transfer-ID but different UID. 
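+    // Concretely: a pending ACK for transfer-ID 7 addressed to remote UID 1 must not satisfy a new ACK
+    // for transfer-ID 7 addressed to UID 2, so after tx_send_ack() two distinct ACK transfers coexist.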
+ udpard_tx_t tx_uid = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx_uid, + 11U, + 2U, + 4U, + mem, + &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + rx.tx = &tx_uid; + rx.errors_ack_tx = 0; + tx_transfer_t* prior_uid = mem_alloc(mem.transfer, sizeof(tx_transfer_t)); + mem_zero(sizeof(*prior_uid), prior_uid); + prior_uid->kind = frame_ack; + prior_uid->is_p2p = true; + prior_uid->transfer_id = 7; + prior_uid->seq_no = 1; + prior_uid->deadline = 100; + prior_uid->priority = udpard_prio_fast; + prior_uid->p2p_remote.uid = 1; + prior_uid->p2p_remote.endpoints[0] = make_ep(2); + cavl2_find_or_insert( + &tx_uid.index_deadline, prior_uid, tx_cavl_compare_deadline, &prior_uid->index_deadline, cavl2_trivial_factory); + cavl2_find_or_insert(&tx_uid.index_transfer_id, + &(tx_key_transfer_id_t){ .transfer_id = prior_uid->transfer_id, .seq_no = prior_uid->seq_no }, + tx_cavl_compare_transfer_id, + &prior_uid->index_transfer_id, + cavl2_trivial_factory); + enlist_head(&tx_uid.agewise, &prior_uid->agewise); + tx_send_ack(&rx, 0, udpard_prio_fast, 7, (udpard_remote_t){ .uid = 2, .endpoints = { make_ep(3) } }); + TEST_ASSERT_EQUAL_size_t(2, count_transfers_by_id_and_kind(&tx_uid, 7, frame_ack)); + udpard_tx_free(&tx_uid); + // Ack replaced with broader coverage. udpard_tx_t tx3 = { 0 }; TEST_ASSERT_TRUE(udpard_tx_new( @@ -644,6 +717,44 @@ static void test_tx_ack_and_scheduler(void) TEST_ASSERT_NOT_EQUAL(0U, udpard_tx_pending_ifaces(&tx3)); udpard_tx_free(&tx3); + // Ack search ignores prior with different transfer-ID. + udpard_tx_t tx_mismatch = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx_mismatch, + 12U, + 3U, + 4U, + mem, + &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + rx.tx = &tx_mismatch; + rx.errors_ack_tx = 0; + tx_transfer_t* prior_ack = mem_alloc(mem.transfer, sizeof(tx_transfer_t)); + mem_zero(sizeof(*prior_ack), prior_ack); + prior_ack->kind = frame_ack; + prior_ack->is_p2p = true; + prior_ack->transfer_id = 100; + prior_ack->seq_no = 1; + prior_ack->deadline = 100; + prior_ack->priority = udpard_prio_fast; + prior_ack->p2p_remote.uid = 9; + prior_ack->p2p_remote.endpoints[0] = make_ep(3); + cavl2_find_or_insert(&tx_mismatch.index_deadline, + prior_ack, + tx_cavl_compare_deadline, + &prior_ack->index_deadline, + cavl2_trivial_factory); + cavl2_find_or_insert(&tx_mismatch.index_transfer_id, + &(tx_key_transfer_id_t){ .transfer_id = prior_ack->transfer_id, .seq_no = prior_ack->seq_no }, + tx_cavl_compare_transfer_id, + &prior_ack->index_transfer_id, + cavl2_trivial_factory); + enlist_head(&tx_mismatch.agewise, &prior_ack->agewise); + tx_send_ack(&rx, 0, udpard_prio_fast, 99, (udpard_remote_t){ .uid = 9, .endpoints = { make_ep(4) } }); + TEST_ASSERT_EQUAL_UINT64(0, rx.errors_ack_tx); + TEST_ASSERT_EQUAL_size_t(1, count_transfers_by_id_and_kind(&tx_mismatch, 100, frame_ack)); + TEST_ASSERT_EQUAL_size_t(1, count_transfers_by_id_and_kind(&tx_mismatch, 99, frame_ack)); + udpard_tx_free(&tx_mismatch); + // Ack emission ignores colliding non-ack transfers. udpard_tx_t tx_coll_ack = { 0 }; TEST_ASSERT_TRUE(udpard_tx_new( @@ -892,6 +1003,33 @@ static void test_tx_stage_if_short_deadline(void) instrumented_allocator_reset(&alloc); } +static void test_tx_push_p2p_success(void) +{ + // Successful P2P push uses valid endpoints and returns a transfer-ID. 
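+    // A minimal usage sketch of the out-parameter, using the API exercised below (now, deadline, prio,
+    // and payload are placeholders, not values prescribed by the library):
+    //   uint64_t tid = 0;
+    //   if (udpard_tx_push_p2p(&tx, now, deadline, prio, remote, payload, NULL, UDPARD_USER_CONTEXT_NULL, &tid)) {
+    //       (void) udpard_tx_cancel(&tx, tid, false); // tid identifies the transfer for later cancellation
+    //   }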
+ instrumented_allocator_t alloc = { 0 }; + instrumented_allocator_new(&alloc); + udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc) }; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + mem.payload[i] = instrumented_allocator_make_resource(&alloc); + } + udpard_tx_t tx = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx, + 1U, + 2U, + 8U, + mem, + &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + const udpard_remote_t remote = { .uid = 42, .endpoints = { make_ep(11) } }; + uint64_t out_tid = 0; + TEST_ASSERT_TRUE(udpard_tx_push_p2p( + &tx, 0, 10, udpard_prio_fast, remote, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL, &out_tid)); + TEST_ASSERT_NOT_EQUAL(0U, out_tid); + TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, out_tid, false)); + udpard_tx_free(&tx); + instrumented_allocator_reset(&alloc); +} + // Cancels transfers and reports outcome. static void test_tx_cancel(void) { @@ -992,6 +1130,14 @@ static void test_tx_cancel(void) TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, coll_id2, true)); TEST_ASSERT_EQUAL_size_t(0, count_transfers_by_id_and_kind(&tx, coll_id2, frame_msg_reliable)); + // Cancel misses when ID is not present but tree is non-empty. + TEST_ASSERT_TRUE(udpard_tx_push( + &tx, 0, 100, iface_bitmap_1, udpard_prio_fast, 400, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL)); + TEST_ASSERT_FALSE(udpard_tx_cancel(&tx, 399, false)); + TEST_ASSERT_NOT_NULL(find_transfer_by_id(&tx, 400)); + TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, 400, false)); + TEST_ASSERT_NULL(find_transfer_by_id(&tx, 400)); + udpard_tx_free(&tx); instrumented_allocator_reset(&alloc); } @@ -1193,6 +1339,7 @@ int main(void) RUN_TEST(test_tx_stage_if); RUN_TEST(test_tx_stage_if_via_tx_push); RUN_TEST(test_tx_stage_if_short_deadline); + RUN_TEST(test_tx_push_p2p_success); RUN_TEST(test_tx_cancel); RUN_TEST(test_tx_spool_deduplication); RUN_TEST(test_tx_eject_only_from_poll); From 0615c0b21f8bad9387ac30fc9c06666810ec8717 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Tue, 3 Feb 2026 15:01:24 +0200 Subject: [PATCH 10/13] dynamically allocate rx_slot_t to conserve memory --- libudpard/udpard.c | 201 +++++++++++++++++++++++++-------------------- libudpard/udpard.h | 15 ++-- 2 files changed, 120 insertions(+), 96 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 1efc16c..e0e70a3 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -104,6 +104,11 @@ static void mem_free_payload(const udpard_deleter_t memory, const udpard_bytes_m } } +static bool mem_validate(const udpard_mem_t mem) +{ + return (mem.vtable != NULL) && (mem.vtable->alloc != NULL) && (mem.vtable->base.free != NULL); +} + static byte_t* serialize_u32(byte_t* ptr, const uint32_t value) { for (size_t i = 0; i < sizeof(value); i++) { @@ -616,15 +621,11 @@ typedef struct tx_transfer_t static bool tx_validate_mem_resources(const udpard_tx_mem_resources_t memory) { for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - if ((memory.payload[i].vtable == NULL) || // - (memory.payload[i].vtable->base.free == NULL) || // - (memory.payload[i].vtable->alloc == NULL)) { + if (!mem_validate(memory.payload[i])) { return false; } } - return (memory.transfer.vtable != NULL) && // - (memory.transfer.vtable->base.free != NULL) && // - (memory.transfer.vtable->alloc != NULL); + return mem_validate(memory.transfer); } static void tx_transfer_free_payload(tx_transfer_t* const tr) @@ -1658,8 +1659,6 @@ static bool 
rx_fragment_tree_finalize(udpard_tree_t* const root, const uint32_t
 /// The redundant interfaces may use distinct MTU, which requires special fragment tree handling.
 typedef struct
 {
-    bool busy;
-
     uint64_t transfer_id; ///< Which transfer we're reassembling here.

     udpard_us_t ts_min; ///< Earliest frame timestamp, aka transfer reception timestamp.
@@ -1676,30 +1675,49 @@ typedef struct
     udpard_tree_t* fragments;
 } rx_slot_t;

-static void rx_slot_reset(rx_slot_t* const slot, const udpard_mem_t fragment_memory)
+static rx_slot_t* rx_slot_new(const udpard_mem_t slot_memory)
 {
-    udpard_fragment_free_all((udpard_fragment_t*)slot->fragments, udpard_make_deleter(fragment_memory));
-    slot->fragments = NULL;
-    slot->busy = false;
-    slot->covered_prefix = 0U;
-    slot->crc_end = 0U;
-    slot->crc = CRC_INITIAL;
+    rx_slot_t* const slot = mem_alloc(slot_memory, sizeof(rx_slot_t));
+    if (slot != NULL) {
+        mem_zero(sizeof(*slot), slot);
+        slot->ts_min = HEAT_DEATH;
+        slot->ts_max = BIG_BANG;
+        slot->covered_prefix = 0;
+        slot->crc_end = 0;
+        slot->crc = CRC_INITIAL;
+        slot->fragments = NULL;
+    }
+    return slot;
 }

+/// Frees the slot and all of its fragments, then NULLs out the original slot pointer.
+static void rx_slot_destroy(rx_slot_t** const slot_ref,
+                            const udpard_mem_t fragment_memory,
+                            const udpard_mem_t slot_memory)
+{
+    UDPARD_ASSERT((slot_ref != NULL) && (*slot_ref != NULL));
+    udpard_fragment_free_all((udpard_fragment_t*)(*slot_ref)->fragments, udpard_make_deleter(fragment_memory));
+    mem_free(slot_memory, sizeof(rx_slot_t), *slot_ref);
+    *slot_ref = NULL;
+}
+
+typedef enum
+{
+    rx_slot_incomplete,
+    rx_slot_complete,
+    rx_slot_failure,
+} rx_slot_update_result_t;
+
-/// The caller will accept the ownership of the fragments iff the result is true.
-static bool rx_slot_update(rx_slot_t* const slot,
-                           const udpard_us_t ts,
-                           const udpard_mem_t fragment_memory,
-                           const udpard_deleter_t payload_deleter,
-                           rx_frame_t* const frame,
-                           const size_t extent,
-                           uint64_t* const errors_oom,
-                           uint64_t* const errors_transfer_malformed)
-{
-    bool done = false;
-    if (!slot->busy) {
-        rx_slot_reset(slot, fragment_memory);
-        slot->busy = true;
+/// The caller accepts ownership of the fragments iff the result is rx_slot_complete.
+static rx_slot_update_result_t rx_slot_update(rx_slot_t* const slot,
+                                              const udpard_us_t ts,
+                                              const udpard_mem_t fragment_memory,
+                                              const udpard_deleter_t payload_deleter,
+                                              rx_frame_t* const frame,
+                                              const size_t extent,
+                                              uint64_t* const errors_oom)
+{
+    if ((slot->ts_min == HEAT_DEATH) && (slot->ts_max == BIG_BANG)) {
        slot->transfer_id = frame->meta.transfer_id;
        slot->ts_min = ts;
        slot->ts_max = ts;
@@ -1709,10 +1727,8 @@ static bool rx_slot_update(rx_slot_t* const slot,
     }
     // Enforce consistent per-frame values throughout the transfer.
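    // A mismatch here means these frames cannot form one coherent transfer (corruption, or an unrelated
    // transfer reusing the same ID); rx_slot_failure instructs the caller, rx_session_update(), to bump
    // errors_transfer_malformed and destroy the slot.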
if ((slot->total_size != frame->meta.transfer_payload_size) || (slot->priority != frame->meta.priority)) { - ++*errors_transfer_malformed; mem_free_payload(payload_deleter, frame->base.origin); - rx_slot_reset(slot, fragment_memory); - return false; + return rx_slot_failure; } const rx_fragment_tree_update_result_t tree_res = rx_fragment_tree_update(&slot->fragments, fragment_memory, @@ -1735,14 +1751,11 @@ static bool rx_slot_update(rx_slot_t* const slot, } if (tree_res == rx_fragment_tree_done) { if (rx_fragment_tree_finalize(slot->fragments, slot->crc)) { - slot->busy = false; - done = true; - } else { - ++*errors_transfer_malformed; - rx_slot_reset(slot, fragment_memory); + return rx_slot_complete; } + return rx_slot_failure; } - return done; + return rx_slot_incomplete; } // --------------------------------------------- SESSION & PORT --------------------------------------------- @@ -1754,8 +1767,6 @@ typedef struct rx_session_t udpard_tree_t index_remote_uid; ///< Must be the first member. udpard_remote_t remote; ///< Most recent discovered reverse path for P2P to the sender. - udpard_rx_port_t* port; - /// LRU last animated list for automatic retirement of stale sessions. udpard_listed_t list_by_animation; udpard_us_t last_animated_ts; @@ -1767,10 +1778,9 @@ typedef struct rx_session_t bool initialized; ///< Set after the first frame is seen. - // TODO: Static slots are taking too much space; allocate them dynamically instead. - // Each is <=56 bytes so it fits nicely into a 64-byte o1heap block. - // The slot state enum can be replaced with a simple "done" flag. - rx_slot_t slots[RX_SLOT_COUNT]; + udpard_rx_port_t* port; + + rx_slot_t* slots[RX_SLOT_COUNT]; } rx_session_t; /// The reassembly strategy is composed once at initialization time by choosing a vtable with the desired behavior. @@ -1823,8 +1833,7 @@ static udpard_tree_t* cavl_factory_rx_session_by_remote_uid(void* const user) out->index_remote_uid = (udpard_tree_t){ NULL, { NULL, NULL }, 0 }; out->list_by_animation = (udpard_listed_t){ NULL, NULL }; for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - out->slots[i].fragments = NULL; - rx_slot_reset(&out->slots[i], args->owner->memory.fragment); + out->slots[i] = NULL; } out->remote.uid = args->remote_uid; out->port = args->owner; @@ -1840,7 +1849,9 @@ static udpard_tree_t* cavl_factory_rx_session_by_remote_uid(void* const user) static void rx_session_free(rx_session_t* const self, udpard_list_t* const sessions_by_animation) { for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - rx_slot_reset(&self->slots[i], self->port->memory.fragment); + if (self->slots[i] != NULL) { + rx_slot_destroy(&self->slots[i], self->port->memory.fragment, self->port->memory.slot); + } } cavl2_remove(&self->port->index_session_by_remote_uid, &self->index_remote_uid); delist(sessions_by_animation, &self->list_by_animation); @@ -1848,8 +1859,10 @@ static void rx_session_free(rx_session_t* const self, udpard_list_t* const sessi } /// The payload ownership is transferred to the application. -static void rx_session_eject(rx_session_t* const self, udpard_rx_t* const rx, rx_slot_t* const slot) +static void rx_session_eject(rx_session_t* const self, udpard_rx_t* const rx, rx_slot_t** const slot_ref) { + rx_slot_t* const slot = *slot_ref; + // Update the history -- overwrite the oldest entry. 
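    // E.g., assuming RX_TRANSFER_HISTORY_COUNT == 4 (illustrative only): after ejecting transfers
    // 7, 8, 9, 10, 11 the ring holds {11, 8, 9, 10}, so late duplicates of 8..11 are still suppressed
    // while 7 has already aged out of the history.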
self->history_current = (self->history_current + 1U) % RX_TRANSFER_HISTORY_COUNT;
     self->history[self->history_current] = slot->transfer_id;
@@ -1866,48 +1879,48 @@ static void rx_session_eject(rx_session_t* const self, udpard_rx_t* const rx, rx
     };
     self->port->vtable->on_message(rx, self->port, transfer);

-    // Finally, reset the slot.
+    // Finally, destroy the slot to reclaim memory.
     slot->fragments = NULL; // Transfer ownership to the application.
-    rx_slot_reset(slot, self->port->memory.fragment);
+    rx_slot_destroy(slot_ref, self->port->memory.fragment, self->port->memory.slot);
 }

-/// Finds an existing in-progress slot with the specified transfer-ID, or allocates a new one.
-/// Allocation always succeeds so the result is never NULL, but it may cancel a stale slot with incomplete transfer.
-static rx_slot_t* rx_session_get_slot(rx_session_t* const self, const udpard_us_t ts, const uint64_t transfer_id)
+/// Finds an existing in-progress slot with the specified transfer-ID, or allocates a new one. Returns NULL on OOM.
+/// A pointer-to-pointer is returned so that the caller can NULL out the slot entry on destruction.
+static rx_slot_t** rx_session_get_slot(rx_session_t* const self, const udpard_us_t ts, const uint64_t transfer_id)
 {
     // First, check if one is in progress already; resume it if so.
     for (size_t i = 0; i < RX_SLOT_COUNT; i++) {
-        if (self->slots[i].busy && (self->slots[i].transfer_id == transfer_id)) {
-            return &self->slots[i];
+        if ((self->slots[i] != NULL) && (self->slots[i]->transfer_id == transfer_id)) {
+            return &self->slots[i]; // Not checking for timeout; transfer-IDs are unique so it's fine.
         }
    }
    // Use this opportunity to check for timed-out in-progress slots. This may free up a slot for the search below.
    for (size_t i = 0; i < RX_SLOT_COUNT; i++) {
-        if (self->slots[i].busy && (ts >= (self->slots[i].ts_max + SESSION_LIFETIME))) {
-            rx_slot_reset(&self->slots[i], self->port->memory.fragment);
+        if ((self->slots[i] != NULL) && (ts >= (self->slots[i]->ts_max + SESSION_LIFETIME))) {
+            rx_slot_destroy(&self->slots[i], self->port->memory.fragment, self->port->memory.slot);
        }
    }
    // This appears to be a new transfer, so we will need to allocate a new slot for it.
    for (size_t i = 0; i < RX_SLOT_COUNT; i++) {
-        if (!self->slots[i].busy) {
+        if (self->slots[i] == NULL) {
+            self->slots[i] = rx_slot_new(self->port->memory.slot); // may fail
            return &self->slots[i];
        }
    }
    // All slots are currently occupied; find the oldest slot to sacrifice.
-    rx_slot_t* slot = NULL;
-    udpard_us_t oldest_ts = HEAT_DEATH;
+    size_t oldest_index = 0;
    for (size_t i = 0; i < RX_SLOT_COUNT; i++) {
-        UDPARD_ASSERT(self->slots[i].busy); // Checked this already.
-        if (self->slots[i].ts_max < oldest_ts) {
-            oldest_ts = self->slots[i].ts_max;
-            slot = &self->slots[i];
+        UDPARD_ASSERT(self->slots[i] != NULL); // Checked this already.
+        UDPARD_ASSERT(self->slots[oldest_index] != NULL);
+        if (self->slots[i]->ts_max < self->slots[oldest_index]->ts_max) {
+            oldest_index = i;
        }
    }
-    UDPARD_ASSERT((slot != NULL) && slot->busy);
    // It is probably just a stale transfer, so it's a no-brainer to evict it, it's probably dead anyway.
-    rx_slot_reset(slot, self->port->memory.fragment);
-    UDPARD_ASSERT((slot != NULL) && !slot->busy);
-    return slot;
+    // We allocate immediately after destruction, so OOM is not expected here; if it does occur, only this transfer is lost.
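+    // E.g., with hypothetical slots whose ts_max are {900, 200, 500}, index 1 is sacrificed: the
+    // transfer that has been silent the longest is the least likely to ever complete.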
+ rx_slot_destroy(&self->slots[oldest_index], self->port->memory.fragment, self->port->memory.slot); + self->slots[oldest_index] = rx_slot_new(self->port->memory.slot); // may fail + return &self->slots[oldest_index]; } static void rx_session_update(rx_session_t* const self, @@ -1939,24 +1952,28 @@ static void rx_session_update(rx_session_t* const self, } } - // UNORDERED mode update. There are no other modes now -- there used to be ORDERED in an experimental revision once. + // UNORDERED mode update. + // There are no other modes now -- there used to be ORDERED; last commit 9296213e0270afc164e193d88dcb74f97a348767. if (!rx_session_is_transfer_ejected(self, frame->meta.transfer_id)) { - rx_slot_t* const slot = rx_session_get_slot(self, ts, frame->meta.transfer_id); // new or continuation - UDPARD_ASSERT(slot != NULL); - UDPARD_ASSERT((!slot->busy) || (slot->transfer_id == frame->meta.transfer_id)); - const bool done = rx_slot_update(slot, - ts, - self->port->memory.fragment, - payload_deleter, - frame, - self->port->extent, - &rx->errors_oom, - &rx->errors_transfer_malformed); - if (done) { - if (frame->meta.kind == frame_msg_reliable) { - tx_send_ack(rx, ts, slot->priority, slot->transfer_id, self->remote); + rx_slot_t** const slot_ref = rx_session_get_slot(self, ts, frame->meta.transfer_id); // new or continuation + rx_slot_t* const slot = *slot_ref; + if (slot == NULL) { + mem_free_payload(payload_deleter, frame->base.origin); + rx->errors_oom++; + } else { + const rx_slot_update_result_t upd_res = rx_slot_update( + slot, ts, self->port->memory.fragment, payload_deleter, frame, self->port->extent, &rx->errors_oom); + if (upd_res == rx_slot_complete) { + if (frame->meta.kind == frame_msg_reliable) { + tx_send_ack(rx, ts, slot->priority, slot->transfer_id, self->remote); + } + rx_session_eject(self, rx, slot_ref); // will destroy the slot. 
+ } else if (upd_res == rx_slot_failure) { + rx->errors_transfer_malformed++; + rx_slot_destroy(slot_ref, self->port->memory.fragment, self->port->memory.slot); + } else { + UDPARD_ASSERT(upd_res == rx_slot_incomplete); } - rx_session_eject(self, rx, slot); } } else { // retransmit ACK if needed if ((frame->meta.kind == frame_msg_reliable) && (frame->base.offset == 0U)) { @@ -2042,10 +2059,7 @@ static const udpard_rx_port_vtable_private_t rx_port_vtb_stateless = { .accept = static bool rx_validate_mem_resources(const udpard_rx_mem_resources_t memory) { - return (memory.session.vtable != NULL) && (memory.session.vtable->base.free != NULL) && - (memory.session.vtable->alloc != NULL) && // - (memory.fragment.vtable != NULL) && (memory.fragment.vtable->base.free != NULL) && - (memory.fragment.vtable->alloc != NULL); + return mem_validate(memory.session) && mem_validate(memory.slot) && mem_validate(memory.fragment); } void udpard_rx_new(udpard_rx_t* const self, udpard_tx_t* const tx) @@ -2074,7 +2088,8 @@ bool udpard_rx_port_new(udpard_rx_port_t* const self, const udpard_rx_mem_resources_t memory, const udpard_rx_port_vtable_t* const vtable) { - bool ok = (self != NULL) && rx_validate_mem_resources(memory) && (vtable != NULL) && (vtable->on_message != NULL); + const bool ok = + (self != NULL) && rx_validate_mem_resources(memory) && (vtable != NULL) && (vtable->on_message != NULL); if (ok) { mem_zero(sizeof(*self), self); self->extent = extent; @@ -2159,3 +2174,13 @@ bool udpard_rx_port_push(udpard_rx_t* const rx, } return ok; } + +// --------------------------------------------- HEAP OBJECT SIZE LIMITS --------------------------------------------- + +// On a 32-bit platform, the block overhead of o1heap is 8 bytes. +// Rounding up to the power of 2 results in possible allocation sizes of 8, 24, 56, 120, 248, 504, 1016, ... bytes. + +static_assert((sizeof(void*) > 4) || (sizeof(tx_transfer_t) <= (256 - 8)), "tx_transfer_t is too large"); +static_assert((sizeof(void*) > 4) || (sizeof(rx_session_t) <= (512 - 8)), "rx_session_t is too large"); +static_assert((sizeof(void*) > 4) || (sizeof(rx_slot_t) <= (64 - 8)), "rx_slot_t is too large"); +static_assert((sizeof(void*) > 4) || (sizeof(udpard_fragment_t) <= (64 - 8)), "udpard_fragment_t is too large"); diff --git a/libudpard/udpard.h b/libudpard/udpard.h index fc0be4d..cdf6033 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -28,6 +28,7 @@ /// The TX pipeline adds a small overhead of sizeof(tx_frame_t). /// - sizeof(tx_transfer_t) blocks for the TX pipeline to store outgoing transfer metadata. /// - sizeof(rx_session_t) blocks for the RX pipeline to store incoming transfer session metadata. +/// - sizeof(rx_slot_t) blocks for the RX pipeline to store specific transfer reassembly state. /// - sizeof(udpard_fragment_t) blocks for the RX pipeline to store received data fragments. /// /// Suitable memory allocators may be found here: @@ -619,15 +620,12 @@ typedef struct udpard_rx_t /// These are used to serve the memory needs of the library to keep state while reassembling incoming transfers. /// Several memory resources are provided to enable fine control over the allocated memory if necessary; however, /// simple applications may choose to use the same memory resource implemented via malloc()/free() for all of them. +/// Instances are fixed-size, so a trivial zero-fragmentation block allocator is sufficient. typedef struct udpard_rx_mem_resources_t { - /// Provides memory for rx_session_t described below. 
- /// Each instance is fixed-size, so a trivial zero-fragmentation block allocator is sufficient. - udpard_mem_t session; - - /// The udpard_fragment_t handles are allocated per payload fragment; each contains a pointer to its fragment. - /// Each instance is of a very small fixed size, so a trivial zero-fragmentation block allocator is sufficient. - udpard_mem_t fragment; + udpard_mem_t session; ///< Provides memory for rx_session_t. + udpard_mem_t slot; ///< Provides memory for rx_slot_t. + udpard_mem_t fragment; ///< udpard_fragment_t are allocated per payload fragment; each points to its fragment. } udpard_rx_mem_resources_t; typedef struct udpard_rx_port_t udpard_rx_port_t; @@ -658,7 +656,8 @@ struct udpard_rx_port_t /// For example, if the local node is subscribed to a certain subject and there are X nodes publishing /// transfers on that subject, then there will be X sessions created for that subject. /// - /// Each session instance takes sizeof(rx_session_t) bytes of dynamic memory for itself. + /// Each session instance takes sizeof(rx_session_t) bytes of dynamic memory for itself, plus sizeof(rx_slot_t) + /// times the number of concurrent interleaved transfers from the remote node (usually 1), up to the static limit. /// On top of that, each session instance holds memory for the transfer payload fragments and small fixed-size /// metadata objects of type udpard_fragment_t, one handle per fragment. /// From a4795fdf15a0a59aa564878cbbccde0f15139f4b Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Tue, 3 Feb 2026 15:54:22 +0200 Subject: [PATCH 11/13] update the tests --- tests/src/test_e2e_api.cpp | 3 + tests/src/test_e2e_edge.cpp | 4 + tests/src/test_e2e_random.cpp | 2 + tests/src/test_e2e_responses.cpp | 4 + tests/src/test_integration_sockets.cpp | 1 + tests/src/test_intrusive_guards.c | 9 +- tests/src/test_intrusive_rx.c | 424 +++++++++++-------------- 7 files changed, 215 insertions(+), 232 deletions(-) diff --git a/tests/src/test_e2e_api.cpp b/tests/src/test_e2e_api.cpp index 7e5f737..2f507ed 100644 --- a/tests/src/test_e2e_api.cpp +++ b/tests/src/test_e2e_api.cpp @@ -157,6 +157,7 @@ void test_reliable_delivery_under_losses() res = instrumented_allocator_make_resource(&pub_tx_alloc_payload); } const udpard_rx_mem_resources_t pub_rx_mem{ .session = instrumented_allocator_make_resource(&pub_rx_alloc_session), + .slot = instrumented_allocator_make_resource(&pub_rx_alloc_session), .fragment = instrumented_allocator_make_resource(&pub_rx_alloc_frag) }; udpard_tx_mem_resources_t sub_tx_mem{}; @@ -165,6 +166,7 @@ void test_reliable_delivery_under_losses() res = instrumented_allocator_make_resource(&sub_tx_alloc_payload); } const udpard_rx_mem_resources_t sub_rx_mem{ .session = instrumented_allocator_make_resource(&sub_rx_alloc_session), + .slot = instrumented_allocator_make_resource(&sub_rx_alloc_session), .fragment = instrumented_allocator_make_resource(&sub_rx_alloc_frag) }; // Publisher node: single TX, single RX (linked to TX for ACK processing). 
@@ -368,6 +370,7 @@ void test_reliable_stats_and_failures() instrumented_allocator_new(&src_alloc_transfer); instrumented_allocator_new(&src_alloc_payload); const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), + .slot = instrumented_allocator_make_resource(&rx_alloc_session), .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; udpard_tx_mem_resources_t src_mem{}; src_mem.transfer = instrumented_allocator_make_resource(&src_alloc_transfer); diff --git a/tests/src/test_e2e_edge.cpp b/tests/src/test_e2e_edge.cpp index 48aabbe..261369b 100644 --- a/tests/src/test_e2e_edge.cpp +++ b/tests/src/test_e2e_edge.cpp @@ -119,6 +119,7 @@ struct Fixture res = instrumented_allocator_make_resource(&tx_alloc_payload); } const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), + .slot = instrumented_allocator_make_resource(&rx_alloc_session), .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; tx_payload_deleter = udpard_deleter_t{ .vtable = &tx_refcount_deleter_vt, .context = nullptr }; source = { .ip = 0x0A000001U, .port = 7501U }; @@ -328,6 +329,7 @@ void test_udpard_tx_push_p2p() tx.user = &frames; const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), + .slot = instrumented_allocator_make_resource(&rx_alloc_session), .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; udpard_rx_t rx{}; udpard_rx_port_t port{}; @@ -390,6 +392,7 @@ void test_udpard_tx_minimum_mtu() res = instrumented_allocator_make_resource(&tx_alloc_payload); } const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), + .slot = instrumented_allocator_make_resource(&rx_alloc_session), .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; udpard_tx_t tx{}; @@ -503,6 +506,7 @@ void test_udpard_rx_zero_extent() res = instrumented_allocator_make_resource(&tx_alloc_payload); } const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), + .slot = instrumented_allocator_make_resource(&rx_alloc_session), .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; udpard_tx_t tx{}; diff --git a/tests/src/test_e2e_random.cpp b/tests/src/test_e2e_random.cpp index 948cca9..92da30b 100644 --- a/tests/src/test_e2e_random.cpp +++ b/tests/src/test_e2e_random.cpp @@ -207,6 +207,7 @@ void test_udpard_tx_rx_end_to_end() instrumented_allocator_t rx_alloc_session{}; instrumented_allocator_new(&rx_alloc_session); const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), + .slot = instrumented_allocator_make_resource(&rx_alloc_session), .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; udpard_rx_t rx; udpard_rx_new(&rx, &ack_tx); @@ -215,6 +216,7 @@ void test_udpard_tx_rx_end_to_end() instrumented_allocator_new(&ack_rx_alloc_frag); instrumented_allocator_new(&ack_rx_alloc_session); const udpard_rx_mem_resources_t ack_rx_mem{ .session = instrumented_allocator_make_resource(&ack_rx_alloc_session), + .slot = instrumented_allocator_make_resource(&ack_rx_alloc_session), .fragment = instrumented_allocator_make_resource(&ack_rx_alloc_frag) }; udpard_rx_t ack_rx{}; udpard_rx_port_t ack_port{}; diff --git a/tests/src/test_e2e_responses.cpp b/tests/src/test_e2e_responses.cpp index 1ae3657..8ec7e9f 100644 --- a/tests/src/test_e2e_responses.cpp +++ b/tests/src/test_e2e_responses.cpp 
@@ -215,6 +215,7 @@ void test_topic_with_p2p_response() res = instrumented_allocator_make_resource(&a_tx_alloc_payload); } const udpard_rx_mem_resources_t a_rx_mem{ .session = instrumented_allocator_make_resource(&a_rx_alloc_session), + .slot = instrumented_allocator_make_resource(&a_rx_alloc_session), .fragment = instrumented_allocator_make_resource(&a_rx_alloc_frag) }; udpard_tx_mem_resources_t b_tx_mem{}; @@ -223,6 +224,7 @@ void test_topic_with_p2p_response() res = instrumented_allocator_make_resource(&b_tx_alloc_payload); } const udpard_rx_mem_resources_t b_rx_mem{ .session = instrumented_allocator_make_resource(&b_rx_alloc_session), + .slot = instrumented_allocator_make_resource(&b_rx_alloc_session), .fragment = instrumented_allocator_make_resource(&b_rx_alloc_frag) }; // ================================================================================================================ @@ -497,6 +499,7 @@ void test_topic_with_p2p_response_under_loss() res = instrumented_allocator_make_resource(&a_tx_alloc_payload); } const udpard_rx_mem_resources_t a_rx_mem{ .session = instrumented_allocator_make_resource(&a_rx_alloc_session), + .slot = instrumented_allocator_make_resource(&a_rx_alloc_session), .fragment = instrumented_allocator_make_resource(&a_rx_alloc_frag) }; udpard_tx_mem_resources_t b_tx_mem{}; @@ -505,6 +508,7 @@ void test_topic_with_p2p_response_under_loss() res = instrumented_allocator_make_resource(&b_tx_alloc_payload); } const udpard_rx_mem_resources_t b_rx_mem{ .session = instrumented_allocator_make_resource(&b_rx_alloc_session), + .slot = instrumented_allocator_make_resource(&b_rx_alloc_session), .fragment = instrumented_allocator_make_resource(&b_rx_alloc_frag) }; // ================================================================================================================ diff --git a/tests/src/test_integration_sockets.cpp b/tests/src/test_integration_sockets.cpp index 24470cd..4a15c2a 100644 --- a/tests/src/test_integration_sockets.cpp +++ b/tests/src/test_integration_sockets.cpp @@ -203,6 +203,7 @@ struct RxFixture instrumented_allocator_new(&session); instrumented_allocator_new(&fragment); mem.session = instrumented_allocator_make_resource(&session); + mem.slot = instrumented_allocator_make_resource(&session); mem.fragment = instrumented_allocator_make_resource(&fragment); udpard_rx_new(&rx, nullptr); rx.user = &ctx; diff --git a/tests/src/test_intrusive_guards.c b/tests/src/test_intrusive_guards.c index 34bdb63..1a48fc6 100644 --- a/tests/src/test_intrusive_guards.c +++ b/tests/src/test_intrusive_guards.c @@ -257,7 +257,9 @@ static void test_rx_guards(void) // RX port creation guards reject invalid parameters. 
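     // With the dedicated slot resource, rx_validate_mem_resources() now requires all three of
     // session/slot/fragment to provide both alloc and free; the bad_slot cases below cover the new term.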
static char rx_tag_a; static char rx_tag_b; - const udpard_rx_mem_resources_t rx_mem = { .session = make_mem(&rx_tag_a), .fragment = make_mem(&rx_tag_b) }; + const udpard_rx_mem_resources_t rx_mem = { .session = make_mem(&rx_tag_a), + .slot = make_mem(&rx_tag_a), + .fragment = make_mem(&rx_tag_b) }; const udpard_rx_port_vtable_t rx_vtb = { .on_message = on_message_stub }; udpard_rx_port_t port; TEST_ASSERT_FALSE(udpard_rx_port_new(NULL, 0, rx_mem, &rx_vtb)); @@ -275,6 +277,11 @@ static void test_rx_guards(void) TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_session)); bad_session.session.vtable = &vtable_no_alloc; TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_session)); + udpard_rx_mem_resources_t bad_slot = rx_mem; + bad_slot.slot.vtable = &vtable_no_free; + TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_slot)); + bad_slot.slot.vtable = &vtable_no_alloc; + TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_slot)); udpard_rx_mem_resources_t bad_fragment = rx_mem; bad_fragment.fragment.vtable = &vtable_no_free; TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_fragment)); diff --git a/tests/src/test_intrusive_rx.c b/tests/src/test_intrusive_rx.c index afc7b35..d5ebe6d 100644 --- a/tests/src/test_intrusive_rx.c +++ b/tests/src/test_intrusive_rx.c @@ -1237,17 +1237,21 @@ static void test_rx_slot_update(void) instrumented_allocator_new(&alloc_frag); const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + instrumented_allocator_t alloc_slot = { 0 }; + instrumented_allocator_new(&alloc_slot); + const udpard_mem_t mem_slot = instrumented_allocator_make_resource(&alloc_slot); + instrumented_allocator_t alloc_payload = { 0 }; instrumented_allocator_new(&alloc_payload); const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - uint64_t errors_oom = 0; - uint64_t errors_transfer_malformed = 0; + uint64_t errors_oom = 0; - // Test 1: Initialize slot from idle state (slot->busy == false branch) + // Test 1: Initialize slot from idle state. { - rx_slot_t slot = { 0 }; + rx_slot_t* slot = rx_slot_new(mem_slot); + TEST_ASSERT_NOT_NULL(slot); rx_frame_t frame = { 0 }; frame.base = make_frame_base(mem_payload, 0, 5, "hello"); @@ -1257,30 +1261,28 @@ static void test_rx_slot_update(void) const udpard_us_t ts = 1000; - // Single-frame transfer should complete immediately. 
- const bool done = - rx_slot_update(&slot, ts, mem_frag, del_payload, &frame, 5, &errors_oom, &errors_transfer_malformed); - - // Verify slot was initialized - TEST_ASSERT_TRUE(done); - TEST_ASSERT_FALSE(slot.busy); - TEST_ASSERT_EQUAL(123, slot.transfer_id); - TEST_ASSERT_EQUAL(ts, slot.ts_min); - TEST_ASSERT_EQUAL(ts, slot.ts_max); - TEST_ASSERT_EQUAL_size_t(5, slot.covered_prefix); + const rx_slot_update_result_t res = + rx_slot_update(slot, ts, mem_frag, del_payload, &frame, 5, &errors_oom); + + TEST_ASSERT_EQUAL(rx_slot_complete, res); + TEST_ASSERT_EQUAL(123, slot->transfer_id); + TEST_ASSERT_EQUAL(ts, slot->ts_min); + TEST_ASSERT_EQUAL(ts, slot->ts_max); + TEST_ASSERT_EQUAL_size_t(5, slot->covered_prefix); TEST_ASSERT_EQUAL(0, errors_oom); - rx_slot_reset(&slot, mem_frag); - rx_slot_reset(&slot, mem_frag); // idempotent + rx_slot_destroy(&slot, mem_frag, mem_slot); + TEST_ASSERT_NULL(slot); } instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_slot); instrumented_allocator_reset(&alloc_payload); - // Test 2: Multi-frame transfer with timestamp updates (later/earlier branches) + // Test 2: Multi-frame transfer with timestamp updates. { - rx_slot_t slot = { 0 }; + rx_slot_t* slot = rx_slot_new(mem_slot); + TEST_ASSERT_NOT_NULL(slot); - // First frame at offset 0 rx_frame_t frame1 = { 0 }; frame1.base = make_frame_base(mem_payload, 0, 3, "abc"); frame1.base.crc = 0x12345678; @@ -1288,69 +1290,63 @@ static void test_rx_slot_update(void) frame1.meta.transfer_payload_size = 10; const udpard_us_t ts1 = 2000; - // First frame initializes slot but does not complete transfer. - const bool done1 = - rx_slot_update(&slot, ts1, mem_frag, del_payload, &frame1, 10, &errors_oom, &errors_transfer_malformed); - - TEST_ASSERT_FALSE(done1); - TEST_ASSERT_TRUE(slot.busy); - TEST_ASSERT_EQUAL(ts1, slot.ts_min); - TEST_ASSERT_EQUAL(ts1, slot.ts_max); - TEST_ASSERT_EQUAL_size_t(3, slot.covered_prefix); - TEST_ASSERT_EQUAL(3, slot.crc_end); - TEST_ASSERT_EQUAL(0x12345678, slot.crc); - - // Second frame at offset 5, with later timestamp + const rx_slot_update_result_t res1 = + rx_slot_update(slot, ts1, mem_frag, del_payload, &frame1, 10, &errors_oom); + + TEST_ASSERT_EQUAL(rx_slot_incomplete, res1); + TEST_ASSERT_EQUAL(ts1, slot->ts_min); + TEST_ASSERT_EQUAL(ts1, slot->ts_max); + TEST_ASSERT_EQUAL_size_t(3, slot->covered_prefix); + TEST_ASSERT_EQUAL(3, slot->crc_end); + TEST_ASSERT_EQUAL(0x12345678, slot->crc); + rx_frame_t frame2 = { 0 }; frame2.base = make_frame_base(mem_payload, 5, 3, "def"); frame2.base.crc = 0x87654321; frame2.meta.transfer_id = 456; frame2.meta.transfer_payload_size = 10; - const udpard_us_t ts2 = 3000; // Later than ts1 - // Later frame updates timestamps and CRC tracking. 
- const bool done2 = - rx_slot_update(&slot, ts2, mem_frag, del_payload, &frame2, 10, &errors_oom, &errors_transfer_malformed); + const udpard_us_t ts2 = 3000; + const rx_slot_update_result_t res2 = + rx_slot_update(slot, ts2, mem_frag, del_payload, &frame2, 10, &errors_oom); - TEST_ASSERT_FALSE(done2); - TEST_ASSERT_TRUE(slot.busy); - TEST_ASSERT_EQUAL(ts1, slot.ts_min); // Unchanged (ts2 is later) - TEST_ASSERT_EQUAL(ts2, slot.ts_max); // Updated to later time - TEST_ASSERT_EQUAL_size_t(3, slot.covered_prefix); // Still 3 due to gap at [3-5) - TEST_ASSERT_EQUAL(8, slot.crc_end); // Updated to end of frame2 - TEST_ASSERT_EQUAL(0x87654321, slot.crc); // Updated to frame2's CRC + TEST_ASSERT_EQUAL(rx_slot_incomplete, res2); + TEST_ASSERT_EQUAL(ts1, slot->ts_min); + TEST_ASSERT_EQUAL(ts2, slot->ts_max); + TEST_ASSERT_EQUAL_size_t(3, slot->covered_prefix); + TEST_ASSERT_EQUAL(8, slot->crc_end); + TEST_ASSERT_EQUAL(0x87654321, slot->crc); - // Third frame at offset 3 (fills gap), with earlier timestamp rx_frame_t frame3 = { 0 }; frame3.base = make_frame_base(mem_payload, 3, 2, "XX"); frame3.base.crc = 0xAABBCCDD; frame3.meta.transfer_id = 456; frame3.meta.transfer_payload_size = 10; - const udpard_us_t ts3 = 1500; // Earlier than ts1 - // Earlier frame updates ts_min and extends covered prefix. - const bool done3 = - rx_slot_update(&slot, ts3, mem_frag, del_payload, &frame3, 10, &errors_oom, &errors_transfer_malformed); + const udpard_us_t ts3 = 1500; + const rx_slot_update_result_t res3 = + rx_slot_update(slot, ts3, mem_frag, del_payload, &frame3, 10, &errors_oom); - TEST_ASSERT_FALSE(done3); - TEST_ASSERT_TRUE(slot.busy); - TEST_ASSERT_EQUAL(ts3, slot.ts_min); // Updated to earlier time - TEST_ASSERT_EQUAL(ts2, slot.ts_max); // Unchanged (ts3 is earlier) - TEST_ASSERT_EQUAL_size_t(8, slot.covered_prefix); // Now contiguous 0-8 - TEST_ASSERT_EQUAL(8, slot.crc_end); // Unchanged (frame3 doesn't extend beyond frame2) - TEST_ASSERT_EQUAL(0x87654321, slot.crc); // Unchanged (crc_end didn't increase) + TEST_ASSERT_EQUAL(rx_slot_incomplete, res3); + TEST_ASSERT_EQUAL(ts3, slot->ts_min); + TEST_ASSERT_EQUAL(ts2, slot->ts_max); + TEST_ASSERT_EQUAL_size_t(8, slot->covered_prefix); + TEST_ASSERT_EQUAL(8, slot->crc_end); + TEST_ASSERT_EQUAL(0x87654321, slot->crc); - rx_slot_reset(&slot, mem_frag); + rx_slot_destroy(&slot, mem_frag, mem_slot); + TEST_ASSERT_NULL(slot); } instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_slot); instrumented_allocator_reset(&alloc_payload); - // Test 3: OOM handling (tree_res == rx_fragment_tree_oom branch) + // Test 3: OOM handling. { - rx_slot_t slot = { 0 }; - errors_oom = 0; + rx_slot_t* slot = rx_slot_new(mem_slot); + TEST_ASSERT_NOT_NULL(slot); + errors_oom = 0; - // Limit allocations to trigger OOM alloc_frag.limit_fragments = 0; rx_frame_t frame = { 0 }; @@ -1359,62 +1355,51 @@ static void test_rx_slot_update(void) frame.meta.transfer_id = 789; frame.meta.transfer_payload_size = 5; - // OOM should not complete the transfer. 
- const bool done = - rx_slot_update(&slot, 5000, mem_frag, del_payload, &frame, 5, &errors_oom, &errors_transfer_malformed); + const rx_slot_update_result_t res = + rx_slot_update(slot, 5000, mem_frag, del_payload, &frame, 5, &errors_oom); - // Verify OOM error was counted - TEST_ASSERT_FALSE(done); + TEST_ASSERT_EQUAL(rx_slot_incomplete, res); TEST_ASSERT_EQUAL(1, errors_oom); - TEST_ASSERT_TRUE(slot.busy); // Slot initialized but fragment not added - TEST_ASSERT_EQUAL_size_t(0, slot.covered_prefix); // No fragments accepted - - // Restore allocation limit - alloc_frag.limit_fragments = SIZE_MAX; + TEST_ASSERT_EQUAL_size_t(0, slot->covered_prefix); - rx_slot_reset(&slot, mem_frag); + rx_slot_destroy(&slot, mem_frag, mem_slot); + TEST_ASSERT_NULL(slot); } instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_slot); instrumented_allocator_reset(&alloc_payload); - // Test 4: Malformed transfer handling (CRC failure in rx_fragment_tree_finalize) + // Test 4: Malformed transfer handling (CRC failure). { - rx_slot_t slot = { 0 }; - errors_transfer_malformed = 0; + rx_slot_t* slot = rx_slot_new(mem_slot); + TEST_ASSERT_NOT_NULL(slot); + errors_oom = 0; - // Single-frame transfer with incorrect CRC rx_frame_t frame = { 0 }; frame.base = make_frame_base(mem_payload, 0, 4, "test"); frame.base.crc = 0xDEADBEEF; // Incorrect CRC frame.meta.transfer_id = 999; frame.meta.transfer_payload_size = 4; - // CRC failure should reset the slot and report malformed. - const bool done = - rx_slot_update(&slot, 6000, mem_frag, del_payload, &frame, 4, &errors_oom, &errors_transfer_malformed); + const rx_slot_update_result_t res = + rx_slot_update(slot, 6000, mem_frag, del_payload, &frame, 4, &errors_oom); - // Verify malformed error was counted and slot was reset - TEST_ASSERT_FALSE(done); - TEST_ASSERT_EQUAL(1, errors_transfer_malformed); - TEST_ASSERT_FALSE(slot.busy); // Slot reset after CRC failure - TEST_ASSERT_EQUAL_size_t(0, slot.covered_prefix); - TEST_ASSERT_NULL(slot.fragments); + TEST_ASSERT_EQUAL(rx_slot_failure, res); - rx_slot_reset(&slot, mem_frag); + rx_slot_destroy(&slot, mem_frag, mem_slot); + TEST_ASSERT_NULL(slot); } instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_slot); instrumented_allocator_reset(&alloc_payload); - // Test 5: Successful completion with correct CRC (tree_res == rx_fragment_tree_done, CRC pass) + // Test 5: Successful completion with correct CRC. { - rx_slot_t slot = { 0 }; - errors_transfer_malformed = 0; - errors_oom = 0; - - // Single-frame transfer with correct CRC - // CRC calculation for "test": using Python pycyphal.transport.commons.crc.CRC32C - // >>> from pycyphal.transport.commons.crc import CRC32C - // >>> hex(CRC32C.new(b"test").value) + rx_slot_t* slot = rx_slot_new(mem_slot); + TEST_ASSERT_NOT_NULL(slot); + errors_oom = 0; + + // CRC value computed from "test". const uint32_t correct_crc = 0x86a072c0UL; rx_frame_t frame = { 0 }; @@ -1423,88 +1408,81 @@ static void test_rx_slot_update(void) frame.meta.transfer_id = 1111; frame.meta.transfer_payload_size = 4; - // Correct CRC should complete the transfer. 
- const bool done = - rx_slot_update(&slot, 7000, mem_frag, del_payload, &frame, 4, &errors_oom, &errors_transfer_malformed); + const rx_slot_update_result_t res = + rx_slot_update(slot, 7000, mem_frag, del_payload, &frame, 4, &errors_oom); - // Verify successful completion - TEST_ASSERT_TRUE(done); - TEST_ASSERT_EQUAL(0, errors_transfer_malformed); - TEST_ASSERT_FALSE(slot.busy); // Successfully completed - TEST_ASSERT_EQUAL_size_t(4, slot.covered_prefix); - TEST_ASSERT_NOT_NULL(slot.fragments); + TEST_ASSERT_EQUAL(rx_slot_complete, res); + TEST_ASSERT_EQUAL(0, errors_oom); + TEST_ASSERT_EQUAL_size_t(4, slot->covered_prefix); + TEST_ASSERT_NOT_NULL(slot->fragments); - rx_slot_reset(&slot, mem_frag); + rx_slot_destroy(&slot, mem_frag, mem_slot); + TEST_ASSERT_NULL(slot); } instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_slot); instrumented_allocator_reset(&alloc_payload); - // Test 6: CRC end update only when crc_end >= slot->crc_end + // Test 6: CRC end update rules. { - rx_slot_t slot = { 0 }; - errors_transfer_malformed = 0; - errors_oom = 0; + rx_slot_t* slot = rx_slot_new(mem_slot); + TEST_ASSERT_NOT_NULL(slot); + errors_oom = 0; - // Frame 1 at offset 5 (will set crc_end to 10) rx_frame_t frame1 = { 0 }; frame1.base = make_frame_base(mem_payload, 5, 5, "world"); frame1.base.crc = 0xAAAAAAAA; frame1.meta.transfer_id = 2222; frame1.meta.transfer_payload_size = 20; - // First frame initializes CRC tracking. - const bool done1 = - rx_slot_update(&slot, 8000, mem_frag, del_payload, &frame1, 20, &errors_oom, &errors_transfer_malformed); + const rx_slot_update_result_t res1 = + rx_slot_update(slot, 8000, mem_frag, del_payload, &frame1, 20, &errors_oom); - TEST_ASSERT_FALSE(done1); - TEST_ASSERT_TRUE(slot.busy); - TEST_ASSERT_EQUAL(10, slot.crc_end); - TEST_ASSERT_EQUAL(0xAAAAAAAA, slot.crc); + TEST_ASSERT_EQUAL(rx_slot_incomplete, res1); + TEST_ASSERT_EQUAL(10, slot->crc_end); + TEST_ASSERT_EQUAL(0xAAAAAAAA, slot->crc); - // Frame 2 at offset 0 (crc_end would be 3, less than current 10, so CRC shouldn't update) rx_frame_t frame2 = { 0 }; frame2.base = make_frame_base(mem_payload, 0, 3, "abc"); frame2.base.crc = 0xBBBBBBBB; frame2.meta.transfer_id = 2222; frame2.meta.transfer_payload_size = 20; - // Earlier CRC end should not update tracking. - const bool done2 = - rx_slot_update(&slot, 8100, mem_frag, del_payload, &frame2, 20, &errors_oom, &errors_transfer_malformed); + const rx_slot_update_result_t res2 = + rx_slot_update(slot, 8100, mem_frag, del_payload, &frame2, 20, &errors_oom); - TEST_ASSERT_FALSE(done2); - TEST_ASSERT_TRUE(slot.busy); - TEST_ASSERT_EQUAL(10, slot.crc_end); // Unchanged - TEST_ASSERT_EQUAL(0xAAAAAAAA, slot.crc); // Unchanged (frame2 didn't update it) + TEST_ASSERT_EQUAL(rx_slot_incomplete, res2); + TEST_ASSERT_EQUAL(10, slot->crc_end); + TEST_ASSERT_EQUAL(0xAAAAAAAA, slot->crc); - // Frame 3 at offset 10 (crc_end would be 15, greater than current 10, so CRC should update) rx_frame_t frame3 = { 0 }; frame3.base = make_frame_base(mem_payload, 10, 5, "hello"); frame3.base.crc = 0xCCCCCCCC; frame3.meta.transfer_id = 2222; frame3.meta.transfer_payload_size = 20; - // Later CRC end should update tracking. 
- const bool done3 = - rx_slot_update(&slot, 8200, mem_frag, del_payload, &frame3, 20, &errors_oom, &errors_transfer_malformed); + const rx_slot_update_result_t res3 = + rx_slot_update(slot, 8200, mem_frag, del_payload, &frame3, 20, &errors_oom); - TEST_ASSERT_FALSE(done3); - TEST_ASSERT_TRUE(slot.busy); - TEST_ASSERT_EQUAL(15, slot.crc_end); // Updated - TEST_ASSERT_EQUAL(0xCCCCCCCC, slot.crc); // Updated + TEST_ASSERT_EQUAL(rx_slot_incomplete, res3); + TEST_ASSERT_EQUAL(15, slot->crc_end); + TEST_ASSERT_EQUAL(0xCCCCCCCC, slot->crc); - rx_slot_reset(&slot, mem_frag); + rx_slot_destroy(&slot, mem_frag, mem_slot); + TEST_ASSERT_NULL(slot); } instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_slot); instrumented_allocator_reset(&alloc_payload); - // Test 7: Inconsistent frame fields; suspicious transfer rejected. + // Test 7: Inconsistent frame fields. { - rx_slot_t slot = { 0 }; - errors_transfer_malformed = 0; - errors_oom = 0; + errors_oom = 0; + + // Total size mismatch. + rx_slot_t* slot = rx_slot_new(mem_slot); + TEST_ASSERT_NOT_NULL(slot); - // First frame initializes the slot with transfer_payload_size=20 and priority=udpard_prio_high rx_frame_t frame1 = { 0 }; frame1.base = make_frame_base(mem_payload, 0, 5, "hello"); frame1.base.crc = 0x12345678; @@ -1512,40 +1490,28 @@ static void test_rx_slot_update(void) frame1.meta.transfer_payload_size = 20; frame1.meta.priority = udpard_prio_high; - // First frame initializes the slot. - const bool done1 = - rx_slot_update(&slot, 9000, mem_frag, del_payload, &frame1, 20, &errors_oom, &errors_transfer_malformed); - - TEST_ASSERT_FALSE(done1); - TEST_ASSERT_TRUE(slot.busy); - TEST_ASSERT_EQUAL(20, slot.total_size); - TEST_ASSERT_EQUAL(udpard_prio_high, slot.priority); - TEST_ASSERT_EQUAL_size_t(5, slot.covered_prefix); - TEST_ASSERT_EQUAL(0, errors_transfer_malformed); + const rx_slot_update_result_t res1 = + rx_slot_update(slot, 9000, mem_frag, del_payload, &frame1, 20, &errors_oom); + TEST_ASSERT_EQUAL(rx_slot_incomplete, res1); - // Second frame with DIFFERENT transfer_payload_size (should trigger the branch and reset the slot) rx_frame_t frame2 = { 0 }; frame2.base = make_frame_base(mem_payload, 5, 5, "world"); frame2.base.crc = 0xABCDEF00; frame2.meta.transfer_id = 3333; - frame2.meta.transfer_payload_size = 25; // DIFFERENT from frame1's 20 + frame2.meta.transfer_payload_size = 25; frame2.meta.priority = udpard_prio_high; - // Inconsistent total_size should reset the slot. - const bool done2 = - rx_slot_update(&slot, 9100, mem_frag, del_payload, &frame2, 25, &errors_oom, &errors_transfer_malformed); + const rx_slot_update_result_t res2 = + rx_slot_update(slot, 9100, mem_frag, del_payload, &frame2, 25, &errors_oom); + TEST_ASSERT_EQUAL(rx_slot_failure, res2); - // Verify that the malformed error was counted and slot was reset - TEST_ASSERT_FALSE(done2); - TEST_ASSERT_EQUAL(1, errors_transfer_malformed); - TEST_ASSERT_FALSE(slot.busy); // Slot reset due to inconsistent total_size - TEST_ASSERT_EQUAL_size_t(0, slot.covered_prefix); - TEST_ASSERT_NULL(slot.fragments); + rx_slot_destroy(&slot, mem_frag, mem_slot); + TEST_ASSERT_NULL(slot); - // Reset counters - errors_transfer_malformed = 0; + // Priority mismatch. 
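+    // The slot is recreated for each mismatch case because a failure result obliges the caller to
+    // destroy it; the priority recorded by the first frame (udpard_prio_low below) is what the second
+    // frame then contradicts.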
+ slot = rx_slot_new(mem_slot); + TEST_ASSERT_NOT_NULL(slot); - // Third frame initializes the slot again with transfer_payload_size=30 and priority=udpard_prio_low rx_frame_t frame3 = { 0 }; frame3.base = make_frame_base(mem_payload, 0, 5, "test1"); frame3.base.crc = 0x11111111; @@ -1553,40 +1519,28 @@ static void test_rx_slot_update(void) frame3.meta.transfer_payload_size = 30; frame3.meta.priority = udpard_prio_low; - // Reinitialize after reset. - const bool done3 = - rx_slot_update(&slot, 9200, mem_frag, del_payload, &frame3, 30, &errors_oom, &errors_transfer_malformed); + const rx_slot_update_result_t res3 = + rx_slot_update(slot, 9200, mem_frag, del_payload, &frame3, 30, &errors_oom); + TEST_ASSERT_EQUAL(rx_slot_incomplete, res3); - TEST_ASSERT_FALSE(done3); - TEST_ASSERT_TRUE(slot.busy); - TEST_ASSERT_EQUAL(30, slot.total_size); - TEST_ASSERT_EQUAL(udpard_prio_low, slot.priority); - TEST_ASSERT_EQUAL_size_t(5, slot.covered_prefix); - TEST_ASSERT_EQUAL(0, errors_transfer_malformed); - - // Fourth frame with DIFFERENT priority (should trigger the branch and reset the slot) rx_frame_t frame4 = { 0 }; frame4.base = make_frame_base(mem_payload, 5, 5, "test2"); frame4.base.crc = 0x22222222; frame4.meta.transfer_id = 4444; - frame4.meta.transfer_payload_size = 30; // Same as frame3 - frame4.meta.priority = udpard_prio_high; // DIFFERENT from frame3's udpard_prio_low + frame4.meta.transfer_payload_size = 30; + frame4.meta.priority = udpard_prio_high; - // Inconsistent priority should reset the slot. - const bool done4 = - rx_slot_update(&slot, 9300, mem_frag, del_payload, &frame4, 30, &errors_oom, &errors_transfer_malformed); + const rx_slot_update_result_t res4 = + rx_slot_update(slot, 9300, mem_frag, del_payload, &frame4, 30, &errors_oom); + TEST_ASSERT_EQUAL(rx_slot_failure, res4); - // Verify that the malformed error was counted and slot was reset - TEST_ASSERT_FALSE(done4); - TEST_ASSERT_EQUAL(1, errors_transfer_malformed); - TEST_ASSERT_FALSE(slot.busy); // Slot reset due to inconsistent priority - TEST_ASSERT_EQUAL_size_t(0, slot.covered_prefix); - TEST_ASSERT_NULL(slot.fragments); + rx_slot_destroy(&slot, mem_frag, mem_slot); + TEST_ASSERT_NULL(slot); - // Reset counters - errors_transfer_malformed = 0; + // Total size and priority mismatch. + slot = rx_slot_new(mem_slot); + TEST_ASSERT_NOT_NULL(slot); - // Fifth frame initializes the slot again rx_frame_t frame5 = { 0 }; frame5.base = make_frame_base(mem_payload, 0, 5, "test3"); frame5.base.crc = 0x33333333; @@ -1594,43 +1548,31 @@ static void test_rx_slot_update(void) frame5.meta.transfer_payload_size = 40; frame5.meta.priority = udpard_prio_nominal; - // Reinitialize after reset. 
- const bool done5 = - rx_slot_update(&slot, 9400, mem_frag, del_payload, &frame5, 40, &errors_oom, &errors_transfer_malformed); - - TEST_ASSERT_FALSE(done5); - TEST_ASSERT_TRUE(slot.busy); - TEST_ASSERT_EQUAL(40, slot.total_size); - TEST_ASSERT_EQUAL(udpard_prio_nominal, slot.priority); - TEST_ASSERT_EQUAL_size_t(5, slot.covered_prefix); - TEST_ASSERT_EQUAL(0, errors_transfer_malformed); + const rx_slot_update_result_t res5 = + rx_slot_update(slot, 9400, mem_frag, del_payload, &frame5, 40, &errors_oom); + TEST_ASSERT_EQUAL(rx_slot_incomplete, res5); - // Sixth frame with BOTH different transfer_payload_size AND priority (should still trigger the branch) rx_frame_t frame6 = { 0 }; frame6.base = make_frame_base(mem_payload, 5, 5, "test4"); frame6.base.crc = 0x44444444; frame6.meta.transfer_id = 5555; - frame6.meta.transfer_payload_size = 50; // DIFFERENT from frame5's 40 - frame6.meta.priority = udpard_prio_fast; // DIFFERENT from frame5's udpard_prio_nominal - - // Inconsistent priority and total_size should reset the slot. - const bool done6 = - rx_slot_update(&slot, 9500, mem_frag, del_payload, &frame6, 50, &errors_oom, &errors_transfer_malformed); + frame6.meta.transfer_payload_size = 50; + frame6.meta.priority = udpard_prio_fast; - // Verify that the malformed error was counted and slot was reset - TEST_ASSERT_FALSE(done6); - TEST_ASSERT_EQUAL(1, errors_transfer_malformed); - TEST_ASSERT_FALSE(slot.busy); // Slot reset due to both inconsistencies - TEST_ASSERT_EQUAL_size_t(0, slot.covered_prefix); - TEST_ASSERT_NULL(slot.fragments); + const rx_slot_update_result_t res6 = + rx_slot_update(slot, 9500, mem_frag, del_payload, &frame6, 50, &errors_oom); + TEST_ASSERT_EQUAL(rx_slot_failure, res6); - rx_slot_reset(&slot, mem_frag); + rx_slot_destroy(&slot, mem_frag, mem_slot); + TEST_ASSERT_NULL(slot); } instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_slot); instrumented_allocator_reset(&alloc_payload); - // Verify no memory leaks + // Verify no memory leaks. 
TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments);
+ TEST_ASSERT_EQUAL_size_t(0, alloc_slot.allocated_fragments);
TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments);
}
@@ -1770,7 +1712,7 @@ static void test_rx_ack_enqueued(void)
const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload);
const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload);

- const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session };
+ const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session, .slot = mem_session };

tx_fixture_t tx_fix = { 0 };
tx_fixture_init(&tx_fix, 0xBADC0FFEE0DDF00DULL, 8);
@@ -1856,7 +1798,7 @@ static void test_rx_session_unordered(void)
const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session);
const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload);
const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload);
- const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session };
+ const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session, .slot = mem_session };

udpard_rx_t rx;
udpard_rx_new(&rx, NULL);
@@ -1996,7 +1938,7 @@ static void test_rx_session_unordered_reject_old(void)
instrumented_allocator_new(&alloc_payload);
const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload);
const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload);
- const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session };
+ const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session, .slot = mem_session };

tx_fixture_t tx_fix = { 0 };
tx_fixture_init(&tx_fix, 0xF00DCAFEF00DCAFEULL, 4);
@@ -2099,7 +2041,7 @@ static void test_rx_session_unordered_duplicates(void)
const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session);
const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload);
const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload);
- const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session };
+ const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session, .slot = mem_session };

udpard_rx_t rx;
udpard_rx_new(&rx, NULL);
@@ -2173,7 +2115,7 @@ static void test_rx_port(void)
const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session);
const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload);
const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload);
- const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session };
+ const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session, .slot = mem_session };

udpard_rx_t rx;
udpard_rx_new(&rx, NULL);
@@ -2239,7 +2181,7 @@ static void test_rx_port_timeouts(void)
const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session);
const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload);
const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload);
- const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session };
+ const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session, .slot = mem_session };

udpard_rx_t rx;
udpard_rx_new(&rx, NULL);
@@ -2301,7 +2243,7 @@ static void test_rx_port_oom(void)
const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session);
const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload);
const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload);
- const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session };
+ const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session, .slot = mem_session };

udpard_rx_t rx;
udpard_rx_new(&rx, NULL);
@@ -2357,7 +2299,7 @@ static void test_rx_port_free_loop(void)
const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session);
const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload);
const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload);
- const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session };
+ const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session, .slot = mem_session };

udpard_rx_t rx;
udpard_rx_new(&rx, NULL);
@@ -2449,6 +2391,7 @@ static void test_rx_additional_coverage(void)
instrumented_allocator_new(&alloc_frag);
instrumented_allocator_new(&alloc_ses);
const udpard_rx_mem_resources_t mem = { .session = instrumented_allocator_make_resource(&alloc_ses),
+ .slot = instrumented_allocator_make_resource(&alloc_ses),
.fragment = instrumented_allocator_make_resource(&alloc_frag) };
// Memory validation rejects missing hooks.
const udpard_mem_vtable_t vtable_no_free = { .base = { .free = NULL }, .alloc = dummy_alloc };
@@ -2463,6 +2406,11 @@ static void test_rx_additional_coverage(void)
TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_mem));
bad_mem.fragment.vtable = &vtable_no_alloc;
TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_mem));
+ bad_mem = mem;
+ bad_mem.slot.vtable = &vtable_no_free;
+ TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_mem));
+ bad_mem.slot.vtable = &vtable_no_alloc;
+ TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_mem));

// Session helpers and free paths.
const udpard_rx_port_vtable_t vtb = { .on_message = stub_on_message };
@@ -2488,7 +2436,7 @@ static void test_rx_additional_coverage(void)
TEST_ASSERT_EQUAL(1, cavl_compare_rx_session_by_remote_uid(&(uint64_t){ 100 }, &ses->index_remote_uid));
rx_session_free(ses, &anim_list);

- // Slot acquisition covers stale busy and eviction.
+ // Slot acquisition covers stale cleanup and eviction.
udpard_rx_t rx = { 0 };
rx_session_t ses_slots;
mem_zero(sizeof(ses_slots), &ses_slots);
@@ -2497,17 +2445,31 @@ static void test_rx_additional_coverage(void)
for (size_t i = 0; i < RX_TRANSFER_HISTORY_COUNT; i++) {
ses_slots.history[i] = 1;
}
- ses_slots.slots[0].busy = true;
- ses_slots.slots[0].ts_max = 0;
- ses_slots.slots[0].transfer_id = 1;
- rx_slot_t* slot = rx_session_get_slot(&ses_slots, SESSION_LIFETIME + 1, 99);
- TEST_ASSERT_NOT_NULL(slot);
+ // Allocate one slot to simulate a stale in-progress transfer.
+ ses_slots.slots[0] = rx_slot_new(mem.slot);
+ TEST_ASSERT_NOT_NULL(ses_slots.slots[0]);
+ ses_slots.slots[0]->ts_max = 0;
+ ses_slots.slots[0]->transfer_id = 1;
+ rx_slot_t** slot_ref = rx_session_get_slot(&ses_slots, SESSION_LIFETIME + 1, 99);
+ TEST_ASSERT_NOT_NULL(slot_ref);
+ TEST_ASSERT_NOT_NULL(*slot_ref);
+ // Fill all slots to exercise eviction.
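+ // Every slot now holds a live allocation, so the lookup below for an unseen
+ // transfer-ID can only succeed by evicting one of them; the assertions check
+ // only that a usable slot is returned, not which victim is chosen.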
for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - ses_slots.slots[i].busy = true; - ses_slots.slots[i].ts_max = 10 + (udpard_us_t)i; + if (ses_slots.slots[i] == NULL) { + ses_slots.slots[i] = rx_slot_new(mem.slot); + } + TEST_ASSERT_NOT_NULL(ses_slots.slots[i]); + ses_slots.slots[i]->ts_max = 10 + (udpard_us_t)i; + } + slot_ref = rx_session_get_slot(&ses_slots, 50, 2); + TEST_ASSERT_NOT_NULL(slot_ref); + TEST_ASSERT_NOT_NULL(*slot_ref); + // Release slot allocations from the helper session. + for (size_t i = 0; i < RX_SLOT_COUNT; i++) { + if (ses_slots.slots[i] != NULL) { + rx_slot_destroy(&ses_slots.slots[i], mem.fragment, mem.slot); + } } - slot = rx_session_get_slot(&ses_slots, 50, 2); - TEST_ASSERT_NOT_NULL(slot); // Stateless accept success, OOM, malformed. udpard_rx_port_t port_stateless = { 0 }; From 7e5d7c9c8d6ab95553cdf45f7f1465d6b6e40919 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Tue, 3 Feb 2026 15:58:33 +0200 Subject: [PATCH 12/13] format --- tests/src/test_intrusive_rx.c | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/tests/src/test_intrusive_rx.c b/tests/src/test_intrusive_rx.c index d5ebe6d..b16d560 100644 --- a/tests/src/test_intrusive_rx.c +++ b/tests/src/test_intrusive_rx.c @@ -1261,8 +1261,7 @@ static void test_rx_slot_update(void) const udpard_us_t ts = 1000; - const rx_slot_update_result_t res = - rx_slot_update(slot, ts, mem_frag, del_payload, &frame, 5, &errors_oom); + const rx_slot_update_result_t res = rx_slot_update(slot, ts, mem_frag, del_payload, &frame, 5, &errors_oom); TEST_ASSERT_EQUAL(rx_slot_complete, res); TEST_ASSERT_EQUAL(123, slot->transfer_id); @@ -1289,9 +1288,8 @@ static void test_rx_slot_update(void) frame1.meta.transfer_id = 456; frame1.meta.transfer_payload_size = 10; - const udpard_us_t ts1 = 2000; - const rx_slot_update_result_t res1 = - rx_slot_update(slot, ts1, mem_frag, del_payload, &frame1, 10, &errors_oom); + const udpard_us_t ts1 = 2000; + const rx_slot_update_result_t res1 = rx_slot_update(slot, ts1, mem_frag, del_payload, &frame1, 10, &errors_oom); TEST_ASSERT_EQUAL(rx_slot_incomplete, res1); TEST_ASSERT_EQUAL(ts1, slot->ts_min); @@ -1306,9 +1304,8 @@ static void test_rx_slot_update(void) frame2.meta.transfer_id = 456; frame2.meta.transfer_payload_size = 10; - const udpard_us_t ts2 = 3000; - const rx_slot_update_result_t res2 = - rx_slot_update(slot, ts2, mem_frag, del_payload, &frame2, 10, &errors_oom); + const udpard_us_t ts2 = 3000; + const rx_slot_update_result_t res2 = rx_slot_update(slot, ts2, mem_frag, del_payload, &frame2, 10, &errors_oom); TEST_ASSERT_EQUAL(rx_slot_incomplete, res2); TEST_ASSERT_EQUAL(ts1, slot->ts_min); @@ -1323,9 +1320,8 @@ static void test_rx_slot_update(void) frame3.meta.transfer_id = 456; frame3.meta.transfer_payload_size = 10; - const udpard_us_t ts3 = 1500; - const rx_slot_update_result_t res3 = - rx_slot_update(slot, ts3, mem_frag, del_payload, &frame3, 10, &errors_oom); + const udpard_us_t ts3 = 1500; + const rx_slot_update_result_t res3 = rx_slot_update(slot, ts3, mem_frag, del_payload, &frame3, 10, &errors_oom); TEST_ASSERT_EQUAL(rx_slot_incomplete, res3); TEST_ASSERT_EQUAL(ts3, slot->ts_min); @@ -1355,8 +1351,7 @@ static void test_rx_slot_update(void) frame.meta.transfer_id = 789; frame.meta.transfer_payload_size = 5; - const rx_slot_update_result_t res = - rx_slot_update(slot, 5000, mem_frag, del_payload, &frame, 5, &errors_oom); + const rx_slot_update_result_t res = rx_slot_update(slot, 5000, mem_frag, del_payload, &frame, 5, &errors_oom); 
TEST_ASSERT_EQUAL(rx_slot_incomplete, res); TEST_ASSERT_EQUAL(1, errors_oom); @@ -1381,8 +1376,7 @@ static void test_rx_slot_update(void) frame.meta.transfer_id = 999; frame.meta.transfer_payload_size = 4; - const rx_slot_update_result_t res = - rx_slot_update(slot, 6000, mem_frag, del_payload, &frame, 4, &errors_oom); + const rx_slot_update_result_t res = rx_slot_update(slot, 6000, mem_frag, del_payload, &frame, 4, &errors_oom); TEST_ASSERT_EQUAL(rx_slot_failure, res); @@ -1408,8 +1402,7 @@ static void test_rx_slot_update(void) frame.meta.transfer_id = 1111; frame.meta.transfer_payload_size = 4; - const rx_slot_update_result_t res = - rx_slot_update(slot, 7000, mem_frag, del_payload, &frame, 4, &errors_oom); + const rx_slot_update_result_t res = rx_slot_update(slot, 7000, mem_frag, del_payload, &frame, 4, &errors_oom); TEST_ASSERT_EQUAL(rx_slot_complete, res); TEST_ASSERT_EQUAL(0, errors_oom); @@ -2406,7 +2399,7 @@ static void test_rx_additional_coverage(void) TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_mem)); bad_mem.fragment.vtable = &vtable_no_alloc; TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_mem)); - bad_mem = mem; + bad_mem = mem; bad_mem.slot.vtable = &vtable_no_free; TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_mem)); bad_mem.slot.vtable = &vtable_no_alloc; From 98b85b3ae95bf26088e7ff3a8b347e6be063e6be Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Tue, 3 Feb 2026 16:08:19 +0200 Subject: [PATCH 13/13] coverage --- tests/src/test_intrusive_rx.c | 142 ++++++++++++++++++++++++++++++++++ 1 file changed, 142 insertions(+) diff --git a/tests/src/test_intrusive_rx.c b/tests/src/test_intrusive_rx.c index b16d560..cdeaffa 100644 --- a/tests/src/test_intrusive_rx.c +++ b/tests/src/test_intrusive_rx.c @@ -2095,6 +2095,86 @@ static void test_rx_session_unordered_duplicates(void) instrumented_allocator_reset(&alloc_payload); } +static void test_rx_session_malformed(void) +{ + // Malformed transfer increments error counter and drops slot. 
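+ // Scenario: two frames of the same transfer arrive with inconsistent metadata
+ // (the second frame carries a different priority). The session must treat the
+ // transfer as malformed: the error counter increments, no message is delivered,
+ // and all slot/fragment/payload memory is returned to the allocators.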
+ instrumented_allocator_t alloc_frag = { 0 }; + instrumented_allocator_new(&alloc_frag); + instrumented_allocator_t alloc_session = { 0 }; + instrumented_allocator_new(&alloc_session); + instrumented_allocator_t alloc_slot = { 0 }; + instrumented_allocator_new(&alloc_slot); + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_payload); + const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); + const udpard_mem_t mem_slot = instrumented_allocator_make_resource(&alloc_slot); + const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); + const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session, .slot = mem_slot }; + + udpard_rx_t rx; + udpard_rx_new(&rx, NULL); + callback_result_t cb_result = { 0 }; + rx.user = &cb_result; + + udpard_rx_port_t port = { 0 }; + TEST_ASSERT(udpard_rx_port_new(&port, 64, rx_mem, &callbacks)); + + const uint64_t remote_uid = 0xABCDEF1234567890ULL; + rx_session_factory_args_t fac_args = { + .owner = &port, + .sessions_by_animation = &rx.list_session_by_animation, + .remote_uid = remote_uid, + .now = 0, + }; + rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, + &remote_uid, + &cavl_compare_rx_session_by_remote_uid, + &fac_args, + &cavl_factory_rx_session_by_remote_uid); + TEST_ASSERT_NOT_NULL(ses); + + meta_t meta = { .priority = udpard_prio_nominal, + .kind = frame_msg_best, + .transfer_payload_size = 8, + .transfer_id = 1, + .sender_uid = remote_uid }; + udpard_us_t now = 0; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1111 }, + make_frame_ptr(meta, mem_payload, "ABCDEFGH", 0, 4), + del_payload, + 0); + TEST_ASSERT_EQUAL_UINT64(0, rx.errors_transfer_malformed); + TEST_ASSERT_EQUAL(0, cb_result.message.count); + TEST_ASSERT_EQUAL_size_t(1, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(1, alloc_slot.allocated_fragments); + + meta.priority = udpard_prio_high; + now += 10; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1111 }, + make_frame_ptr(meta, mem_payload, "ABCDEFGH", 4, 4), + del_payload, + 0); + TEST_ASSERT_EQUAL_UINT64(1, rx.errors_transfer_malformed); + TEST_ASSERT_EQUAL(0, cb_result.message.count); + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_slot.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + + udpard_rx_port_free(&rx, &port); + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_session); + instrumented_allocator_reset(&alloc_slot); + instrumented_allocator_reset(&alloc_payload); +} + static void test_rx_port(void) { // P2P ports behave like ordinary ports for payload delivery. @@ -2277,6 +2357,67 @@ static void test_rx_port_oom(void) instrumented_allocator_reset(&alloc_frag); instrumented_allocator_reset(&alloc_session); instrumented_allocator_reset(&alloc_payload); + + // Slot allocation failure should be reported gracefully. 
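+ // The dedicated slot allocator below is capped at zero fragments, so the first
+ // frame of a new transfer cannot obtain a reassembly slot. The push is still
+ // expected to succeed overall, with errors_oom incremented and the datagram
+ // payload released back to its allocator.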
+ instrumented_allocator_t alloc_frag_slot = { 0 }; + instrumented_allocator_new(&alloc_frag_slot); + instrumented_allocator_t alloc_session_slot = { 0 }; + instrumented_allocator_new(&alloc_session_slot); + instrumented_allocator_t alloc_slot = { 0 }; + instrumented_allocator_new(&alloc_slot); + alloc_slot.limit_fragments = 0; // force slot allocation failure + instrumented_allocator_t alloc_payload_slot = { 0 }; + instrumented_allocator_new(&alloc_payload_slot); + const udpard_mem_t mem_frag_slot = instrumented_allocator_make_resource(&alloc_frag_slot); + const udpard_mem_t mem_session_slot = instrumented_allocator_make_resource(&alloc_session_slot); + const udpard_mem_t mem_slot = instrumented_allocator_make_resource(&alloc_slot); + const udpard_mem_t mem_payload_slot = instrumented_allocator_make_resource(&alloc_payload_slot); + const udpard_deleter_t del_payload_slot = instrumented_allocator_make_deleter(&alloc_payload_slot); + const udpard_rx_mem_resources_t rx_mem_slot = { .fragment = mem_frag_slot, + .session = mem_session_slot, + .slot = mem_slot }; + + udpard_rx_t rx_slot; + udpard_rx_new(&rx_slot, NULL); + callback_result_t cb_result_slot = { 0 }; + rx_slot.user = &cb_result_slot; + + udpard_rx_port_t port_slot = { 0 }; + TEST_ASSERT(udpard_rx_port_new(&port_slot, 64, rx_mem_slot, &callbacks)); + + meta_t meta_slot = { .priority = udpard_prio_nominal, + .kind = frame_msg_best, + .transfer_payload_size = 4, + .transfer_id = 1, + .sender_uid = 0x0202020202020202ULL }; + rx_frame_t* frame_slot = make_frame_ptr(meta_slot, mem_payload_slot, "oom!", 0, 4); + const byte_t payload_slot[] = { 'o', 'o', 'm', '!' }; + byte_t dgram_slot[HEADER_SIZE_BYTES + sizeof(payload_slot)]; + header_serialize(dgram_slot, meta_slot, 0, 0, frame_slot->base.crc); + memcpy(dgram_slot + HEADER_SIZE_BYTES, payload_slot, sizeof(payload_slot)); + mem_free(mem_payload_slot, frame_slot->base.origin.size, frame_slot->base.origin.data); + void* payload_buf_slot = mem_res_alloc(mem_payload_slot, sizeof(dgram_slot)); + memcpy(payload_buf_slot, dgram_slot, sizeof(dgram_slot)); + + now = 0; + TEST_ASSERT(udpard_rx_port_push(&rx_slot, + &port_slot, + now, + (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, + (udpard_bytes_mut_t){ .data = payload_buf_slot, .size = sizeof(dgram_slot) }, + del_payload_slot, + 0)); + TEST_ASSERT_GREATER_THAN_UINT64(0, rx_slot.errors_oom); + TEST_ASSERT_EQUAL(1, alloc_session_slot.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_slot.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_frag_slot.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_payload_slot.allocated_fragments); + udpard_rx_port_free(&rx_slot, &port_slot); + TEST_ASSERT_EQUAL(0, alloc_session_slot.allocated_fragments); + instrumented_allocator_reset(&alloc_frag_slot); + instrumented_allocator_reset(&alloc_session_slot); + instrumented_allocator_reset(&alloc_slot); + instrumented_allocator_reset(&alloc_payload_slot); } static void test_rx_port_free_loop(void) @@ -2548,6 +2689,7 @@ int main(void) RUN_TEST(test_rx_session_unordered); RUN_TEST(test_rx_session_unordered_reject_old); RUN_TEST(test_rx_session_unordered_duplicates); + RUN_TEST(test_rx_session_malformed); RUN_TEST(test_rx_port); RUN_TEST(test_rx_port_timeouts);