diff --git a/make/test/JtregNativeJdk.gmk b/make/test/JtregNativeJdk.gmk
index b5abe5fdb921..91a5cbfabcd7 100644
--- a/make/test/JtregNativeJdk.gmk
+++ b/make/test/JtregNativeJdk.gmk
@@ -157,9 +157,21 @@ ifneq ($(filter build-test-jdk-jtreg-native, $(MAKECMDGOALS)), )
       EXTRA_FILES := $(BUILD_JDK_JTREG_EXTRA_FILES), \
       LIBS := $(LIBPTHREAD), \
   ))
+
+  # A special case: a bundled library whose tests need multiple source files.
+  # Keeping them separate is cleaner than merging the sources, since they do some preprocessor state management.
+  $(eval $(call SetupJdkExecutable, BUILD_TEST_CArrayUtilTest, \
+      NAME := CArrayUtilTest, \
+      TYPE := EXECUTABLE, \
+      SRC := $(TOPDIR)/test/jdk/jb/java/awt/vulkan/CArrayUtil/native, \
+      EXTRA_FILES := $(TOPDIR)/src/java.desktop/share/native/common/java2d/vulkan/CArrayUtil.c, \
+      CFLAGS := -I$(TOPDIR)/src/java.desktop/share/native/common/java2d/vulkan, \
+      OUTPUT_DIR := $(BUILD_JDK_JTREG_OUTPUT_DIR)/bin, \
+      OBJECT_DIR := $(BUILD_JDK_JTREG_OUTPUT_DIR)/support/CArrayUtilTest, \
+  ))
 endif
 
-build-test-jdk-jtreg-native: $(BUILD_JDK_JTREG_LIBRARIES) $(BUILD_JDK_JTREG_EXECUTABLES)
+build-test-jdk-jtreg-native: $(BUILD_JDK_JTREG_LIBRARIES) $(BUILD_JDK_JTREG_EXECUTABLES) $(BUILD_TEST_CArrayUtilTest)
 
 ################################################################################
 # Targets for building test-image.
diff --git a/src/java.desktop/share/native/common/java2d/vulkan/CArrayUtil.c b/src/java.desktop/share/native/common/java2d/vulkan/CArrayUtil.c
index 0a199932c0ba..097b5d4e3181 100644
--- a/src/java.desktop/share/native/common/java2d/vulkan/CArrayUtil.c
+++ b/src/java.desktop/share/native/common/java2d/vulkan/CArrayUtil.c
@@ -2,108 +2,73 @@
 #include <string.h>
 #include "CArrayUtil.h"
 
-#if defined(_MSC_VER)
-# include <malloc.h>
-# define ALIGNED_ALLOC(ALIGNMENT, SIZE) _aligned_malloc((SIZE), (ALIGNMENT))
-# define ALIGNED_FREE(PTR) _aligned_free(PTR)
-#else
-# include <stdlib.h>
-# define ALIGNED_ALLOC(ALIGNMENT, SIZE) aligned_alloc((ALIGNMENT), (SIZE))
-# define ALIGNED_FREE(PTR) free(PTR)
-#endif
-
-// === Allocation helpers ===
-
-typedef struct {
-    size_t total_alignment;
-    size_t aligned_header_size;
-    void* new_data;
-} CARR_context_t;
-
-static size_t CARR_align_size(size_t alignment, size_t size) {
-    // assert alignment is power of 2
-    size_t alignment_mask = alignment - 1;
-    return (size + alignment_mask) & ~alignment_mask;
-}
-
-static CARR_context_t CARR_context_init(size_t header_alignment, size_t header_size, size_t data_alignment) {
-    CARR_context_t context;
-    // assert header_alignment and data_alignment are powers of 2
-    context.total_alignment = CARR_MAX(header_alignment, data_alignment);
-    // assert header_size is multiple of header_alignment
-    context.aligned_header_size = CARR_align_size(context.total_alignment, header_size);
-    context.new_data = NULL;
-    return context;
-}
-
-static bool CARR_context_alloc(CARR_context_t* context, size_t data_size) {
-    void* block = ALIGNED_ALLOC(context->total_alignment, context->aligned_header_size + data_size);
-    if (block == NULL) return false;
-    context->new_data = (char*)block + context->aligned_header_size;
-    return true;
-}
-
-static void CARR_context_free(CARR_context_t* context, void* old_data) {
-    if (old_data != NULL) {
-        void* block = (char*)old_data - context->aligned_header_size;
-        ALIGNED_FREE(block);
-    }
-}
-
 // === Arrays ===
 
-bool CARR_array_realloc(void** handle, size_t element_alignment, size_t element_size, size_t new_capacity) {
-    void* old_data = *handle;
-    if (old_data != NULL && CARR_ARRAY_T(old_data)->capacity 
== new_capacity) return true; - CARR_context_t context = CARR_context_init(alignof(CARR_array_t), sizeof(CARR_array_t), element_alignment); +bool CARR_untyped_array_realloc(untyped_array_t* array, size_t element_size, size_t new_capacity) { + if (array->capacity == new_capacity) { + return true; + } + + untyped_array_t new_array = { + .size = CARR_MIN(array->size, new_capacity), + .capacity = new_capacity, + .data = NULL + }; if (new_capacity != 0) { - if (!CARR_context_alloc(&context, element_size * new_capacity)) return false; - CARR_ARRAY_T(context.new_data)->capacity = new_capacity; - if (old_data == NULL) { - CARR_ARRAY_T(context.new_data)->size = 0; - } else { - CARR_ARRAY_T(context.new_data)->size = CARR_MIN(CARR_ARRAY_T(old_data)->size, new_capacity); - memcpy(context.new_data, old_data, element_size * CARR_ARRAY_T(context.new_data)->size); + new_array.data = malloc(element_size * new_capacity); + if (!new_array.data) { + return false; + } + + if (array->data != NULL) { + memcpy(new_array.data, array->data, element_size * new_array.size); } } - CARR_context_free(&context, old_data); - *handle = context.new_data; + + free(array->data); + *array = new_array; return true; } // === Ring buffers === -bool CARR_ring_buffer_realloc(void** handle, size_t element_alignment, size_t element_size, size_t new_capacity) { - void* old_data = *handle; - if (old_data != NULL) { - CARR_ring_buffer_t* old_buf = CARR_RING_BUFFER_T(old_data); - if (old_buf->capacity == new_capacity) return true; - // Shrinking is not supported. - if ((old_buf->capacity + old_buf->tail - old_buf->head) % old_buf->capacity > new_capacity) return false; +bool CARR_untyped_ring_buffer_realloc(untyped_ring_buffer_t* ring_buffer, size_t element_size, size_t new_capacity) { + if (ring_buffer->capacity == new_capacity) { + return true; } - CARR_context_t context = - CARR_context_init(alignof(CARR_ring_buffer_t), sizeof(CARR_ring_buffer_t), element_alignment); + + // Shrinking while discarding elements is not supported. 
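
[Aside: an illustrative sketch, not part of the patch.] CARR_untyped_array_realloc above moves the
first min(size, new_capacity) elements into a fresh malloc'ed block and frees the old one. A minimal
standalone use of the new untyped API, assuming only the declarations introduced by this patch:

    #include <stdio.h>
    #include "CArrayUtil.h"

    int main(void) {
        untyped_array_t a;
        CARR_untyped_array_init(&a);                              // size 0, capacity 0, data NULL
        if (!CARR_untyped_array_realloc(&a, sizeof(int), 4)) return 1;
        ((int*)a.data)[0] = 42;
        a.size = 1;
        if (!CARR_untyped_array_realloc(&a, sizeof(int), 8)) return 1;
        printf("%d\n", ((int*)a.data)[0]);                        // prints 42: growth preserves elements
        (void)CARR_untyped_array_realloc(&a, sizeof(int), 0);     // capacity 0 frees the storage
        return 0;
    }
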
+ if (ring_buffer->size > new_capacity) { + return false; + } + + untyped_ring_buffer_t new_ring_buffer = { + .head_idx = 0, + .size = ring_buffer->size, + .capacity = new_capacity, + .data = NULL + }; if (new_capacity != 0) { - if (!CARR_context_alloc(&context, element_size * new_capacity)) return false; - CARR_ring_buffer_t* new_buf = CARR_RING_BUFFER_T(context.new_data); - new_buf->capacity = new_capacity; - new_buf->head = new_buf->tail = 0; - if (old_data != NULL) { - CARR_ring_buffer_t* old_buf = CARR_RING_BUFFER_T(old_data); - if (old_buf->tail > old_buf->head) { - new_buf->tail = old_buf->tail - old_buf->head; - memcpy(context.new_data, (char*)old_data + old_buf->head*element_size, new_buf->tail*element_size); - } else if (old_buf->tail < old_buf->head) { - new_buf->tail = old_buf->capacity + old_buf->tail - old_buf->head; - memcpy(context.new_data, (char*)old_data + old_buf->head*element_size, - (old_buf->capacity-old_buf->head)*element_size); - memcpy((char*)context.new_data + (new_buf->tail-old_buf->tail)*element_size, old_data, - old_buf->tail*element_size); + new_ring_buffer.data = malloc(element_size * new_capacity); + if (!new_ring_buffer.data) { + return false; + } + + if (ring_buffer->data != NULL) { + if (ring_buffer->head_idx + ring_buffer->size <= ring_buffer->capacity) { + // The 'single span' case + memcpy(new_ring_buffer.data, (char*)ring_buffer->data + ring_buffer->head_idx * element_size, ring_buffer->size * element_size); + } else { + // The 'two spans' case + const size_t first_span_size = ring_buffer->capacity - ring_buffer->head_idx; + memcpy(new_ring_buffer.data, (char*)ring_buffer->data + ring_buffer->head_idx * element_size, first_span_size * element_size); + memcpy((char*)new_ring_buffer.data + first_span_size * element_size, ring_buffer->data, (ring_buffer->size - first_span_size) * element_size); } } } - CARR_context_free(&context, old_data); - *handle = context.new_data; + + free(ring_buffer->data); + *ring_buffer = new_ring_buffer; return true; } @@ -119,42 +84,28 @@ static size_t CARR_hash_map_find_size(const size_t* table, unsigned int table_le } #define HASH_MAP_FIND_SIZE(TABLE, SIZE) CARR_hash_map_find_size(TABLE, SARRAY_COUNT_OF(TABLE), SIZE) -// Check whether memory chunk is non-zero. -static bool CARR_check_range(const void* p, size_t alignment, size_t size) { - switch (alignment) { - case sizeof(uint8_t): - case sizeof(uint16_t):{ - const uint8_t* data = p; - for (size_t i = 0; i < size; i++) { - if (data[i] != (uint8_t) 0) return true; - } - }break; - case sizeof(uint32_t):{ - size >>= 2; - const uint32_t* data = p; - for (size_t i = 0; i < size; i++) { - if (data[i] != (uint32_t) 0) return true; - } - }break; - default:{ - size >>= 3; - const uint64_t* data = p; - for (size_t i = 0; i < size; i++) { - if (data[i] != (uint64_t) 0) return true; - } - }break; +// Check whether the whole memory chunk is non-zero. +static bool CARR_check_range_is_nonzero(const void* data, size_t size) { + // Not sure if we need anything 'faster' here... 
+ for (size_t i = 0; i < size; ++i) { + if (((const char*)data)[i] != 0) { + return true; + } } return false; } -static bool CARR_map_insert_all(CARR_MAP_LAYOUT_ARGS, void* src, void* dst) { - if (src == NULL) return true; - const CARR_map_dispatch_t* src_dispatch = ((const CARR_map_dispatch_t**)src)[-1]; - const CARR_map_dispatch_t* dst_dispatch = ((const CARR_map_dispatch_t**)dst)[-1]; - for (const void* key = NULL; (key = src_dispatch->next_key(CARR_MAP_LAYOUT_PASS, src, key)) != NULL;) { - const void* value = src_dispatch->find(CARR_MAP_LAYOUT_PASS, src, key, NULL, false); - void* new_value = dst_dispatch->find(CARR_MAP_LAYOUT_PASS, dst, key, NULL, true); - if (new_value == NULL) return false; // Cannot insert. +static bool CARR_map_insert_all(untyped_map_t* src_map, untyped_map_t* dst_map, size_t key_size, size_t value_size) { + if (src_map->vptr == NULL) { + return true; + } + + for (const void* key = NULL; (key = src_map->vptr->next_key(src_map, key_size, value_size, key)) != NULL;) { + const void* value = src_map->vptr->find(src_map, key_size, value_size, key, NULL, false); + void* new_value = dst_map->vptr->find(dst_map, key_size, value_size, key, NULL, true); + if (new_value == NULL) { + return false; // Cannot insert. + } memcpy(new_value, value, value_size); } return true; @@ -168,123 +119,166 @@ static bool CARR_map_insert_all(CARR_MAP_LAYOUT_ARGS, void* src, void* dst) { // only do "find or insert" and never delete elements. static const uint32_t CARR_hash_map_probing_rehash_bit = 0x80000000; static const uint32_t CARR_hash_map_probing_limit_mask = 0x7fffffff; -typedef struct { - size_t capacity; - size_t size; +typedef struct CARR_hash_map_probing_impl_data_struct { + void* key_data; + void* value_data; + uint32_t probing_limit; float load_factor; - void* null_key_slot; + void* zero_key_slot; // points to the all-zero key if one exists (to distinguish from a missing key) + CARR_equals_fp equals; CARR_hash_fp hash; - void* dispatch_placeholder; -} CARR_hash_map_probing_t; +} CARR_hash_map_probing_impl_data_t; -static inline void* CARR_hash_map_probing_value_for(CARR_MAP_LAYOUT_ARGS, const void* data, const void* key_slot) { - if (key_slot == NULL) return NULL; - CARR_hash_map_probing_t* map = (CARR_hash_map_probing_t*) data - 1; - size_t value_block_offset = CARR_align_size(value_alignment, key_size * map->capacity); - return (char*)data + value_block_offset + ((const char*)key_slot - (char*)data) / key_size * value_size; +static inline void* CARR_hash_map_probing_value_for(const untyped_map_t* map, size_t key_size, size_t value_size, const void* key_slot) { + if (key_slot == NULL) { + return NULL; + } + + CARR_hash_map_probing_impl_data_t* impl_data = map->impl_data; + return (char*)impl_data->value_data + ((const char*)key_slot - (char*)impl_data->key_data) / key_size * value_size; } -static size_t CARR_hash_map_probing_check_extra_capacity(CARR_hash_map_probing_t* map, size_t count) { +static size_t CARR_hash_map_probing_check_extra_capacity(const untyped_map_t* map, size_t count) { // Run length is a local metric, which directly correlate with lookup performance, // but can suffer from clustering, bad hash function, or bad luck. // Load factor is a global metric, which reflects "fullness", // but doesn't capture local effects, like clustering, // and is over-conservative for good distributions. // Therefore, we only rehash when both load factor and probing limit are exceeded. 
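
[Aside: the rehash policy above, restated as a standalone predicate. Illustrative only; the names
are not from this code.]

    #include <stdbool.h>
    #include <stddef.h>

    // Rehash only when BOTH metrics agree: the local one (a probe run exceeded the
    // limit, recorded via the rehash bit) and the global one (load factor reached).
    static bool should_rehash(size_t size, size_t capacity, float load_factor, bool run_exceeded) {
        return run_exceeded && size >= (size_t)(load_factor * (float)capacity);
    }
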
+ CARR_hash_map_probing_impl_data_t* impl_data = map->impl_data; size_t new_capacity = map->size + count; if (new_capacity <= map->capacity) { - if (!(map->probing_limit & CARR_hash_map_probing_rehash_bit)) { // Rehashing not requested. + if (!(impl_data->probing_limit & CARR_hash_map_probing_rehash_bit)) { // Rehashing not requested. new_capacity = 0; - } else if (map->size < (size_t)(map->load_factor * (float)map->capacity)) { - map->probing_limit &= CARR_hash_map_probing_limit_mask; // Load factor too low, reset rehash flag. + } else if (map->size < (size_t)(impl_data->load_factor * (float)map->capacity)) { + impl_data->probing_limit &= CARR_hash_map_probing_limit_mask; // Load factor too low, reset rehash flag. new_capacity = 0; - } else new_capacity = map->capacity + 1; + } else { + new_capacity = map->capacity + 1; + } } return new_capacity; } -static const void* CARR_hash_map_probing_next_key(CARR_MAP_LAYOUT_ARGS, const void* data, const void* key_slot) { - CARR_hash_map_probing_t* map = (CARR_hash_map_probing_t*) data - 1; +static const void* CARR_hash_map_probing_next_key(const untyped_map_t* map, size_t key_size, size_t value_size, const void* key_slot) { + const CARR_hash_map_probing_impl_data_t* impl_data = map->impl_data; char* slot; - if (key_slot == NULL) slot = (char*)data; - else if (key_slot < data) return NULL; - else slot = (char*)key_slot + key_size; - char* limit = (char*)data + key_size * (map->capacity - 1); - for (; slot <= limit; slot += key_size) { - if (CARR_check_range(slot, key_alignment, key_size) || slot == map->null_key_slot) return slot; + if (key_slot == NULL) { + slot = impl_data->key_data; + } else { + slot = (char*)key_slot + key_size; + } + for (const char* key_data_end = (char*)impl_data->key_data + key_size * map->capacity; slot < key_data_end; slot += key_size) { + if (CARR_check_range_is_nonzero(slot, key_size) || slot == impl_data->zero_key_slot) { + return slot; + } } return NULL; } -static void CARR_hash_map_probing_clear(CARR_MAP_LAYOUT_ARGS, void* data) { - CARR_hash_map_probing_t* map = (CARR_hash_map_probing_t*) data - 1; - memset(data, 0, key_size * map->capacity); - map->probing_limit &= CARR_hash_map_probing_limit_mask; - map->null_key_slot = NULL; +static void CARR_hash_map_probing_clear(untyped_map_t* map, size_t key_size, size_t value_size) { map->size = 0; + + CARR_hash_map_probing_impl_data_t* impl_data = map->impl_data; + memset(impl_data->key_data, 0, key_size * map->capacity); + impl_data->probing_limit &= CARR_hash_map_probing_limit_mask; + impl_data->zero_key_slot = NULL; } -static void CARR_hash_map_probing_free(CARR_MAP_LAYOUT_ARGS, void* data) { - if (data == NULL) return; - CARR_context_t context = CARR_context_init(alignof(CARR_hash_map_probing_t), sizeof(CARR_hash_map_probing_t), - CARR_MAX(key_alignment, value_alignment)); - CARR_context_free(&context, data); +static void CARR_hash_map_probing_free(untyped_map_t* map, size_t key_size, size_t value_size) { + CARR_hash_map_probing_impl_data_t* impl_data = map->impl_data; + if (impl_data == NULL) { + return; + } + + free(impl_data->key_data); + free(impl_data->value_data); + free(impl_data); + *map = (untyped_map_t){0}; } // === Linear probing hash map === -static inline void CARR_hash_map_linear_probing_check_run(CARR_MAP_LAYOUT_ARGS, CARR_hash_map_probing_t* map, - const char* from, const char* to) { - if (map->probing_limit & CARR_hash_map_probing_rehash_bit) return; // Rehashing already requested. 
- if (map->size < (size_t)(map->load_factor * (float)map->capacity)) return; // Load factor too low. - ptrdiff_t offset = to - from; - if (to < from) offset += (ptrdiff_t)(map->capacity * key_size); - size_t run = (size_t)offset / key_size; - // Set rehash bit if our probing length exceeded the limit. - if (run > (size_t)map->probing_limit) map->probing_limit |= CARR_hash_map_probing_rehash_bit; +static inline void CARR_hash_map_linear_probing_check_run(untyped_map_t* map, size_t key_size, size_t value_size, + const char* occupied_begin, const char* occupied_end) { + CARR_hash_map_probing_impl_data_t* impl_data = map->impl_data; + if (impl_data->probing_limit & CARR_hash_map_probing_rehash_bit) { + return; // Rehashing already requested. + } + if (map->size < (size_t)(impl_data->load_factor * (float)map->capacity)) { + return; // Load factor too low. + } + ptrdiff_t offset = occupied_end - occupied_begin; + if (occupied_end < occupied_begin) { + offset += (ptrdiff_t)(map->capacity * key_size); + } + const size_t run = (size_t)offset / key_size; + // Set the rehash bit if our probing length exceeded the limit. + if (run > (size_t)impl_data->probing_limit) { + impl_data->probing_limit |= CARR_hash_map_probing_rehash_bit; + } } -static void* CARR_hash_map_linear_probing_find(CARR_MAP_LAYOUT_ARGS, - void* data, const void* key, const void** resolved_key, bool insert) { - CARR_hash_map_probing_t* map = (CARR_hash_map_probing_t*) data - 1; - char* wrap = (char*)data + key_size * map->capacity; - if (key >= data && key < (void*) wrap && ((const char*)key - (char*)data) % key_size == 0) { - // Try fast access for resolved key. - if (key == map->null_key_slot || CARR_check_range(key, key_alignment, key_size)) { - if (resolved_key != NULL) *resolved_key = key; - return CARR_hash_map_probing_value_for(CARR_MAP_LAYOUT_PASS, data, key); +static void* CARR_hash_map_linear_probing_find(untyped_map_t* map, size_t key_size, size_t value_size, + const void* key, const void** resolved_key, bool insert) { + CARR_hash_map_probing_impl_data_t* impl_data = map->impl_data; + + char* key_data_end = (char*)impl_data->key_data + key_size * map->capacity; + + // Resolved access path (`key` already points into the map): + // WARNING: THIS CHECK IS UNDEFINED BEHAVIOR! + // We are not really allowed by the C standard to check whether a pointer lies inside an array, but we're doing it anyway. + if (key >= impl_data->key_data && key < (void*)key_data_end && ((const char*)key - (char*)impl_data->key_data) % key_size == 0) { + // assert `key` is not an uninitialized slot (which would be a logical error anyway) + if (resolved_key != NULL) { + // We can discard const, since we now know the key is part of the map's (non-const) allocation. + *resolved_key = key; } + return CARR_hash_map_probing_value_for(map, key_size, value_size, key); } - size_t hash = map->hash(key); - char* start = (char*)data + key_size * (hash % map->capacity); - char* slot = start; + + // The general case: + + const size_t hash = impl_data->hash(key); + char* initial_slot = (char*)impl_data->key_data + key_size * (hash % map->capacity); + char* slot = initial_slot; for (;;) { - bool is_null = !CARR_check_range(slot, key_alignment, key_size); - if (map->equals(key, slot)) { + const bool is_null = !CARR_check_range_is_nonzero(slot, key_size); + if (impl_data->equals(key, slot)) { // Special case to distinguish null key from missing one. 
if (is_null) { - if (map->null_key_slot == NULL && insert) { - map->null_key_slot = slot; + if (impl_data->zero_key_slot == NULL && insert) { + impl_data->zero_key_slot = slot; break; // Insert. } - slot = map->null_key_slot; + slot = impl_data->zero_key_slot; } if (resolved_key != NULL) *resolved_key = slot; - return CARR_hash_map_probing_value_for(CARR_MAP_LAYOUT_PASS, data, slot); + return CARR_hash_map_probing_value_for(map, key_size, value_size, slot); } - if (is_null && slot != map->null_key_slot) { // Key not found. - if (insert) break; // Insert. - return resolved_key != NULL ? (void*)(*resolved_key = NULL) : NULL; + if (is_null && slot != impl_data->zero_key_slot) { // Key not found. + if (insert) { + break; // Insert. + } + if (resolved_key != NULL) { + *resolved_key = NULL; + } + return NULL; } slot += key_size; - if (slot == wrap) slot = (char*)data; - if (slot == start) { - return resolved_key != NULL ? (void*)(*resolved_key = NULL) : NULL; // We traversed the whole map. + if (slot == key_data_end) slot = (char*)impl_data->key_data; + if (slot == initial_slot) { + // We traversed the whole map. + if (resolved_key != NULL) { + *resolved_key = NULL; + } + return NULL; } } + // Insert. - void* value = CARR_hash_map_probing_value_for(CARR_MAP_LAYOUT_PASS, data, slot); + void* value = CARR_hash_map_probing_value_for(map, key_size, value_size, slot); memcpy(slot, key, key_size); // Copy key into slot. memset(value, 0, value_size); // Clear value. map->size++; @@ -292,74 +286,78 @@ static void* CARR_hash_map_linear_probing_find(CARR_MAP_LAYOUT_ARGS, *resolved_key = slot; value = NULL; // Indicate that value was just inserted. } - CARR_hash_map_linear_probing_check_run(CARR_MAP_LAYOUT_PASS, map, start, slot); + CARR_hash_map_linear_probing_check_run(map, key_size, value_size, initial_slot, slot); return value; } -static bool CARR_hash_map_linear_probing_remove(CARR_MAP_LAYOUT_ARGS, void* data, const void* key) { - char* key_slot; - CARR_hash_map_linear_probing_find(CARR_MAP_LAYOUT_PASS, data, key, (const void**) &key_slot, false); - if (key_slot == NULL) return false; - char* start = key_slot; - CARR_hash_map_probing_t* map = (CARR_hash_map_probing_t*) data - 1; - char* wrap = (char*)data + key_size * map->capacity; +static bool CARR_hash_map_linear_probing_remove(untyped_map_t* map, size_t key_size, size_t value_size, const void* key) { + const void* key_slot_void_ptr; + CARR_hash_map_linear_probing_find(map, key_size, value_size, key, &key_slot_void_ptr, false); + char* key_slot = (char*)key_slot_void_ptr; // It's ok to remove const from resolved key ptrs in this impl + if (key_slot == NULL) { + return false; + } + + CARR_hash_map_probing_impl_data_t* impl_data = map->impl_data; + const char* initial_slot = key_slot; + const char* key_data_end = (char*)impl_data->key_data + key_size * map->capacity; for (;;) { - if (map->null_key_slot == key_slot) map->null_key_slot = NULL; + if (impl_data->zero_key_slot == key_slot) { + impl_data->zero_key_slot = NULL; + } char* slot = key_slot; for (;;) { slot += key_size; - if (slot == wrap) slot = (char*)data; - if (slot == start || (!CARR_check_range(slot, key_alignment, key_size) && slot != map->null_key_slot)) { - memset(key_slot, 0, key_size); // Clear key slot. 
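
[Aside: the old remove being deleted here and its replacement below both implement backward-shift
deletion for linear probing, so no tombstones are needed. After a slot is vacated, the rest of the
probe run is scanned, and an entry is shifted back into the gap whenever the gap lies cyclically
between that entry's home slot and its current slot; that is what the two-branch comparison in the
loop checks. An illustrative standalone restatement of the test, with assumed names:]

    #include <stdbool.h>
    #include <stddef.h>

    // May the entry whose hash places it at index `home`, currently stored at index
    // `current`, be moved back into the vacated index `gap`? All indices are mod capacity.
    static bool can_backshift(size_t home, size_t current, size_t gap) {
        return current >= home ? (gap >= home && gap <= current)
                               : (gap >= home || gap <= current);
    }
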
- CARR_hash_map_linear_probing_check_run(CARR_MAP_LAYOUT_PASS, map, start, slot); + if (slot == key_data_end) { + slot = (char*)impl_data->key_data; + } + if (slot == initial_slot || (!CARR_check_range_is_nonzero(slot, key_size) && slot != impl_data->zero_key_slot)) { + memset(key_slot, 0, key_size); // Clear the key slot. + CARR_hash_map_linear_probing_check_run(map, key_size, value_size, initial_slot, slot); return true; } - size_t hash = map->hash(slot); - char* expected_slot = (char*)data + key_size * (hash % map->capacity); + const size_t hash = impl_data->hash(slot); + const char* expected_slot = (char*)impl_data->key_data + key_size * (hash % map->capacity); if (slot >= expected_slot) { - if (key_slot >= expected_slot && key_slot <= slot) break; + if (key_slot >= expected_slot && key_slot <= slot) { + break; + } } else { - if (key_slot >= expected_slot || key_slot <= slot) break; + if (key_slot >= expected_slot || key_slot <= slot) { + break; + } } } // Move another entry into the gap. - if (map->null_key_slot == slot) map->null_key_slot = key_slot; + if (impl_data->zero_key_slot == slot) { + impl_data->zero_key_slot = key_slot; + } memcpy(key_slot, slot, key_size); - memcpy(CARR_hash_map_probing_value_for(CARR_MAP_LAYOUT_PASS, data, key_slot), - CARR_hash_map_probing_value_for(CARR_MAP_LAYOUT_PASS, data, slot), value_size); + memcpy(CARR_hash_map_probing_value_for(map, key_size, value_size, key_slot), + CARR_hash_map_probing_value_for(map, key_size, value_size, slot), value_size); key_slot = slot; // Repeat with the new entry. } } -static bool CARR_hash_map_linear_probing_ensure_extra_capacity(CARR_MAP_LAYOUT_ARGS, void** handle, size_t count) { - void* data = *handle; - CARR_hash_map_probing_t* map = (CARR_hash_map_probing_t*) data - 1; - size_t new_capacity = CARR_hash_map_probing_check_extra_capacity(map, count); - if (new_capacity == 0) return true; - return CARR_hash_map_linear_probing_rehash(CARR_MAP_LAYOUT_PASS, handle, map->equals, map->hash, new_capacity, - map->probing_limit & CARR_hash_map_probing_limit_mask, map->load_factor); +static bool CARR_hash_map_linear_probing_ensure_extra_capacity(untyped_map_t* map, size_t key_size, size_t value_size, size_t count) { + + const size_t new_capacity = CARR_hash_map_probing_check_extra_capacity(map, count); + if (new_capacity == 0) { + return true; + } + + CARR_hash_map_probing_impl_data_t* impl_data = map->impl_data; + return CARR_hash_map_linear_probing_rehash(map, key_size, value_size, impl_data->equals, impl_data->hash, new_capacity, + impl_data->probing_limit & CARR_hash_map_probing_limit_mask, impl_data->load_factor); } -bool CARR_hash_map_linear_probing_rehash(CARR_MAP_LAYOUT_ARGS, void** handle, CARR_equals_fp equals, CARR_hash_fp hash, +bool CARR_hash_map_linear_probing_rehash(untyped_map_t* map, size_t key_size, size_t value_size, CARR_equals_fp equals, CARR_hash_fp hash, size_t new_capacity, uint32_t probing_limit, float load_factor) { - size_t table_capacity = HASH_MAP_FIND_SIZE(CARR_hash_map_primes, new_capacity); - if (table_capacity != 0) new_capacity = table_capacity; + const size_t table_capacity = HASH_MAP_FIND_SIZE(CARR_hash_map_primes, new_capacity); + if (table_capacity != 0) { + new_capacity = table_capacity; + } - CARR_context_t context = CARR_context_init(alignof(CARR_hash_map_probing_t), sizeof(CARR_hash_map_probing_t), - CARR_MAX(key_alignment, value_alignment)); - size_t value_block_offset = CARR_align_size(value_alignment, key_size * new_capacity); - if (!CARR_context_alloc(&context, value_block_offset 
+ value_size * new_capacity)) return false; - - CARR_hash_map_probing_t* map = (CARR_hash_map_probing_t*) context.new_data - 1; - *map = (CARR_hash_map_probing_t) { - .capacity = new_capacity, - .size = 0, - .probing_limit = CARR_MIN(probing_limit, CARR_hash_map_probing_limit_mask), - .load_factor = load_factor, - .null_key_slot = NULL, - .equals = equals, - .hash = hash - }; static const CARR_map_dispatch_t dispatch = { &CARR_hash_map_probing_next_key, &CARR_hash_map_linear_probing_find, @@ -368,15 +366,56 @@ bool CARR_hash_map_linear_probing_rehash(CARR_MAP_LAYOUT_ARGS, void** handle, CA &CARR_hash_map_probing_clear, &CARR_hash_map_probing_free, }; - ((const CARR_map_dispatch_t**)context.new_data)[-1] = &dispatch; + untyped_map_t new_map = { + .size = 0, + .capacity = new_capacity, + .vptr = &dispatch, + .impl_data = NULL, + .scratch_key_ptr = NULL, + .scratch_value_ptr = NULL + }; + CARR_hash_map_probing_impl_data_t* new_impl_data = malloc(sizeof(CARR_hash_map_probing_impl_data_t)); + if (new_impl_data == NULL) { + goto error_alloc_impl_data; + } + *new_impl_data = (CARR_hash_map_probing_impl_data_t){ + .key_data = NULL, + .value_data = NULL, + .probing_limit = CARR_MIN(probing_limit, CARR_hash_map_probing_limit_mask), + .load_factor = load_factor, + .zero_key_slot = NULL, + .equals = equals, + .hash = hash + }; + new_map.impl_data = new_impl_data; - CARR_hash_map_probing_clear(CARR_MAP_LAYOUT_PASS, context.new_data); - if (!CARR_map_insert_all(CARR_MAP_LAYOUT_PASS, *handle, context.new_data)) { - CARR_context_free(&context, context.new_data); - return false; + new_impl_data->key_data = malloc(key_size * new_capacity); + if (new_impl_data->key_data == NULL) { + goto error_alloc_key_data; } - if (*handle != NULL) ((const CARR_map_dispatch_t**)*handle)[-1]->free(CARR_MAP_LAYOUT_PASS, *handle); - *handle = context.new_data; + new_impl_data->value_data = malloc(value_size * new_capacity); + if (new_impl_data->value_data == NULL) { + goto error_alloc_value_data; + } + + CARR_hash_map_probing_clear(&new_map, key_size, value_size); + if (!CARR_map_insert_all(map, &new_map, key_size, value_size)) { + goto error_insert; + } + + if (map->vptr != NULL) { + map->vptr->free(map, key_size, value_size); + } + *map = new_map; return true; + + error_insert: + free(new_impl_data->value_data); + error_alloc_value_data: + free(new_impl_data->key_data); + error_alloc_key_data: + free(new_impl_data); + error_alloc_impl_data: + return false; } diff --git a/src/java.desktop/share/native/common/java2d/vulkan/CArrayUtil.h b/src/java.desktop/share/native/common/java2d/vulkan/CArrayUtil.h index d6fdc6707f64..28a4e0169c9c 100644 --- a/src/java.desktop/share/native/common/java2d/vulkan/CArrayUtil.h +++ b/src/java.desktop/share/native/common/java2d/vulkan/CArrayUtil.h @@ -1,8 +1,6 @@ #ifndef C_ARRAY_UTIL_H #define C_ARRAY_UTIL_H -#include -#include #include #include @@ -25,7 +23,6 @@ static inline bool CARR_handle_alloc(bool CARR_result, bool CARR_force) { C_ARRAY_UTIL_ALLOCATION_FAILED(); return false; } -static inline void consume(const void* value) {} // === Arrays === @@ -36,169 +33,127 @@ static inline void consume(const void* value) {} #define ARRAY_DEFAULT_CAPACITY 10 #endif -typedef struct { - size_t size; - size_t capacity; -} CARR_array_t; - -bool CARR_array_realloc(void** handle, size_t element_alignment, size_t element_size, size_t new_capacity); - -#define CARR_ARRAY_T(P) ((CARR_array_t*)(P) - 1) // NULL unsafe! 
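
[Aside: the code being removed here implemented the classic "header before data" fat pointer, which
is what the alignment bookkeeping above existed for. Schematically, as a comment sketch:]

    // block: [ CARR_array_t header | padding to max(header, element) alignment | elements... ]
    //                                                              user pointer P ^
    // Metadata was recovered as ((CARR_array_t*)P - 1), hence "NULL unsafe".
    // The new code keeps { size, capacity, data } in an ordinary struct instead,
    // so plain malloc/free suffice and no alignment math is needed.
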
- -static inline void* CARR_array_alloc(size_t element_alignment, size_t element_size, size_t new_capacity) { - void* data = NULL; - CARR_array_realloc(&data, element_alignment, element_size, new_capacity); - return data; +#define CARR_TYPED_ARRAY_T(T) struct { \ + size_t size; \ + size_t capacity; \ + T* data; \ } -static inline bool CARR_array_ensure_capacity(void** handle, size_t alignment, size_t size, +#define CARR_ARRAY_ELEMENT_SIZE(ARRAY) (sizeof((ARRAY).data[0])) + +typedef CARR_TYPED_ARRAY_T(void) untyped_array_t; + +static inline void CARR_untyped_array_init(untyped_array_t* array) { + array->size = 0; + array->capacity = 0; + array->data = NULL; +} + +bool CARR_untyped_array_realloc(untyped_array_t* array, size_t element_size, size_t new_capacity); + +static inline bool CARR_untyped_array_ensure_capacity(untyped_array_t* array, size_t element_size, size_t new_capacity, bool force) { - void* data = *handle; - if (new_capacity > (data == NULL ? 0 : CARR_ARRAY_T(data)->capacity)) { - return CARR_handle_alloc(CARR_array_realloc(handle, alignment, size, new_capacity), force); + if (new_capacity > array->capacity) { + return CARR_handle_alloc(CARR_untyped_array_realloc(array, element_size, new_capacity), force); } return true; } -static inline bool CARR_array_resize(void** handle, size_t alignment, size_t size, size_t new_size, bool force) { - if (CARR_array_ensure_capacity(handle, alignment, size, new_size, force)) { - void* data = *handle; - if (data != NULL) CARR_ARRAY_T(data)->size = new_size; - return true; +static inline bool CARR_untyped_array_resize(untyped_array_t* array, size_t element_size, size_t new_size, bool force) { + if (!CARR_untyped_array_ensure_capacity(array, element_size, new_size, force)) { + return false; } - return false; + + array->size = new_size; + return true; } -static inline void CARR_array_push_back(void** handle, size_t alignment, size_t size) { - void* data = *handle; - if (data == NULL || CARR_ARRAY_T(data)->size >= CARR_ARRAY_T(data)->capacity) { - size_t new_capacity = data == NULL ? ARRAY_DEFAULT_CAPACITY : ARRAY_CAPACITY_GROW(CARR_ARRAY_T(data)->size); - if (!CARR_handle_alloc(CARR_array_realloc(handle, alignment, size, new_capacity), true)) return; - data = *handle; // assert data != NULL +static inline void CARR_untyped_array_push_back(untyped_array_t* array, size_t element_size) { + // assert size <= capacity + if (array->size == array->capacity) { + const size_t new_capacity = array->size == 0 ? ARRAY_DEFAULT_CAPACITY : ARRAY_CAPACITY_GROW(array->size); + if (!CARR_handle_alloc(CARR_untyped_array_realloc(array, element_size, new_capacity), true)) { + return; + } } - CARR_ARRAY_T(data)->size++; + ++array->size; } /** - * Dynamic array declaration, e.g. ARRAY(int) my_array = NULL; + * Dynamic array declaration, e.g., ARRAY(int) my_array = {0}; * @param TYPE type of the array element. */ -#define ARRAY(TYPE) TYPE* +#define ARRAY(T) union { \ + CARR_TYPED_ARRAY_T(T); \ + untyped_array_t as_untyped; \ +} /** - * Allocate array. Returns NULL on allocation failure. - * @param T type of elements - * @param CAPACITY capacity of the array - * @return pointer to the allocated array, or NULL - */ -#define ARRAY_ALLOC(T, CAPACITY) ((T*)CARR_array_alloc(alignof(T), sizeof(T), CAPACITY)) - -/** - * @param P array - * @return size of the array - */ -#define ARRAY_SIZE(P) ((P) == NULL ? (size_t) 0 : (CARR_ARRAY_T(P))->size) - -/** - * @param P array - * @return capacity of the array - */ -#define ARRAY_CAPACITY(P) ((P) == NULL ? 
(size_t) 0 : (CARR_ARRAY_T(P))->capacity) - -/** - * @param P array + * @param ARRAY array * @return dereferenced pointer to the last element in the array */ -#define ARRAY_LAST(P) ((P)[ARRAY_SIZE(P) - 1]) +#define ARRAY_LAST(ARRAY) ((ARRAY).data[(ARRAY).size - 1]) /** - * Deallocate the vector - * @param P array + * Deallocate the dynamic array + * @param ARRAY array */ -#define ARRAY_FREE(P) ((void)CARR_array_realloc((void**)&(P), alignof(*(P)), sizeof(*(P)), 0)) - -/** - * Apply function to the array elements - * @param P array - * @param F function to apply - */ -#define ARRAY_APPLY(P, F) do { \ - for (size_t _i = 0; _i < ARRAY_SIZE(P); _i++) F(&((P)[_i])); \ -} while(0) - -/** - * Apply function to the array elements, passing pointer to an element as first parameter - * @param P array - * @param F function to apply - */ -#define ARRAY_APPLY_LEADING(P, F, ...) do { \ - for (size_t _i = 0; _i < ARRAY_SIZE(P); _i++) F(&((P)[_i]), __VA_ARGS__); \ -} while(0) - -/** - * Apply function to the array elements, passing pointer to an element as last parameter - * @param P array - * @param F function to apply - */ -#define ARRAY_APPLY_TRAILING(P, F, ...) do { \ - for (size_t _i = 0; _i < ARRAY_SIZE(P); _i++) F(__VA_ARGS__, &((P)[_i])); \ -} while(0) +#define ARRAY_FREE(ARRAY) ((void)CARR_untyped_array_realloc(&(ARRAY).as_untyped, CARR_ARRAY_ELEMENT_SIZE((ARRAY)), 0)) /** * Ensure array capacity. Array is implicitly initialized when necessary. * On allocation failure, array is left unchanged. - * @param P array + * @param ARRAY array * @param CAPACITY required capacity of the array * @return true if the operation succeeded */ -#define ARRAY_TRY_ENSURE_CAPACITY(P, CAPACITY) \ - CARR_array_ensure_capacity((void**)&(P), alignof(*(P)), sizeof(*(P)), (CAPACITY), false) +#define ARRAY_TRY_ENSURE_CAPACITY(ARRAY, CAPACITY) \ + CARR_untyped_array_ensure_capacity(&(ARRAY).as_untyped, CARR_ARRAY_ELEMENT_SIZE((ARRAY)), (CAPACITY), false) /** * Ensure array capacity. Array is implicitly initialized when necessary. * On allocation failure, C_ARRAY_UTIL_ALLOCATION_FAILED is called. - * @param P array + * @param ARRAY array * @param CAPACITY required capacity of the array */ -#define ARRAY_ENSURE_CAPACITY(P, CAPACITY) \ - ((void)CARR_array_ensure_capacity((void**)&(P), alignof(*(P)), sizeof(*(P)), (CAPACITY), true)) +#define ARRAY_ENSURE_CAPACITY(ARRAY, CAPACITY) \ + ((void)CARR_untyped_array_ensure_capacity(&(ARRAY).as_untyped, CARR_ARRAY_ELEMENT_SIZE((ARRAY)), (CAPACITY), true)) /** * Shrink capacity of the array to its size. * On allocation failure, array is left unchanged. - * @param P array - * @return the array + * @param ARRAY array * @return true if the operation succeeded */ -#define ARRAY_SHRINK_TO_FIT(P) CARR_array_realloc((void**)&(P), alignof(*(P)), sizeof(*(P)), ARRAY_SIZE(P)) +#define ARRAY_SHRINK_TO_FIT(ARRAY) CARR_untyped_array_realloc(&(ARRAY).as_untyped, CARR_ARRAY_ELEMENT_SIZE((ARRAY)), (ARRAY).size) /** * Resize an array. Array is implicitly initialized when necessary. * On allocation failure, array is left unchanged. - * @param P array + * @param ARRAY array * @param SIZE required size of the array * @return true if the operation succeeded */ -#define ARRAY_TRY_RESIZE(P, SIZE) \ - CARR_array_resize((void**)&(P), alignof(*(P)), sizeof(*(P)), (SIZE), false) +#define ARRAY_TRY_RESIZE(ARRAY, SIZE) \ + CARR_untyped_array_resize(&(ARRAY).as_untyped, CARR_ARRAY_ELEMENT_SIZE((ARRAY)), (SIZE), false) /** * Resize an array. Array is implicitly initialized when necessary. 
* On allocation failure, C_ARRAY_UTIL_ALLOCATION_FAILED is called. - * @param P array + * @param ARRAY array * @param SIZE required size of the array */ -#define ARRAY_RESIZE(P, SIZE) \ - ((void)CARR_array_resize((void**)&(P), alignof(*(P)), sizeof(*(P)), (SIZE), true)) +#define ARRAY_RESIZE(ARRAY, SIZE) \ + ((void)CARR_untyped_array_resize(&(ARRAY).as_untyped, CARR_ARRAY_ELEMENT_SIZE((ARRAY)), (SIZE), true)) /** * Add element to the end of the array. Array is implicitly initialized when necessary. * On allocation failure, C_ARRAY_UTIL_ALLOCATION_FAILED is called. - * @param P array + * @param ARRAY array * @return dereferenced pointer to the inserted element */ -#define ARRAY_PUSH_BACK(P) \ - (*(CARR_array_push_back((void**)&(P), alignof(*(P)), sizeof(*(P))), (P) + ARRAY_SIZE(P) - 1)) +#define ARRAY_PUSH_BACK(ARRAY) \ + (*(CARR_untyped_array_push_back(&(ARRAY).as_untyped, CARR_ARRAY_ELEMENT_SIZE((ARRAY))), ((ARRAY).data) + ((ARRAY).size) - 1)) /** * Compile-time length of the static array. @@ -207,188 +162,191 @@ static inline void CARR_array_push_back(void** handle, size_t alignment, size_t // === Ring buffers === -typedef struct { - size_t head; - size_t tail; - size_t capacity; -} CARR_ring_buffer_t; - -bool CARR_ring_buffer_realloc(void** handle, size_t element_alignment, size_t element_size, size_t new_capacity); - -#define CARR_RING_BUFFER_T(P) ((CARR_ring_buffer_t*)(P) - 1) // NULL / type unsafe! -#define CARR_RING_BUFFER_IS_NULL(P) (&(P)->CARR_elem == NULL) // Guard against wrong pointer types. -#define CARR_RING_BUFFER_GUARD(P, ...) (consume(&(P)->CARR_elem), __VA_ARGS__) // Guard against wrong pointer types. - -static inline size_t CARR_ring_buffer_size(void* data) { - CARR_ring_buffer_t* buffer = CARR_RING_BUFFER_T(data); - return (buffer->capacity + buffer->tail - buffer->head) % buffer->capacity; +#define CARR_TYPED_RING_BUFFER_T(T) struct { \ + size_t head_idx; \ + size_t size; \ + size_t capacity; \ + T* data; \ } -static inline bool CARR_ring_buffer_ensure_can_push(void** handle, size_t alignment, size_t size, bool force) { - void* data = *handle; - if (data == NULL || CARR_ring_buffer_size(data) + 1 >= CARR_RING_BUFFER_T(data)->capacity) { - size_t new_capacity = data == NULL ? - ARRAY_DEFAULT_CAPACITY : ARRAY_CAPACITY_GROW(CARR_RING_BUFFER_T(data)->capacity); - return CARR_handle_alloc(CARR_ring_buffer_realloc(handle, alignment, size, new_capacity), force); +typedef CARR_TYPED_RING_BUFFER_T(void) untyped_ring_buffer_t; + +bool CARR_untyped_ring_buffer_realloc(untyped_ring_buffer_t* ring_buffer, size_t element_size, size_t new_capacity); + +static inline bool CARR_untyped_ring_buffer_ensure_can_push(untyped_ring_buffer_t* ring_buffer, size_t element_size, bool force) { + // assert size <= capacity + if (ring_buffer->size == ring_buffer->capacity) { + const size_t new_capacity = ring_buffer->size == 0 ? 
ARRAY_DEFAULT_CAPACITY : ARRAY_CAPACITY_GROW(ring_buffer->size); + return CARR_handle_alloc(CARR_untyped_ring_buffer_realloc(ring_buffer, element_size, new_capacity), force); } return true; } -static inline size_t CARR_ring_buffer_push_front(void* data) { - if (data == NULL) return 0; - CARR_ring_buffer_t* buffer = CARR_RING_BUFFER_T(data); - return buffer->head = (buffer->head + buffer->capacity - 1) % buffer->capacity; +static inline void CARR_untyped_ring_buffer_push_front(untyped_ring_buffer_t* ring_buffer, size_t element_size) { + CARR_untyped_ring_buffer_ensure_can_push(ring_buffer, element_size, true); + ring_buffer->head_idx = (ring_buffer->head_idx + ring_buffer->capacity - 1) % ring_buffer->capacity; + ++ring_buffer->size; } -static inline size_t CARR_ring_buffer_push_back(void* data) { - if (data == NULL) return 0; - CARR_ring_buffer_t* buffer = CARR_RING_BUFFER_T(data); - size_t i = buffer->tail; - buffer->tail = (buffer->tail + 1) % buffer->capacity; - return i; +static inline void CARR_untyped_ring_buffer_push_back(untyped_ring_buffer_t* ring_buffer, size_t element_size) { + CARR_untyped_ring_buffer_ensure_can_push(ring_buffer, element_size, true); + ++ring_buffer->size; } +static inline void CARR_untyped_ring_buffer_pop_front(untyped_ring_buffer_t* ring_buffer) { + // assert size > 0 + ring_buffer->head_idx = (ring_buffer->head_idx + 1) % ring_buffer->capacity; + --ring_buffer->size; +} + +static inline void CARR_untyped_ring_buffer_pop_back(untyped_ring_buffer_t* ring_buffer) { + // assert size > 0 + --ring_buffer->size; +} + + /** * Ring buffer declaration, e.g. RING_BUFFER(int) my_ring = NULL; * @param TYPE type of the ring buffer element. */ -#define RING_BUFFER(TYPE) struct { TYPE CARR_elem; }* - -/** - * @param P ring buffer - * @return size of the ring buffer - */ -#define RING_BUFFER_SIZE(P) (CARR_RING_BUFFER_IS_NULL(P) ? (size_t) 0 : CARR_ring_buffer_size(P)) - -/** - * @param P ring buffer - * @return capacity of the ring buffer - */ -#define RING_BUFFER_CAPACITY(P) (CARR_RING_BUFFER_IS_NULL(P) ? (size_t) 0 : CARR_RING_BUFFER_T(P)->capacity) +#define RING_BUFFER(T) union { \ + CARR_TYPED_RING_BUFFER_T(T); \ + untyped_ring_buffer_t as_untyped; \ +} /** * Ensure enough capacity to push an element into ring buffer. Implicitly initializes when buffer is NULL. * On allocation failure, buffer is left unchanged. - * @param P ring buffer + * @param RING_BUFFER ring buffer * @return true if the operation succeeded */ -#define RING_BUFFER_TRY_ENSURE_CAN_PUSH(P) CARR_RING_BUFFER_GUARD((P), \ - CARR_ring_buffer_ensure_can_push((void**)&(P), alignof(*(P)), sizeof(*(P)), false)) +#define RING_BUFFER_TRY_ENSURE_CAN_PUSH(RING_BUFFER) \ + CARR_untyped_ring_buffer_ensure_can_push(&(RING_BUFFER).as_untyped, CARR_ARRAY_ELEMENT_SIZE((RING_BUFFER)), false) /** * Ensure enough capacity to push an element into ring buffer. Implicitly initializes when buffer is NULL. * On allocation failure, C_ARRAY_UTIL_ALLOCATION_FAILED is called. - * @param P ring buffer + * @param RING_BUFFER ring buffer */ -#define RING_BUFFER_ENSURE_CAN_PUSH(P) CARR_RING_BUFFER_GUARD((P), \ - (void)CARR_ring_buffer_ensure_can_push((void**)&(P), alignof(*(P)), sizeof(*(P)), true)) +#define RING_BUFFER_ENSURE_CAN_PUSH(RING_BUFFER) \ + ((void)CARR_untyped_ring_buffer_ensure_can_push(&(RING_BUFFER).as_untyped, CARR_ARRAY_ELEMENT_SIZE((RING_BUFFER)), true)) /** * Add element to the beginning of the ring buffer. Implicitly initializes when buffer is NULL. * On allocation failure, C_ARRAY_UTIL_ALLOCATION_FAILED is called. 
- * @param P ring buffer
+ * @param RING_BUFFER ring buffer
  * @return dereferenced pointer to the inserted element
  */
-#define RING_BUFFER_PUSH_FRONT(P) \
-    ((RING_BUFFER_ENSURE_CAN_PUSH(P), (P) + CARR_ring_buffer_push_front(P))->CARR_elem)
+#define RING_BUFFER_PUSH_FRONT(RING_BUFFER) \
+    (*(CARR_untyped_ring_buffer_push_front(&(RING_BUFFER).as_untyped, CARR_ARRAY_ELEMENT_SIZE((RING_BUFFER))), (RING_BUFFER).data + (RING_BUFFER).head_idx))
 
 /**
  * Add element to the end of the ring buffer. Implicitly initializes when buffer is NULL.
  * On allocation failure, C_ARRAY_UTIL_ALLOCATION_FAILED is called.
- * @param P ring buffer
+ * @param RING_BUFFER ring buffer
  * @return dereferenced pointer to the inserted element
  */
-#define RING_BUFFER_PUSH_BACK(P) \
-    ((RING_BUFFER_ENSURE_CAN_PUSH(P), (P) + CARR_ring_buffer_push_back(P))->CARR_elem)
+#define RING_BUFFER_PUSH_BACK(RING_BUFFER) \
+    (*(CARR_untyped_ring_buffer_push_back(&(RING_BUFFER).as_untyped, CARR_ARRAY_ELEMENT_SIZE((RING_BUFFER))), (RING_BUFFER).data + ((RING_BUFFER).head_idx + (RING_BUFFER).size - 1) % (RING_BUFFER).capacity))
 
 /**
  * Get pointer to the first element of the ring buffer.
- * @param P ring buffer
+ * @param RING_BUFFER ring buffer
  * @return pointer to the first element of the ring buffer, or NULL
  */
-#define RING_BUFFER_FRONT(P) (CARR_RING_BUFFER_IS_NULL(P) || \
-    CARR_RING_BUFFER_T(P)->head == CARR_RING_BUFFER_T(P)->tail ? NULL : &(P)[CARR_RING_BUFFER_T(P)->head].CARR_elem)
+#define RING_BUFFER_FRONT(RING_BUFFER) \
+    ((RING_BUFFER).size == 0 ? NULL : (RING_BUFFER).data + (RING_BUFFER).head_idx)
 
 /**
  * Get pointer to the last element of the ring buffer.
- * @param P ring buffer
+ * @param RING_BUFFER ring buffer
  * @return pointer to the last element of the ring buffer, or NULL
  */
-#define RING_BUFFER_BACK(P) (CARR_RING_BUFFER_IS_NULL(P) || \
-    CARR_RING_BUFFER_T(P)->head == CARR_RING_BUFFER_T(P)->tail ? NULL : \
-    &(P)[(CARR_RING_BUFFER_T(P)->tail+CARR_RING_BUFFER_T(P)->capacity-1) % CARR_RING_BUFFER_T(P)->capacity].CARR_elem)
+#define RING_BUFFER_BACK(RING_BUFFER) \
+    ((RING_BUFFER).size == 0 ? NULL : (RING_BUFFER).data + ((RING_BUFFER).head_idx + (RING_BUFFER).size - 1) % ((RING_BUFFER).capacity))
+
 
 /**
  * Move beginning of the ring buffer forward (remove first element).
- * @param P ring buffer
+ * @param RING_BUFFER ring buffer
  */
-#define RING_BUFFER_POP_FRONT(P) CARR_RING_BUFFER_GUARD((P), (void)(CARR_RING_BUFFER_T(P)->head = \
-    (CARR_RING_BUFFER_T(P)->head + 1) % CARR_RING_BUFFER_T(P)->capacity))
+#define RING_BUFFER_POP_FRONT(RING_BUFFER) \
+    CARR_untyped_ring_buffer_pop_front(&(RING_BUFFER).as_untyped)
 
 /**
  * Move end of the ring buffer backward (remove last element).
- * @param P ring buffer + * @param RING_BUFFER ring buffer */ -#define RING_BUFFER_POP_BACK(P) CARR_RING_BUFFER_GUARD((P), (void)(CARR_RING_BUFFER_T(P)->tail = \ - (CARR_RING_BUFFER_T(P)->tail + CARR_RING_BUFFER_T(P)->capacity - 1) % CARR_RING_BUFFER_T(P)->capacity)) +#define RING_BUFFER_POP_BACK(RING_BUFFER) \ + CARR_untyped_ring_buffer_pop_back(&(RING_BUFFER).as_untyped) /** * Deallocate the ring buffer - * @param P ring buffer + * @param RING_BUFFER ring buffer */ -#define RING_BUFFER_FREE(P) CARR_RING_BUFFER_GUARD((P), \ - (void)CARR_ring_buffer_realloc((void**)&(P), alignof(*(P)), sizeof(*(P)), 0)) +#define RING_BUFFER_FREE(RING_BUFFER) \ + ((void)CARR_untyped_ring_buffer_realloc(&(RING_BUFFER).as_untyped, CARR_ARRAY_ELEMENT_SIZE((RING_BUFFER)), 0)) // === Maps === +typedef struct CARR_map_dispatch_struct CARR_map_dispatch_t; + +#define CARR_TYPED_MAP_T(K, V) struct { \ + size_t size; \ + size_t capacity; \ + const CARR_map_dispatch_t* vptr; \ + void* impl_data; \ + const K* scratch_key_ptr; \ + V* scratch_value_ptr; \ +} + +typedef CARR_TYPED_MAP_T(void, void) untyped_map_t; + typedef bool (*CARR_equals_fp)(const void* a, const void* b); typedef size_t (*CARR_hash_fp)(const void* data); -#define CARR_MAP_LAYOUT_ARGS size_t key_alignment, size_t key_size, size_t value_alignment, size_t value_size -#define CARR_MAP_LAYOUT_PASS key_alignment, key_size, value_alignment, value_size -#define CARR_MAP_LAYOUT(P) \ - alignof((P)->CARR_keys[0].CARR_key[0]), sizeof((P)->CARR_keys[0].CARR_key[0]), \ - alignof((P)->CARR_values[0].CARR_value[0]), sizeof((P)->CARR_values[0].CARR_value[0]) +typedef const void* (*CARR_map_dispatch_next_key_fp)(const untyped_map_t* map, size_t key_size, size_t value_size, const void* key_slot); +typedef void* (*CARR_map_dispatch_find_fp)(untyped_map_t* map, size_t key_size, size_t value_size, + const void* key, const void** resolved_key, bool insert); +typedef bool (*CARR_map_dispatch_remove_fp)(untyped_map_t* map, size_t key_size, size_t value_size, const void* key); +typedef bool (*CARR_map_dispatch_ensure_extra_capacity_fp)(untyped_map_t* map, size_t key_size, size_t value_size, size_t count); +typedef void (*CARR_map_dispatch_clear_fp)(untyped_map_t* map, size_t key_size, size_t value_size); +typedef void (*CARR_map_dispatch_free_fp)(untyped_map_t* map, size_t key_size, size_t value_size); -typedef const void* (*CARR_map_dispatch_next_key_fp)(CARR_MAP_LAYOUT_ARGS, const void* data, const void* key_slot); -typedef void* (*CARR_map_dispatch_find_fp)(CARR_MAP_LAYOUT_ARGS, - void* data, const void* key, const void** resolved_key, bool insert); -typedef bool (*CARR_map_dispatch_remove_fp)(CARR_MAP_LAYOUT_ARGS, void* data, const void* key); -typedef bool (*CARR_map_dispatch_ensure_extra_capacity_fp)(CARR_MAP_LAYOUT_ARGS, void** handle, size_t count); -typedef void (*CARR_map_dispatch_clear_fp)(CARR_MAP_LAYOUT_ARGS, void* data); -typedef void (*CARR_map_dispatch_free_fp)(CARR_MAP_LAYOUT_ARGS, void* data); - -typedef struct { +struct CARR_map_dispatch_struct { CARR_map_dispatch_next_key_fp next_key; CARR_map_dispatch_find_fp find; CARR_map_dispatch_remove_fp remove; CARR_map_dispatch_ensure_extra_capacity_fp ensure_extra_capacity; CARR_map_dispatch_clear_fp clear; CARR_map_dispatch_free_fp free; -} CARR_map_dispatch_t; +}; -#define CARR_MAP_KEY_PTR(P, ...) \ - (&((true ? NULL : (P))->CARR_keys[((uintptr_t)(__VA_ARGS__) / sizeof((P)->CARR_keys[0]) - 1)].CARR_key[0])) -#define CARR_MAP_VALUE_PTR(P, ...) \ - (&((true ? 
NULL : (P))->CARR_values[((uintptr_t)(__VA_ARGS__) / sizeof((P)->CARR_values[0]) - 1)].CARR_value[0])) -#define CARR_MAP_KEY_GUARD(P, ...) \ - (true ? (__VA_ARGS__) : &(P)->CARR_keys[0].CARR_key[0]) // Guard against wrong key types. -#define CARR_MAP_DISPATCH(P, NAME, ...) \ - (((const CARR_map_dispatch_t**)(P))[-1]->NAME(CARR_MAP_LAYOUT(P), __VA_ARGS__)) +#define CARR_MAP_LAYOUT(MAP) sizeof(*(MAP).scratch_key_ptr), sizeof(*(MAP).scratch_value_ptr) -bool CARR_hash_map_linear_probing_rehash(CARR_MAP_LAYOUT_ARGS, void** handle, CARR_equals_fp equals, CARR_hash_fp hash, +#define CARR_MAP_DISPATCH_NO_ARGS(MAP, NAME) \ + ((MAP).vptr->NAME(&(MAP).as_untyped, CARR_MAP_LAYOUT((MAP)))) + +#define CARR_MAP_DISPATCH(MAP, NAME, ...) \ + ((MAP).vptr->NAME(&(MAP).as_untyped, CARR_MAP_LAYOUT((MAP)), __VA_ARGS__)) + +#define CARR_MAP_KEY_GUARD(MAP, ...) \ + (true ? (__VA_ARGS__) : (MAP).scratch_key_ptr) // Guard against wrong key types. + + +bool CARR_hash_map_linear_probing_rehash(untyped_map_t* map, size_t key_size, size_t value_size, CARR_equals_fp equals, CARR_hash_fp hash, size_t new_capacity, uint32_t probing_limit, float load_factor); /** - * Map declaration, e.g. MAP(int, int) my_map = NULL; + * Map declaration, e.g. MAP(int, int) my_map = {0}; * Map must be explicitly initialized before usage, e.g. via HASH_MAP_REHASH. - * @param KEY_TYPE type of the map key. - * @param VALUE_TYPE type of the map value. + * @param K type of the map key. + * @param V type of the map value. */ -#define MAP(KEY_TYPE, VALUE_TYPE) union { \ - struct { char CARR_dummy; const KEY_TYPE CARR_key[]; } CARR_keys[1]; \ - struct { char CARR_dummy; VALUE_TYPE CARR_value[]; } CARR_values[1]; \ -}* +#define MAP(K, V) union { \ + CARR_TYPED_MAP_T(K, V); \ + untyped_map_t as_untyped; \ +} /** * Rehash a hash map with given strategy. It will be initialized if NULL. @@ -401,111 +359,117 @@ bool CARR_hash_map_linear_probing_rehash(CARR_MAP_LAYOUT_ARGS, void** handle, CA * uint32_t probing_limit, // Search length, triggering rehash. Must not be too low, around 10 should be fine? * float load_factor // Min load factor needed to allow rehash triggered by probing_limit. 0.75 is fine. * ) - * @param P map + * @param MAP map * @param STRATEGY strategy to use * @param ... parameters for the rehash strategy */ -#define HASH_MAP_REHASH(P, STRATEGY, ...) \ - ((void)CARR_handle_alloc(CARR_hash_map_##STRATEGY##_rehash(CARR_MAP_LAYOUT(P), (void**)&(P), __VA_ARGS__), true)) +#define HASH_MAP_REHASH(MAP, STRATEGY, ...) \ + ((void)CARR_handle_alloc(CARR_hash_map_##STRATEGY##_rehash(&(MAP).as_untyped, CARR_MAP_LAYOUT((MAP)), __VA_ARGS__), true)) /** * Rehash a hash map with given strategy. It will be initialized if NULL. * On allocation failure, map is left unchanged. * For list of available strategies see HASH_MAP_REHASH. - * @param P map + * @param MAP map * @param STRATEGY strategy to use * @return true if the operation succeeded */ -#define HASH_MAP_TRY_REHASH(P, STRATEGY, ...) \ - (CARR_hash_map_##STRATEGY##_rehash(CARR_MAP_LAYOUT(P), (void**)&(P), __VA_ARGS__)) +#define HASH_MAP_TRY_REHASH(MAP, STRATEGY, ...) \ + (CARR_hash_map_##STRATEGY##_rehash(&(MAP).as_untyped, CARR_MAP_LAYOUT((MAP)), __VA_ARGS__)) /** * Find the next resolved key present in the map, or NULL. * Enumeration order is implementation-defined. 
- * @param P map
+ * @param MAP map
  * @param KEY_PTR pointer to the current resolved key, or NULL
  * @return pointer to the next resolved key
  */
-#define MAP_NEXT_KEY(P, KEY_PTR) \
-    CARR_MAP_KEY_PTR((P), CARR_MAP_DISPATCH((P), next_key, (P), CARR_MAP_KEY_GUARD((P), (KEY_PTR))))
+#define MAP_NEXT_KEY(MAP, KEY_PTR) \
+    (((MAP).scratch_key_ptr = CARR_MAP_DISPATCH((MAP), next_key, CARR_MAP_KEY_GUARD((MAP), (KEY_PTR)))))
 
 /**
  * Find a value for the provided key.
- * @param P map
+ * @param MAP map
  * @param ... key to find, can be a compound literal, like (int){0}
  * @return pointer to the found value, or NULL
  */
-#define MAP_FIND(P, ...) \
-    CARR_MAP_VALUE_PTR((P), CARR_MAP_DISPATCH((P), find, (P), CARR_MAP_KEY_GUARD((P), &(__VA_ARGS__)), NULL, false))
+#define MAP_FIND(MAP, ...) \
+    (((MAP).scratch_value_ptr = CARR_MAP_DISPATCH((MAP), find, CARR_MAP_KEY_GUARD((MAP), &(__VA_ARGS__)), NULL, false)))
 
 /**
  * Find a value for the provided key, or insert a new one.
  * Value is zeroed for newly inserted items.
  * On allocation failure, C_ARRAY_UTIL_ALLOCATION_FAILED is called.
- * @param P map
+ * @param MAP map
  * @param ... key to find, can be a compound literal, like (int){0}
  * @return dereferenced pointer to the found value
  */
-#define MAP_AT(P, ...) (*(MAP_ENSURE_EXTRA_CAPACITY((P), 1), \
-    CARR_MAP_VALUE_PTR((P), CARR_MAP_DISPATCH((P), find, (P), CARR_MAP_KEY_GUARD((P), &(__VA_ARGS__)), NULL, true))))
+#define MAP_AT(MAP, ...) (*( \
+    MAP_ENSURE_EXTRA_CAPACITY((MAP), 1), \
+    (((MAP).scratch_value_ptr = CARR_MAP_DISPATCH((MAP), find, CARR_MAP_KEY_GUARD((MAP), &(__VA_ARGS__)), NULL, true))) \
+))
 
 /**
  * Resolve provided key and find corresponding value.
  * Using resolved key addresses speeds up subsequent map operations.
- * @param P map
+ * @param MAP map
  * @param KEY_PTR pointer to the key to find, replaced with resolved key address, or NULL
  * @return pointer to the found value, or NULL
  */
-#define MAP_RESOLVE(P, KEY_PTR) CARR_MAP_VALUE_PTR((P), \
-    CARR_MAP_DISPATCH((P), find, (P), CARR_MAP_KEY_GUARD((P), (KEY_PTR)), (const void**) &(KEY_PTR), false))
+#define MAP_RESOLVE(MAP, KEY_PTR) \
+    (((MAP).scratch_value_ptr = CARR_MAP_DISPATCH((MAP), find, CARR_MAP_KEY_GUARD((MAP), (KEY_PTR)), (const void**)&(KEY_PTR), false)))
 
 /**
  * Resolve provided key and find corresponding value, or insert a new one.
  * Using resolved key addresses speeds up subsequent map operations.
  * Returned value pointer may be NULL, indicating that the entry was just inserted, use MAP_FIND or MAP_AT to access it.
  * On allocation failure, map is left unchanged.
- * @param P map
+ * @param MAP map
  * @param KEY_PTR pointer to the key to find, replaced with resolved key address
  * @return pointer to the found value, or NULL
  */
-#define MAP_RESOLVE_OR_INSERT(P, KEY_PTR) (MAP_TRY_ENSURE_EXTRA_CAPACITY((P), 1), CARR_MAP_VALUE_PTR((P), \
-    CARR_MAP_DISPATCH((P), find, (P), CARR_MAP_KEY_GUARD((P), (KEY_PTR)), (const void**) &(KEY_PTR), true)))
+#define MAP_RESOLVE_OR_INSERT(MAP, KEY_PTR) ( \
+    MAP_TRY_ENSURE_EXTRA_CAPACITY((MAP), 1), \
+    (((MAP).scratch_value_ptr = CARR_MAP_DISPATCH((MAP), find, CARR_MAP_KEY_GUARD((MAP), (KEY_PTR)), (const void**)&(KEY_PTR), true))) \
+)
+// Strictly speaking, this cast to const void** is undefined behavior, but a conforming version
+// needs a fresh variable, which we cannot introduce in a macro. A map.scratch_resolved_key_ptr
+// member could be added for this; in practice the cast works everywhere we target anyway.
 
 /**
  * Remove the provided key, if one exists.
- * @param P map
+ * @param MAP map
 * @param ...
 key to remove, can be a compound literal, like (int){0}
  * @return true if the key was removed
  */
-#define MAP_REMOVE(P, ...) CARR_MAP_DISPATCH((P), remove, (P), CARR_MAP_KEY_GUARD((P), &(__VA_ARGS__)))
+#define MAP_REMOVE(MAP, ...) CARR_MAP_DISPATCH((MAP), remove, CARR_MAP_KEY_GUARD((MAP), &(__VA_ARGS__)))
 
 /**
  * Ensure that map has enough capacity to insert COUNT more items without reallocation.
  * On allocation failure, C_ARRAY_UTIL_ALLOCATION_FAILED is called.
- * @param P map
+ * @param MAP map
  * @param COUNT number of new items
  */
-#define MAP_ENSURE_EXTRA_CAPACITY(P, COUNT) ((void)CARR_handle_alloc(MAP_TRY_ENSURE_EXTRA_CAPACITY((P), (COUNT)), true))
+#define MAP_ENSURE_EXTRA_CAPACITY(MAP, COUNT) ((void)CARR_handle_alloc(MAP_TRY_ENSURE_EXTRA_CAPACITY((MAP), (COUNT)), true))
 
 /**
  * Ensure that map has enough capacity to insert COUNT more items without reallocation.
  * On allocation failure, map is left unchanged.
- * @param P map
+ * @param MAP map
  * @param COUNT number of new items
  * @return true if the operation succeeded
  */
-#define MAP_TRY_ENSURE_EXTRA_CAPACITY(P, COUNT) CARR_MAP_DISPATCH((P), ensure_extra_capacity, (void**)&(P), (COUNT))
+#define MAP_TRY_ENSURE_EXTRA_CAPACITY(MAP, COUNT) CARR_MAP_DISPATCH((MAP), ensure_extra_capacity, (COUNT))
 
 /**
  * Clear the map.
- * @param P map
+ * @param MAP map
  */
-#define MAP_CLEAR(P) CARR_MAP_DISPATCH((P), clear, (P))
+#define MAP_CLEAR(MAP) CARR_MAP_DISPATCH_NO_ARGS((MAP), clear)
 
 /**
  * Free the map.
- * @param P map
+ * @param MAP map
  */
-#define MAP_FREE(P) ((P) == NULL ? 0 : CARR_MAP_DISPATCH((P), free, (P)), (void)((P) = NULL))
+#define MAP_FREE(MAP) ((MAP).vptr == NULL ? (void)0 : CARR_MAP_DISPATCH_NO_ARGS((MAP), free))
 
 #endif // C_ARRAY_UTIL_H
diff --git a/src/java.desktop/share/native/common/java2d/vulkan/VKAllocator.c b/src/java.desktop/share/native/common/java2d/vulkan/VKAllocator.c
index 668f4271db99..42c1b9f12910 100644
--- a/src/java.desktop/share/native/common/java2d/vulkan/VKAllocator.c
+++ b/src/java.desktop/share/native/common/java2d/vulkan/VKAllocator.c
@@ -227,10 +227,10 @@ static uint32_t VKAllocator_AllocatePage(VKAllocator* alloc, uint32_t memoryType
     Page* page;
     if (alloc->freePageIndex != NO_PAGE_INDEX) {
         index = alloc->freePageIndex;
-        page = &alloc->pages[index];
+        page = &alloc->pages.data[index];
         alloc->freePageIndex = page->nextFreePage;
     } else {
-        index = ARRAY_SIZE(alloc->pages);
+        index = alloc->pages.size;
         VK_RUNTIME_ASSERT(index < MAX_PAGES);
         ARRAY_PUSH_BACK(alloc->pages) = (Page) {};
         page = &ARRAY_LAST(alloc->pages);
@@ -272,7 +272,7 @@ static uint32_t VKAllocator_PopFreeBlockPair(SharedPageData* data, uint32_t leve
     uint32_t pairIndex = data->freeLevelIndices[level];
     if (pairIndex != 0) {
         // Pop existing free block pair.
-        BlockPair pair = data->blockPairs[pairIndex-1];
+        BlockPair pair = data->blockPairs.data[pairIndex-1];
         assert(pair.firstFree ^ pair.secondFree); // Only one must be free.
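
[Aside, not part of the patch: a BlockPair is linked into a per-level free list only while exactly
one of its two buddy blocks is free, hence the XOR in the assert above; once the second buddy is
freed as well, the pair is merged back into its parent instead, as VKAllocator_PushFreeBlockPair
below shows.]
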
data->freeLevelIndices[level] = pair.nextFree; return pairIndex; @@ -282,15 +282,15 @@ static uint32_t VKAllocator_PopFreeBlockPair(SharedPageData* data, uint32_t leve BlockPair* pair; if (data->freeBlockPairIndex != 0) { pairIndex = data->freeBlockPairIndex; - pair = &data->blockPairs[pairIndex-1]; + pair = &data->blockPairs.data[pairIndex-1]; data->freeBlockPairIndex = pair->nextFree; } else { ARRAY_PUSH_BACK(data->blockPairs) = (BlockPair) {}; - pairIndex = ARRAY_SIZE(data->blockPairs); - pair = &data->blockPairs[pairIndex-1]; + pairIndex = data->blockPairs.size; + pair = &data->blockPairs.data[pairIndex-1]; } // Subdivide parent block. - BlockPair* parent = &data->blockPairs[parentIndex-1]; + BlockPair* parent = &data->blockPairs.data[parentIndex-1]; assert(parent->firstFree || parent->secondFree); *pair = (BlockPair) { .offset = parent->offset, @@ -320,7 +320,7 @@ static VkBool32 VKAllocator_PushFreeBlockPair(SharedPageData* data, BlockPair* p // Merge. uint32_t parentIndex = pair->parent; assert(parentIndex != 0); - BlockPair* parent = &data->blockPairs[parentIndex-1]; + BlockPair* parent = &data->blockPairs.data[parentIndex-1]; if (pair->offset == parent->offset) { assert(!parent->firstFree); parent->firstFree = 1; @@ -333,14 +333,14 @@ static VkBool32 VKAllocator_PushFreeBlockPair(SharedPageData* data, BlockPair* p data->freeLevelIndices[level] = pair->nextFree; } else { assert(data->freeLevelIndices[level] != 0); - BlockPair* b = &data->blockPairs[data->freeLevelIndices[level]-1]; + BlockPair* b = &data->blockPairs.data[data->freeLevelIndices[level]-1]; for (;;) { if (b->nextFree == pairIndex) { b->nextFree = pair->nextFree; break; } assert(b->nextFree != 0); - b = &data->blockPairs[b->nextFree-1]; + b = &data->blockPairs.data[b->nextFree-1]; } } // Return block pair struct to pool. @@ -394,7 +394,7 @@ static AllocationResult VKAllocator_AllocateForResource(VKMemoryRequirements* re SharedPageData* data; uint32_t pairIndex; while (pageIndex != NO_PAGE_INDEX) { - page = &alloc->pages[pageIndex]; + page = &alloc->pages.data[pageIndex]; data = page->sharedPageData; pairIndex = VKAllocator_PopFreeBlockPair(data, level); if (pairIndex != 0) break; @@ -407,7 +407,7 @@ static AllocationResult VKAllocator_AllocateForResource(VKMemoryRequirements* re else if (pageLevel > MAX_BLOCK_LEVEL) pool->allocationLevelTracker = (pageLevel = MAX_BLOCK_LEVEL) * 2 + 1; pageIndex = VKAllocator_AllocatePage(alloc, memoryType, BLOCK_SIZE << pageLevel, VK_NULL_HANDLE, VK_NULL_HANDLE); if (pageIndex == NO_PAGE_INDEX) return (AllocationResult) {{0}, VK_NULL_HANDLE}; - page = &alloc->pages[pageIndex]; + page = &alloc->pages.data[pageIndex]; data = page->sharedPageData = (SharedPageData*) calloc(1, sizeof(SharedPageData)); VK_RUNTIME_ASSERT(page->sharedPageData); data->memoryType = memoryType; @@ -425,7 +425,7 @@ static AllocationResult VKAllocator_AllocateForResource(VKMemoryRequirements* re assert(pairIndex != 0); } // Take the block. - BlockPair* pair = &data->blockPairs[pairIndex-1]; + BlockPair* pair = &data->blockPairs.data[pairIndex-1]; result.handle.page = pageIndex; result.handle.pair = pairIndex; // No need to check alignment, all blocks are aligned on their size. @@ -442,7 +442,7 @@ static AllocationResult VKAllocator_AllocateForResource(VKMemoryRequirements* re // Dedicated allocation. 
uint32_t pageIndex = VKAllocator_AllocatePage(alloc, memoryType, size, image, buffer); if (pageIndex == NO_PAGE_INDEX) return (AllocationResult) {{0}, VK_NULL_HANDLE}; - Page* page = &alloc->pages[pageIndex]; + Page* page = &alloc->pages.data[pageIndex]; page->dedicatedSize = size; return (AllocationResult) { .handle = { @@ -486,11 +486,11 @@ void VKAllocator_Free(VKAllocator* allocator, VKMemory memory) { assert(allocator != NULL); if (memory == VK_NULL_HANDLE) return; MemoryHandle handle = { .value = (uint64_t) memory }; - Page* page = &allocator->pages[handle.page]; + Page* page = &allocator->pages.data[handle.page]; if (handle.pair != 0) { // Return block into shared page. SharedPageData* data = page->sharedPageData; - BlockPair* pair = &data->blockPairs[handle.pair-1]; + BlockPair* pair = &data->blockPairs.data[handle.pair-1]; if ((pair->offset << 1U) == handle.offset) pair->firstFree = 1; else pair->secondFree = 1; VkBool32 cleared = VKAllocator_PushFreeBlockPair(data, pair, handle.pair, handle.level); @@ -502,14 +502,14 @@ void VKAllocator_Free(VKAllocator* allocator, VKMemory memory) { Pool* pool = &allocator->pools[data->memoryType]; if (pool->sharedPagesIndex != handle.page) { assert(pool->sharedPagesIndex != NO_PAGE_INDEX); - Page* p = &allocator->pages[pool->sharedPagesIndex]; + Page* p = &allocator->pages.data[pool->sharedPagesIndex]; for (;;) { if (p->sharedPageData->nextPageIndex == handle.page) { p->sharedPageData->nextPageIndex = data->nextPageIndex; break; } assert(p->sharedPageData->nextPageIndex != 0); - p = &allocator->pages[p->sharedPageData->nextPageIndex]; + p = &allocator->pages.data[p->sharedPageData->nextPageIndex]; } VKAllocator_FreePage(allocator, page, handle.page); free(data); @@ -527,7 +527,7 @@ VkMappedMemoryRange VKAllocator_GetMemoryRange(VKAllocator* allocator, VKMemory MemoryHandle handle = { .value = (uint64_t) memory }; return (VkMappedMemoryRange) { .sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, - .memory = allocator->pages[handle.page].memory, + .memory = allocator->pages.data[handle.page].memory, .offset = handle.offset * BLOCK_SIZE, .size = handle.level == 31 ? 
VK_WHOLE_SIZE : BLOCK_SIZE << handle.level }; @@ -535,7 +535,7 @@ VkMappedMemoryRange VKAllocator_GetMemoryRange(VKAllocator* allocator, VKMemory void* VKAllocator_Map(VKAllocator* allocator, VKMemory memory) { assert(allocator != NULL && memory != VK_NULL_HANDLE); MemoryHandle handle = { .value = (uint64_t) memory }; - Page* page = &allocator->pages[handle.page]; + Page* page = &allocator->pages.data[handle.page]; void *p; if (handle.pair != 0) { if (page->sharedPageData->mappedData == NULL) { @@ -553,7 +553,7 @@ void* VKAllocator_Map(VKAllocator* allocator, VKMemory memory) { void VKAllocator_Unmap(VKAllocator* allocator, VKMemory memory) { assert(allocator != NULL && memory != VK_NULL_HANDLE); MemoryHandle handle = { .value = (uint64_t) memory }; - Page* page = &allocator->pages[handle.page]; + Page* page = &allocator->pages.data[handle.page]; if (handle.pair == 0) allocator->device->vkUnmapMemory(allocator->device->handle, page->memory); } void VKAllocator_Flush(VKAllocator* allocator, VKMemory memory, VkDeviceSize offset, VkDeviceSize size) { @@ -593,13 +593,13 @@ void VKAllocator_Destroy(VKAllocator* allocator) { for (uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; i++) { uint32_t pageIndex; while ((pageIndex = allocator->pools[i].sharedPagesIndex) != NO_PAGE_INDEX) { - Page* page = &allocator->pages[pageIndex]; + Page* page = &allocator->pages.data[pageIndex]; SharedPageData* data = page->sharedPageData; #ifdef DEBUG // Check that all shared allocations were freed. for (uint32_t j = MAX_BLOCK_LEVEL;; j--) { if (data->freeLevelIndices[j] != 0) { - BlockPair* pair = &data->blockPairs[data->freeLevelIndices[j]-1]; + BlockPair* pair = &data->blockPairs.data[data->freeLevelIndices[j]-1]; if (pair->parent == 0) break; else VK_FATAL_ERROR("VKAllocator_Destroy: leaked memory in shared page"); } diff --git a/src/java.desktop/share/native/common/java2d/vulkan/VKCapabilityUtil.h b/src/java.desktop/share/native/common/java2d/vulkan/VKCapabilityUtil.h index 3d084879cb83..2eb364a0a178 100644 --- a/src/java.desktop/share/native/common/java2d/vulkan/VKCapabilityUtil.h +++ b/src/java.desktop/share/native/common/java2d/vulkan/VKCapabilityUtil.h @@ -70,17 +70,17 @@ static void VKNamedEntry_Match(VKNamedEntry* list, pchar all, uint32_t count, si } } -static ARRAY(pchar) VKNamedEntry_CollectNames(const VKNamedEntry* list) { - ARRAY(pchar) result = NULL; +static pchar_array_t VKNamedEntry_CollectNames(const VKNamedEntry* list) { + pchar_array_t result = {0}; for (; list != NULL; list = list->next) { if (list->found) ARRAY_PUSH_BACK(result) = list->name; } return result; } -static void VKCapabilityUtil_LogErrors(int level, ARRAY(pchar) errors) { - for (uint32_t i = 0; i < ARRAY_SIZE(errors); i++) { - J2dRlsTraceLn(level, " %s", errors[i]); +static void VKCapabilityUtil_LogErrors(int level, pchar_array_t errors) { + for (uint32_t i = 0; i < errors.size; i++) { + J2dRlsTraceLn(level, " %s", errors.data[i]); } } diff --git a/src/java.desktop/share/native/common/java2d/vulkan/VKComposites.c b/src/java.desktop/share/native/common/java2d/vulkan/VKComposites.c index 5520ceca79e1..4b800c9e4744 100644 --- a/src/java.desktop/share/native/common/java2d/vulkan/VKComposites.c +++ b/src/java.desktop/share/native/common/java2d/vulkan/VKComposites.c @@ -48,7 +48,7 @@ static bool equals(const void* ap, const void* bp) { VKComposites VKComposites_Create() { const VKCompositeMode NEXT_FREE_MODE = ALPHA_COMPOSITE_GROUP + 1; - VKComposites composites = { NULL }; + VKComposites composites = {0}; HASH_MAP_REHASH(composites.map, 
linear_probing, &equals, &hash, NEXT_FREE_MODE + 1, 10, 0.75); VKComposites_AddState(&composites, LOGIC_COMPOSITE_XOR, (VKCompositeState) { diff --git a/src/java.desktop/share/native/common/java2d/vulkan/VKDevice.c b/src/java.desktop/share/native/common/java2d/vulkan/VKDevice.c index 15236a1db9cd..18b05016f73e 100644 --- a/src/java.desktop/share/native/common/java2d/vulkan/VKDevice.c +++ b/src/java.desktop/share/native/common/java2d/vulkan/VKDevice.c @@ -53,7 +53,7 @@ static const char* physicalDeviceTypeString(VkPhysicalDeviceType type) { } static VkBool32 VKDevice_CheckAndAddFormat(VKEnv* vk, VkPhysicalDevice physicalDevice, - ARRAY(jint)* supportedFormats, VkFormat format, const char* name) { + jint_array_t* supportedFormats, VkFormat format, const char* name) { VkFormatProperties formatProperties; vk->vkGetPhysicalDeviceFormatProperties(physicalDevice, format, &formatProperties); static const VkFormatFeatureFlags SAMPLED_FLAGS = VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | @@ -100,7 +100,7 @@ void VKDevice_CheckAndAdd(VKEnv* vk, VkPhysicalDevice physicalDevice) { VK_IF_ERROR(vk->vkEnumerateDeviceExtensionProperties(physicalDevice, NULL, &extensionCount, allExtensions)) return; // Check API version. - ARRAY(pchar) errors = NULL; + pchar_array_t errors = {0}; jint caps = 0; J2dRlsTraceLn(J2D_TRACE_INFO, "%s (%d.%d.%d, %s)", (const char *) deviceProperties2.properties.deviceName, @@ -183,10 +183,10 @@ void VKDevice_CheckAndAdd(VKEnv* vk, VkPhysicalDevice physicalDevice) { VKSampledSrcType* SRCTYPE_3BYTE = &sampledSrcTypes.table[sun_java2d_vulkan_VKSwToSurfaceBlit_SRCTYPE_3BYTE]; VKSampledSrcType* SRCTYPE_565 = &sampledSrcTypes.table[sun_java2d_vulkan_VKSwToSurfaceBlit_SRCTYPE_565]; VKSampledSrcType* SRCTYPE_555 = &sampledSrcTypes.table[sun_java2d_vulkan_VKSwToSurfaceBlit_SRCTYPE_555]; - ARRAY(jint) supportedFormats = NULL; + jint_array_t supportedFormats = {0}; #define CHECK_AND_ADD_FORMAT(FORMAT) VKDevice_CheckAndAddFormat(vk, physicalDevice, &supportedFormats, FORMAT, #FORMAT) if (CHECK_AND_ADD_FORMAT(VK_FORMAT_B8G8R8A8_UNORM) && SRCTYPE_4BYTE->format == VK_FORMAT_UNDEFINED) { - supportedFormats[0] |= CAP_PRESENTABLE_BIT; // TODO Check presentation support. + supportedFormats.data[0] |= CAP_PRESENTABLE_BIT; // TODO Check presentation support. *SRCTYPE_4BYTE = (VKSampledSrcType) { VK_FORMAT_B8G8R8A8_UNORM, { VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_A }}; } @@ -238,7 +238,7 @@ void VKDevice_CheckAndAdd(VKEnv* vk, VkPhysicalDevice physicalDevice) { } // Check found errors. 
- if (errors != NULL) { + if (errors.size != 0) { J2dRlsTraceLn(J2D_TRACE_WARNING, " Device is not supported:"); VKCapabilityUtil_LogErrors(J2D_TRACE_WARNING, errors); ARRAY_FREE(errors); @@ -264,7 +264,7 @@ void VKDevice_CheckAndAdd(VKEnv* vk, VkPhysicalDevice physicalDevice) { .enabledLayers = VKNamedEntry_CollectNames(layers), .enabledExtensions = VKNamedEntry_CollectNames(extensions), .sampledSrcTypes = sampledSrcTypes, - .supportedFormats = supportedFormats, + .supportedFormats = { .as_untyped = supportedFormats.as_untyped }, .caps = caps }; } @@ -333,10 +333,10 @@ Java_sun_java2d_vulkan_VKGPU_init(JNIEnv *env, jclass jClass, jlong jDevice) { .flags = 0, .queueCreateInfoCount = 1, .pQueueCreateInfos = &queueCreateInfo, - .enabledLayerCount = ARRAY_SIZE(device->enabledLayers), - .ppEnabledLayerNames = (const char *const *) device->enabledLayers, - .enabledExtensionCount = ARRAY_SIZE(device->enabledExtensions), - .ppEnabledExtensionNames = (const char *const *) device->enabledExtensions, + .enabledLayerCount = device->enabledLayers.size, + .ppEnabledLayerNames = (const char *const *) device->enabledLayers.data, + .enabledExtensionCount = device->enabledExtensions.size, + .ppEnabledExtensionNames = (const char *const *) device->enabledExtensions.data, .pEnabledFeatures = &features10 }; diff --git a/src/java.desktop/share/native/common/java2d/vulkan/VKDevice.h b/src/java.desktop/share/native/common/java2d/vulkan/VKDevice.h index 66d32ec97e3b..283b0d34418a 100644 --- a/src/java.desktop/share/native/common/java2d/vulkan/VKDevice.h +++ b/src/java.desktop/share/native/common/java2d/vulkan/VKDevice.h @@ -50,11 +50,11 @@ struct VKDevice { VkPhysicalDeviceType type; VkDeviceSize nonCoherentAtomSize; uint32_t queueFamily; - ARRAY(pchar) enabledLayers; - ARRAY(pchar) enabledExtensions; + pchar_array_t enabledLayers; + pchar_array_t enabledExtensions; VkQueue queue; VKSampledSrcTypes sampledSrcTypes; - ARRAY(jint) supportedFormats; + jint_array_t supportedFormats; jint caps; VKAllocator* allocator; diff --git a/src/java.desktop/share/native/common/java2d/vulkan/VKEnv.c b/src/java.desktop/share/native/common/java2d/vulkan/VKEnv.c index 8b0825262192..bde327db7bf9 100644 --- a/src/java.desktop/share/native/common/java2d/vulkan/VKEnv.c +++ b/src/java.desktop/share/native/common/java2d/vulkan/VKEnv.c @@ -68,9 +68,9 @@ void VKDevice_Reset(VKDevice* device); static void VKEnv_Destroy(VKEnv* vk) { if (vk == NULL) return; - if (vk->devices != NULL) { - for (uint32_t i = 0; i < ARRAY_SIZE(vk->devices); i++) { - VKDevice_Reset(&vk->devices[i]); + if (vk->devices.size != 0) { + for (uint32_t i = 0; i < vk->devices.size; i++) { + VKDevice_Reset(&vk->devices.data[i]); } ARRAY_FREE(vk->devices); } @@ -161,7 +161,7 @@ static VKEnv* VKEnv_Create(PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr, VKPl VKNamedEntry_LogAll("instance extensions", allExtensions[0].extensionName, extensionCount, sizeof(VkExtensionProperties)); // Check API version. - ARRAY(pchar) errors = NULL; + pchar_array_t errors = {0}; if (apiVersion < REQUIRED_VULKAN_VERSION) ARRAY_PUSH_BACK(errors) = "Unsupported API version"; // Check layers. @@ -184,7 +184,7 @@ static VKEnv* VKEnv_Create(PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr, VKPl VKNamedEntry_LogFound(extensions); // Check found errors. 
- if (errors != NULL) { + if (errors.size != 0) { J2dRlsTraceLn(J2D_TRACE_ERROR, " Vulkan is not supported:"); VKCapabilityUtil_LogErrors(J2D_TRACE_ERROR, errors); ARRAY_FREE(errors); @@ -228,8 +228,8 @@ static VKEnv* VKEnv_Create(PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr, VKPl .presentationSupported = presentationSupported }; - ARRAY(pchar) enabledLayers = VKNamedEntry_CollectNames(layers); - ARRAY(pchar) enabledExtensions = VKNamedEntry_CollectNames(extensions); + pchar_array_t enabledLayers = VKNamedEntry_CollectNames(layers); + pchar_array_t enabledExtensions = VKNamedEntry_CollectNames(extensions); VkApplicationInfo applicationInfo = { .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO, @@ -246,10 +246,10 @@ static VKEnv* VKEnv_Create(PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr, VKPl .pNext = pNext, .flags = 0, .pApplicationInfo = &applicationInfo, - .enabledLayerCount = ARRAY_SIZE(enabledLayers), - .ppEnabledLayerNames = enabledLayers, - .enabledExtensionCount = ARRAY_SIZE(enabledExtensions), - .ppEnabledExtensionNames = enabledExtensions + .enabledLayerCount = enabledLayers.size, + .ppEnabledLayerNames = enabledLayers.data, + .enabledExtensionCount = enabledExtensions.size, + .ppEnabledExtensionNames = enabledExtensions.data }; VK_IF_ERROR(vkCreateInstance(&instanceCreateInfo, NULL, &vk->instance)) { @@ -320,7 +320,7 @@ static VkBool32 VKEnv_FindDevices(VKEnv* vk) { for (uint32_t i = 0; i < count; i++) { VKDevice_CheckAndAdd(vk, physicalDevices[i]); } - if (ARRAY_SIZE(vk->devices) == 0) { + if (vk->devices.size == 0) { J2dRlsTraceLn(J2D_TRACE_ERROR, "Vulkan: No compatible device found"); return JNI_FALSE; } @@ -332,17 +332,17 @@ static jobjectArray createJavaGPUs(JNIEnv *env, VKEnv* vk) { if (deviceClass == NULL) return NULL; jmethodID deviceConstructor = (*env)->GetMethodID(env, deviceClass, "<init>", "(JLjava/lang/String;II[I)V"); if (deviceConstructor == NULL) return NULL; - jobjectArray deviceArray = (*env)->NewObjectArray(env, ARRAY_SIZE(vk->devices), deviceClass, NULL); + jobjectArray deviceArray = (*env)->NewObjectArray(env, vk->devices.size, deviceClass, NULL); if (deviceArray == NULL) return NULL; - for (uint32_t i = 0; i < ARRAY_SIZE(vk->devices); i++) { - jstring name = JNU_NewStringPlatform(env, vk->devices[i].name); + for (uint32_t i = 0; i < vk->devices.size; i++) { + jstring name = JNU_NewStringPlatform(env, vk->devices.data[i].name); if (name == NULL) return NULL; - jintArray supportedFormats = (*env)->NewIntArray(env, ARRAY_SIZE(vk->devices[i].supportedFormats)); + jintArray supportedFormats = (*env)->NewIntArray(env, vk->devices.data[i].supportedFormats.size); if (supportedFormats == NULL) return NULL; - (*env)->SetIntArrayRegion(env, supportedFormats, 0, ARRAY_SIZE(vk->devices[i].supportedFormats), vk->devices[i].supportedFormats); + (*env)->SetIntArrayRegion(env, supportedFormats, 0, vk->devices.data[i].supportedFormats.size, vk->devices.data[i].supportedFormats.data); jobject device = (*env)->NewObject(env, deviceClass, deviceConstructor, - ptr_to_jlong(&vk->devices[i]), name, vk->devices[i].type, - vk->devices[i].caps, supportedFormats); + ptr_to_jlong(&vk->devices.data[i]), name, vk->devices.data[i].type, + vk->devices.data[i].caps, supportedFormats); if (device == NULL) return NULL; (*env)->SetObjectArrayElement(env, deviceArray, i, device); } diff --git a/src/java.desktop/share/native/common/java2d/vulkan/VKImage.c b/src/java.desktop/share/native/common/java2d/vulkan/VKImage.c index e467919a4c5a..9e21f870fc29 ---
a/src/java.desktop/share/native/common/java2d/vulkan/VKImage.c +++ b/src/java.desktop/share/native/common/java2d/vulkan/VKImage.c @@ -142,7 +142,7 @@ void VKImage_LoadBuffer(VKDevice* device, VKImage* image, VKBuffer* buffer, void VKImage_Destroy(VKDevice* device, VKImage* image) { assert(device != NULL && device->allocator != NULL); if (image == NULL) return; - if (image->viewMap != NULL) { + if (image->viewMap.size != 0) { for (const VKImageViewKey* k = NULL; (k = MAP_NEXT_KEY(image->viewMap, k)) != NULL;) { const VKImageViewInfo* viewInfo = MAP_FIND(image->viewMap, *k); if (viewInfo->descriptorSet != VK_NULL_HANDLE) { @@ -150,8 +150,8 @@ void VKImage_Destroy(VKDevice* device, VKImage* image) { } device->vkDestroyImageView(device->handle, viewInfo->view, NULL); } - MAP_FREE(image->viewMap); } + MAP_FREE(image->viewMap); device->vkDestroyImage(device->handle, image->handle, NULL); VKAllocator_Free(device->allocator, image->memory); free(image); diff --git a/src/java.desktop/share/native/common/java2d/vulkan/VKPipelines.c b/src/java.desktop/share/native/common/java2d/vulkan/VKPipelines.c index 005141c03e52..4a00cea7ecb3 100644 --- a/src/java.desktop/share/native/common/java2d/vulkan/VKPipelines.c +++ b/src/java.desktop/share/native/common/java2d/vulkan/VKPipelines.c @@ -525,8 +525,8 @@ void VKPipelines_DestroyContext(VKPipelineContext* pipelineContext) { VKDevice* device = pipelineContext->device; assert(device != NULL); - for (uint32_t i = 0; i < ARRAY_SIZE(pipelineContext->renderPassContexts); i++) { - VKPipelines_DestroyRenderPassContext(pipelineContext->renderPassContexts[i]); + for (uint32_t i = 0; i < pipelineContext->renderPassContexts.size; i++) { + VKPipelines_DestroyRenderPassContext(pipelineContext->renderPassContexts.data[i]); } ARRAY_FREE(pipelineContext->renderPassContexts); @@ -546,9 +546,9 @@ void VKPipelines_DestroyContext(VKPipelineContext* pipelineContext) { VKRenderPassContext* VKPipelines_GetRenderPassContext(VKPipelineContext* pipelineContext, VkFormat format) { assert(pipelineContext != NULL && pipelineContext->device != NULL); - for (uint32_t i = 0; i < ARRAY_SIZE(pipelineContext->renderPassContexts); i++) { - if (pipelineContext->renderPassContexts[i]->format == format) { - return pipelineContext->renderPassContexts[i]; + for (uint32_t i = 0; i < pipelineContext->renderPassContexts.size; i++) { + if (pipelineContext->renderPassContexts.data[i]->format == format) { + return pipelineContext->renderPassContexts.data[i]; } } // Not found, create. 
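For reference, the container API change driving most of the hunks in this patch looks like this in isolation. This is a minimal sketch against CArrayUtil.h, not part of the patch; the `lengths` variable and `array_api_sketch` function are hypothetical:

#include "CArrayUtil.h"

static void array_api_sketch(void) {
    ARRAY(int) lengths = {0};                     // was: ARRAY(int) lengths = NULL;
    ARRAY_PUSH_BACK(lengths) = 42;                // push-back is unchanged
    for (size_t i = 0; i < lengths.size; i++) {   // was: i < ARRAY_SIZE(lengths)
        lengths.data[i]++;                        // was: lengths[i]++
    }
    ARRAY_FREE(lengths);                          // safe on a zero-initialized array
}
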
diff --git a/src/java.desktop/share/native/common/java2d/vulkan/VKRenderQueue.c b/src/java.desktop/share/native/common/java2d/vulkan/VKRenderQueue.c index d54ca3808584..6420b712cd5c 100644 --- a/src/java.desktop/share/native/common/java2d/vulkan/VKRenderQueue.c +++ b/src/java.desktop/share/native/common/java2d/vulkan/VKRenderQueue.c @@ -450,19 +450,19 @@ JNIEXPORT void JNICALL Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer jint count = NEXT_INT(b); J2dRlsTraceLn(J2D_TRACE_VERBOSE, "VKRenderQueue_flushBuffer: SET_SHAPE_CLIP_SPANS"); - size_t offset = ARRAY_SIZE(VKRenderer_GetContext()->clipSpanVertices); + size_t offset = VKRenderer_GetContext()->clipSpanVertices.size; ARRAY_RESIZE(VKRenderer_GetContext()->clipSpanVertices, offset + count * 6); for (jint i = 0; i < count; i++) { jint x1 = NEXT_INT(b); jint y1 = NEXT_INT(b); jint x2 = NEXT_INT(b); jint y2 = NEXT_INT(b); - VKRenderer_GetContext()->clipSpanVertices[offset + i * 6 + 0] = (VKIntVertex) {x1, y1}; - VKRenderer_GetContext()->clipSpanVertices[offset + i * 6 + 1] = (VKIntVertex) {x2, y1}; - VKRenderer_GetContext()->clipSpanVertices[offset + i * 6 + 2] = (VKIntVertex) {x2, y2}; - VKRenderer_GetContext()->clipSpanVertices[offset + i * 6 + 3] = (VKIntVertex) {x2, y2}; - VKRenderer_GetContext()->clipSpanVertices[offset + i * 6 + 4] = (VKIntVertex) {x1, y2}; - VKRenderer_GetContext()->clipSpanVertices[offset + i * 6 + 5] = (VKIntVertex) {x1, y1}; + VKRenderer_GetContext()->clipSpanVertices.data[offset + i * 6 + 0] = (VKIntVertex) {x1, y1}; + VKRenderer_GetContext()->clipSpanVertices.data[offset + i * 6 + 1] = (VKIntVertex) {x2, y1}; + VKRenderer_GetContext()->clipSpanVertices.data[offset + i * 6 + 2] = (VKIntVertex) {x2, y2}; + VKRenderer_GetContext()->clipSpanVertices.data[offset + i * 6 + 3] = (VKIntVertex) {x2, y2}; + VKRenderer_GetContext()->clipSpanVertices.data[offset + i * 6 + 4] = (VKIntVertex) {x1, y2}; + VKRenderer_GetContext()->clipSpanVertices.data[offset + i * 6 + 5] = (VKIntVertex) {x1, y1}; } VKRenderer_GetContext()->clipModCount++; } @@ -804,7 +804,7 @@ JNIEXPORT void JNICALL Java_sun_java2d_vulkan_VKRenderQueue_flushBuffer // Flush all pending GPU work VKEnv* vk = VKEnv_GetInstance(); - for (uint32_t i = 0; i < ARRAY_SIZE(vk->devices); i++) { - VKRenderer_Flush(vk->devices[i].renderer); + for (uint32_t i = 0; i < vk->devices.size; i++) { + VKRenderer_Flush(vk->devices.data[i].renderer); } } diff --git a/src/java.desktop/share/native/common/java2d/vulkan/VKRenderer.c b/src/java.desktop/share/native/common/java2d/vulkan/VKRenderer.c index 70ff8fac77af..54c025cc66ca 100644 --- a/src/java.desktop/share/native/common/java2d/vulkan/VKRenderer.c +++ b/src/java.desktop/share/native/common/java2d/vulkan/VKRenderer.c @@ -76,7 +76,7 @@ RING_BUFFER(struct PoolEntry_ ## NAME { \ * } */ #define POOL_DRAIN_FOR(RENDERER, NAME, ENTRY) for (struct PoolEntry_ ## NAME *(ENTRY); VKRenderer_CheckPoolDrain( \ - (RENDERER)->NAME, (ENTRY) = RING_BUFFER_FRONT((RENDERER)->NAME)); RING_BUFFER_POP_FRONT((RENDERER)->NAME)) + &(RENDERER)->NAME, (ENTRY) = RING_BUFFER_FRONT((RENDERER)->NAME)); RING_BUFFER_POP_FRONT((RENDERER)->NAME)) /** * Free pool memory. It doesn't destroy remaining items. 
@@ -190,7 +190,7 @@ static VKRenderingContext context = { }, .clipModCount = 1, .clipRect = NO_CLIP, - .clipSpanVertices = NULL + .clipSpanVertices = { .as_untyped = {0} } }; /** @@ -221,8 +221,9 @@ static VkBool32 VKRenderer_CheckPoolEntryAvailable(VKRenderer* renderer, void* e static VkBool32 VKRenderer_CheckPoolDrain(void* pool, void* entry) { if (entry != NULL) return VK_TRUE; if (pool != NULL) { - RING_BUFFER(char) ring_buffer = pool; - RING_BUFFER_FREE(ring_buffer); + // A small hack that should be fine, since we won't be using the element size in the deallocation + RING_BUFFER(char) ring_buffer_as_char = { .as_untyped = *(untyped_ring_buffer_t*)pool }; + RING_BUFFER_FREE(ring_buffer_as_char); } return VK_FALSE; } @@ -296,10 +297,10 @@ static VkDescriptorSet VKRenderer_AllocateImageDescriptorSet(VKRenderer* rendere } void VKRenderer_CreateImageDescriptorSet(VKRenderer* renderer, VkDescriptorPool* descriptorPool, VkDescriptorSet* set) { VKDevice* device = renderer->device; - for (int i = ARRAY_SIZE(renderer->imageDescriptorPools) - 1; i >= 0; i--) { - *set = VKRenderer_AllocateImageDescriptorSet(renderer, renderer->imageDescriptorPools[i]); + for (int i = renderer->imageDescriptorPools.size - 1; i >= 0; i--) { + *set = VKRenderer_AllocateImageDescriptorSet(renderer, renderer->imageDescriptorPools.data[i]); if (*set != VK_NULL_HANDLE) { - *descriptorPool = renderer->imageDescriptorPools[i]; + *descriptorPool = renderer->imageDescriptorPools.data[i]; return; } } @@ -446,19 +447,19 @@ void VKRenderer_Destroy(VKRenderer* renderer) { device->vkDestroyBufferView(device->handle, entry->value.view, NULL); device->vkDestroyBuffer(device->handle, entry->value.buffer.handle, NULL); } - for (uint32_t i = 0; i < ARRAY_SIZE(renderer->bufferMemoryPages); i++) { - VKAllocator_Free(device->allocator, renderer->bufferMemoryPages[i]); + for (uint32_t i = 0; i < renderer->bufferMemoryPages.size; i++) { + VKAllocator_Free(device->allocator, renderer->bufferMemoryPages.data[i]); } ARRAY_FREE(renderer->bufferMemoryPages); - for (uint32_t i = 0; i < ARRAY_SIZE(renderer->descriptorPools); i++) { - device->vkDestroyDescriptorPool(device->handle, renderer->descriptorPools[i], NULL); + for (uint32_t i = 0; i < renderer->descriptorPools.size; i++) { + device->vkDestroyDescriptorPool(device->handle, renderer->descriptorPools.data[i], NULL); } ARRAY_FREE(renderer->descriptorPools); VKTexturePool_Dispose(renderer->texturePool); - for (uint32_t i = 0; i < ARRAY_SIZE(renderer->imageDescriptorPools); i++) { - device->vkDestroyDescriptorPool(device->handle, renderer->imageDescriptorPools[i], NULL); + for (uint32_t i = 0; i < renderer->imageDescriptorPools.size; i++) { + device->vkDestroyDescriptorPool(device->handle, renderer->imageDescriptorPools.data[i], NULL); } ARRAY_FREE(renderer->imageDescriptorPools); @@ -512,7 +513,7 @@ void VKRenderer_Flush(VKRenderer* renderer) { if (renderer == NULL) return; VKRenderer_CleanupPendingResources(renderer); VKDevice* device = renderer->device; - size_t pendingPresentations = ARRAY_SIZE(renderer->pendingPresentation.swapchains); + size_t pendingPresentations = renderer->pendingPresentation.swapchains.size; // Submit pending command buffer and semaphores. // Even if there are no commands to be sent, we can submit pending semaphores for presentation synchronization. 
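The `ring_buffer_as_char` trick in VKRenderer_CheckPoolDrain above, like the dependentSurfaces swap later in this file, relies on every typed CArrayUtil container sharing one untyped representation. The real layout lives in CArrayUtil.h and is not shown in this patch; the sketch below is an assumption about its shape, included only to make the .as_untyped punning easier to follow:

/* Hypothetical layout: each typed container is a union over a shared untyped
 * struct, so generic code may copy or free it through .as_untyped whenever the
 * element size is not needed. */
typedef struct {
    void* data;
    size_t size;
    size_t capacity;
} untyped_array_t;

#define ARRAY(T) union { \
    untyped_array_t as_untyped; \
    struct { T* data; size_t size; size_t capacity; }; \
}
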
@@ -539,9 +540,9 @@ void VKRenderer_Flush(VKRenderer* renderer) { VkSubmitInfo submitInfo = { .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, .pNext = &timelineSemaphoreSubmitInfo, - .waitSemaphoreCount = ARRAY_SIZE(renderer->wait.semaphores), - .pWaitSemaphores = renderer->wait.semaphores, - .pWaitDstStageMask = renderer->wait.stages, + .waitSemaphoreCount = renderer->wait.semaphores.size, + .pWaitSemaphores = renderer->wait.semaphores.data, + .pWaitDstStageMask = renderer->wait.stages.data, .commandBufferCount = renderer->commandBuffer != VK_NULL_HANDLE ? 1 : 0, .pCommandBuffers = &renderer->commandBuffer, .signalSemaphoreCount = pendingPresentations > 0 ? 2 : 1, @@ -560,9 +561,9 @@ void VKRenderer_Flush(VKRenderer* renderer) { .waitSemaphoreCount = 1, .pWaitSemaphores = &semaphores[1], .swapchainCount = pendingPresentations, - .pSwapchains = renderer->pendingPresentation.swapchains, - .pImageIndices = renderer->pendingPresentation.indices, - .pResults = renderer->pendingPresentation.results + .pSwapchains = renderer->pendingPresentation.swapchains.data, + .pImageIndices = renderer->pendingPresentation.indices.data, + .pResults = renderer->pendingPresentation.results.data }; VkResult presentResult = device->vkQueuePresentKHR(device->queue, &presentInfo); if (presentResult != VK_SUCCESS) { @@ -612,34 +613,34 @@ static void VKRenderer_ResetDrawing(VKSDOps* surface) { renderPass->vertexCount = 0; renderPass->vertexBufferWriting = (BufferWritingState) { NULL, 0, VK_FALSE }; renderPass->maskFillBufferWriting = (BufferWritingState) { NULL, 0, VK_FALSE }; - if (ARRAY_SIZE(renderPass->flushRanges) > 0) { + if (renderPass->flushRanges.size > 0) { VK_IF_ERROR(surface->device->vkFlushMappedMemoryRanges(surface->device->handle, - ARRAY_SIZE(renderPass->flushRanges), renderPass->flushRanges)) {} + renderPass->flushRanges.size, renderPass->flushRanges.data)) {} ARRAY_RESIZE(renderPass->flushRanges, 0); } - size_t vertexBufferCount = ARRAY_SIZE(renderPass->vertexBuffers); - size_t maskFillBufferCount = ARRAY_SIZE(renderPass->maskFillBuffers); - size_t cleanupQueueCount = ARRAY_SIZE(renderPass->cleanupQueue); + size_t vertexBufferCount = renderPass->vertexBuffers.size; + size_t maskFillBufferCount = renderPass->maskFillBuffers.size; + size_t cleanupQueueCount = renderPass->cleanupQueue.size; for (uint32_t i = 0; i < vertexBufferCount; i++) { - POOL_RETURN(renderer, vertexBufferPool, renderPass->vertexBuffers[i]); + POOL_RETURN(renderer, vertexBufferPool, renderPass->vertexBuffers.data[i]); } for (uint32_t i = 0; i < maskFillBufferCount; i++) { - POOL_RETURN(renderer, maskFillBufferPool, renderPass->maskFillBuffers[i]); + POOL_RETURN(renderer, maskFillBufferPool, renderPass->maskFillBuffers.data[i]); } for (uint32_t i = 0; i < cleanupQueueCount; i++) { - POOL_RETURN(renderer, cleanupQueue, renderPass->cleanupQueue[i]); + POOL_RETURN(renderer, cleanupQueue, renderPass->cleanupQueue.data[i]); } ARRAY_RESIZE(renderPass->vertexBuffers, 0); ARRAY_RESIZE(renderPass->maskFillBuffers, 0); ARRAY_RESIZE(renderPass->cleanupQueue, 0); // Update dependencies on used surfaces. 
- for (uint32_t i = 0, surfaces = (uint32_t) ARRAY_SIZE(renderPass->usedSurfaces); i < surfaces; i++) { - VKSDOps* usedSurface = renderPass->usedSurfaces[i]; - uint32_t newSize = 0, oldSize = (uint32_t) ARRAY_SIZE(usedSurface->dependentSurfaces); + for (uint32_t i = 0, surfaces = (uint32_t) renderPass->usedSurfaces.size; i < surfaces; i++) { + VKSDOps* usedSurface = renderPass->usedSurfaces.data[i]; + uint32_t newSize = 0, oldSize = (uint32_t) usedSurface->dependentSurfaces.size; for (uint32_t j = 0; j < oldSize; j++) { - VKSDOps* s = usedSurface->dependentSurfaces[j]; - if (s != surface) usedSurface->dependentSurfaces[newSize++] = s; + VKSDOps* s = usedSurface->dependentSurfaces.data[j]; + if (s != surface) usedSurface->dependentSurfaces.data[newSize++] = s; } if (newSize != oldSize) ARRAY_RESIZE(usedSurface->dependentSurfaces, newSize); } @@ -654,15 +655,15 @@ static void VKRenderer_ResetDrawing(VKSDOps* surface) { static void VKRenderer_FlushDependentRenderPasses(VKSDOps* surface) { // We're going to clear dependentSurfaces in the end anyway, // so temporarily reset it to NULL to save on removing flushed render passes one-by-one. - ARRAY(VKSDOps*) deps = surface->dependentSurfaces; - surface->dependentSurfaces = NULL; - uint32_t size = (uint32_t) ARRAY_SIZE(deps); + ARRAY(VKSDOps*) deps = { .as_untyped = surface->dependentSurfaces.as_untyped }; + surface->dependentSurfaces.as_untyped = (untyped_array_t){0}; + uint32_t size = (uint32_t) deps.size; if (size > 0) J2dRlsTraceLn(J2D_TRACE_VERBOSE, "VKRenderer_FlushDependentRenderPasses(%p): %d", surface, size); for (uint32_t i = 0; i < size; i++) { - VKRenderer_FlushRenderPass(deps[i]); + VKRenderer_FlushRenderPass(deps.data[i]); } ARRAY_RESIZE(deps, 0); - surface->dependentSurfaces = deps; + surface->dependentSurfaces.as_untyped = deps.as_untyped; } /** @@ -900,8 +901,8 @@ VkBool32 VKRenderer_FlushRenderPass(VKSDOps* surface) { // Update timestamps on used surfaces. surface->lastTimestamp = renderer->writeTimestamp; - for (uint32_t i = 0, surfaces = (uint32_t) ARRAY_SIZE(surface->renderPass->usedSurfaces); i < surfaces; i++) { - surface->renderPass->usedSurfaces[i]->lastTimestamp = renderer->writeTimestamp; + for (uint32_t i = 0, surfaces = (uint32_t) surface->renderPass->usedSurfaces.size; i < surfaces; i++) { + surface->renderPass->usedSurfaces.data[i]->lastTimestamp = renderer->writeTimestamp; } // Insert barriers to prepare surface for rendering. @@ -994,7 +995,7 @@ void VKRenderer_FlushSurface(VKSDOps* surface) { .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, - .image = win->swapchainImages[imageIndex], + .image = win->swapchainImages.data[imageIndex], .subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 } }}; VKBarrierBatch barrierBatch = {1, surface->image->lastStage | VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT}; @@ -1014,7 +1015,7 @@ void VKRenderer_FlushSurface(VKSDOps* surface) { }; device->vkCmdBlitImage(cb, surface->image->handle, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, - win->swapchainImages[imageIndex], VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + win->swapchainImages.data[imageIndex], VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit, VK_FILTER_NEAREST); // Insert barrier to prepare swapchain image for presentation. 
@@ -1027,7 +1028,7 @@ void VKRenderer_FlushSurface(VKSDOps* surface) { .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, - .image = win->swapchainImages[imageIndex], + .image = win->swapchainImages.data[imageIndex], .subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 } }; device->vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &barrier); @@ -1111,7 +1112,7 @@ static uint32_t VKRenderer_AllocateVertices(uint32_t primitives, uint32_t vertic ARRAY_PUSH_BACK(surface->renderPass->flushRanges) = buffer.range; surface->renderPass->vertexBufferWriting.data = writing.state.data = buffer.data; } - assert(ARRAY_SIZE(surface->renderPass->vertexBuffers) > 0); + assert(surface->renderPass->vertexBuffers.size > 0); surface->renderPass->firstVertex = surface->renderPass->vertexCount = 0; surface->device->vkCmdBindVertexBuffers(surface->renderPass->commandBuffer, 0, 1, &(ARRAY_LAST(surface->renderPass->vertexBuffers).handle), &writing.state.offset); @@ -1150,7 +1151,7 @@ static BufferWritingState VKRenderer_AllocateMaskFillBytes(uint32_t size) { ARRAY_PUSH_BACK(surface->renderPass->flushRanges) = buffer.buffer.range; surface->renderPass->maskFillBufferWriting.data = state.data = buffer.buffer.data; } - assert(ARRAY_SIZE(surface->renderPass->maskFillBuffers) > 0); + assert(surface->renderPass->maskFillBuffers.size > 0); surface->device->vkCmdBindDescriptorSets(surface->renderPass->commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, surface->device->renderer->pipelineContext->maskFillPipelineLayout, 0, 1, &ARRAY_LAST(surface->renderPass->maskFillBuffers).descriptorSet, 0, NULL); @@ -1221,7 +1222,7 @@ static void VKRenderer_SetupStencil() { // Clear stencil attachment. VkClearAttachment clearAttachment = { .aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT, - .clearValue.depthStencil.stencil = ARRAY_SIZE(context->clipSpanVertices) > 0 ? + .clearValue.depthStencil.stencil = context->clipSpanVertices.size > 0 ? CLIP_STENCIL_EXCLUDE_VALUE : CLIP_STENCIL_INCLUDE_VALUE }; VkClearRect clearRect = { @@ -1246,11 +1247,11 @@ static void VKRenderer_SetupStencil() { renderPass->vertexBufferWriting.bound = VK_FALSE; // Rasterize clip spans. - uint32_t primitiveCount = ARRAY_SIZE(context->clipSpanVertices) / 3; + uint32_t primitiveCount = context->clipSpanVertices.size / 3; VKIntVertex* vs; for (uint32_t primitivesDrawn = 0; primitivesDrawn < primitiveCount;) { uint32_t currentDraw = VK_DRAW(vs, primitiveCount - primitivesDrawn, 3); - memcpy(vs, context->clipSpanVertices + primitivesDrawn * 3, currentDraw * 3 * sizeof(VKIntVertex)); + memcpy(vs, context->clipSpanVertices.data + primitivesDrawn * 3, currentDraw * 3 * sizeof(VKIntVertex)); primitivesDrawn += currentDraw; } VKRenderer_FlushDraw(surface); @@ -1310,7 +1311,7 @@ VkBool32 VKRenderer_Validate(VKShader shader, VKShaderVariant shaderVariant, VkP VKCompositeMode oldComposite = renderPass->state.composite; VkBool32 clipChanged = renderPass->clipModCount != context.clipModCount; // Init stencil attachment, if needed. 
- if (clipChanged && ARRAY_SIZE(context.clipSpanVertices) > 0 && surface->stencil == NULL) { + if (clipChanged && context.clipSpanVertices.size > 0 && surface->stencil == NULL) { if (surface->renderPass->pendingCommands) VKRenderer_FlushRenderPass(surface); if (!VKSD_ConfigureImageSurfaceStencil(surface)) return VK_FALSE; } @@ -1327,7 +1328,7 @@ VkBool32 VKRenderer_Validate(VKShader shader, VKShaderVariant shaderVariant, VkP surface->device->vkCmdSetScissor(renderPass->commandBuffer, 0, 1, &context.clipRect); if (clipChanged) { VKStencilMode stencilMode = STENCIL_MODE_NONE; - if (ARRAY_SIZE(context.clipSpanVertices) > 0) { + if (context.clipSpanVertices.size > 0) { VKRenderer_SetupStencil(); stencilMode = STENCIL_MODE_ON; } else if (surface->stencil != NULL) { @@ -1537,10 +1538,10 @@ void VKRenderer_AddSurfaceDependency(VKSDOps* src, VKSDOps* dst) { assert(dst->renderPass != NULL); // We don't care much about duplicates in our dependency arrays, // so just make a lazy deduplication attempt by checking the last element. - if (ARRAY_SIZE(src->dependentSurfaces) == 0 || ARRAY_LAST(src->dependentSurfaces) != dst) { + if (src->dependentSurfaces.size == 0 || ARRAY_LAST(src->dependentSurfaces) != dst) { ARRAY_PUSH_BACK(src->dependentSurfaces) = dst; } - if (ARRAY_SIZE(dst->renderPass->usedSurfaces) == 0 || ARRAY_LAST(dst->renderPass->usedSurfaces) != src) { + if (dst->renderPass->usedSurfaces.size == 0 || ARRAY_LAST(dst->renderPass->usedSurfaces) != src) { ARRAY_PUSH_BACK(dst->renderPass->usedSurfaces) = src; } } diff --git a/src/java.desktop/share/native/common/java2d/vulkan/VKSurfaceData.c b/src/java.desktop/share/native/common/java2d/vulkan/VKSurfaceData.c index 3a6ba1b7a573..dca269dcd0d9 100644 --- a/src/java.desktop/share/native/common/java2d/vulkan/VKSurfaceData.c +++ b/src/java.desktop/share/native/common/java2d/vulkan/VKSurfaceData.c @@ -56,7 +56,6 @@ void VKSD_ResetSurface(VKSDOps* vksdo) { if (vksdo->drawableType == VKSD_WINDOW) { VKWinSDOps* vkwinsdo = (VKWinSDOps*) vksdo; ARRAY_FREE(vkwinsdo->swapchainImages); - vkwinsdo->swapchainImages = NULL; if (vkwinsdo->vksdOps.device != NULL && vkwinsdo->swapchain != VK_NULL_HANDLE) { vkwinsdo->vksdOps.device->vkDestroySwapchainKHR(vkwinsdo->vksdOps.device->handle, vkwinsdo->swapchain, NULL); } @@ -285,7 +284,7 @@ VkBool32 VKSD_ConfigureWindowSurface(VKWinSDOps* vkwinsdo) { } ARRAY_RESIZE(vkwinsdo->swapchainImages, swapchainImageCount); VK_IF_ERROR(device->vkGetSwapchainImagesKHR(device->handle, vkwinsdo->swapchain, - &swapchainImageCount, vkwinsdo->swapchainImages)) { + &swapchainImageCount, vkwinsdo->swapchainImages.data)) { return VK_FALSE; } return VK_TRUE; diff --git a/src/java.desktop/share/native/common/java2d/vulkan/VKUtil.h b/src/java.desktop/share/native/common/java2d/vulkan/VKUtil.h index a222f3f2da47..1c109c05e5a9 100644 --- a/src/java.desktop/share/native/common/java2d/vulkan/VKUtil.h +++ b/src/java.desktop/share/native/common/java2d/vulkan/VKUtil.h @@ -24,6 +24,7 @@ #ifndef VKUtil_h_Included #define VKUtil_h_Included #include +#include #include #include "awt.h" #include "jni_util.h" @@ -68,6 +69,9 @@ static inline VkBool32 VKUtil_CheckError(VkResult result, const char* errorMessa #define C_ARRAY_UTIL_ALLOCATION_FAILED() VK_FATAL_ERROR("CArrayUtil allocation failed") #include "CArrayUtil.h" +typedef ARRAY(pchar) pchar_array_t; +typedef ARRAY(jint) jint_array_t; + #define VK_ID_TRANSFORM ((VKTransform)\ {1.0f, 0.0f, 0.0f, \ 0.0f, 1.0f, 0.0f}) diff --git a/test/jdk/jb/java/awt/vulkan/CArrayUtil/CArrayUtilTest.java 
b/test/jdk/jb/java/awt/vulkan/CArrayUtil/CArrayUtilTest.java new file mode 100644 index 000000000000..ef072e0aace8 --- /dev/null +++ b/test/jdk/jb/java/awt/vulkan/CArrayUtil/CArrayUtilTest.java @@ -0,0 +1,48 @@ +/* + * Copyright 2024-2025 JetBrains s.r.o. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import jdk.test.lib.Platform; +import jdk.test.lib.process.OutputAnalyzer; +import jdk.test.lib.process.ProcessTools; + +import java.nio.file.Path; +import java.nio.file.Paths; + +/* + * @test + * @summary Checks that the CArrayUtil library works properly + * @library /test/lib + * @build jdk.test.lib.process.ProcessTools + * @run main/native CArrayUtilTest + */ + +public class CArrayUtilTest { + public static void main(String[] args) throws Exception { + Path executable = Paths.get(System.getProperty("test.nativepath"), + "CArrayUtilTest" + (Platform.isWindows() ? ".exe" : "")); + ProcessBuilder pb = new ProcessBuilder(String.valueOf(executable)); + + OutputAnalyzer output = ProcessTools.executeProcess(pb); + output.shouldHaveExitValue(0); + } +} \ No newline at end of file diff --git a/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test.c b/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test.c new file mode 100644 index 000000000000..572504942302 --- /dev/null +++ b/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test.c @@ -0,0 +1,42 @@ +/* + * Copyright 2025 JetBrains s.r.o. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "test.h" +int argc; +char **argv; +int test_nesting_level; + +void test_array(); +void test_ring_buffer(); +void test_map(); +void test_alloc_fail(); + +int main(int my_argc, char *my_argv[]) { + argc = my_argc; + argv = my_argv; + test_nesting_level = 0; + RUN_TEST(array); + RUN_TEST(ring_buffer); + RUN_TEST(map); + return 0; +} diff --git a/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test.h b/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test.h new file mode 100644 index 000000000000..ba9ad9e3c07a --- /dev/null +++ b/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test.h @@ -0,0 +1,69 @@ +/* + * Copyright 2025 JetBrains s.r.o. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef TEST_H +#define TEST_H + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include "CArrayUtil.h" + +typedef char* pchar; + +extern int argc; +extern char **argv; +extern int test_nesting_level; + +static inline void fail() { + exit(1); +} + +#define PRINT_INDENT(INDENT, ...) printf("%*s", (int)((INDENT) + strlen(__VA_ARGS__)), (__VA_ARGS__)) +#define RUN_TEST(NAME) do { \ + if (argc < 2 || strncmp(argv[1], #NAME, CARR_MIN(strlen(#NAME), strlen(argv[1]))) == 0) { \ + PRINT_INDENT(test_nesting_level*2, "Start: " #NAME "\n"); \ + test_nesting_level++; \ + test_ ## NAME(); \ + test_nesting_level--; \ + PRINT_INDENT(test_nesting_level*2, "End: " #NAME "\n"); \ + } \ +} while(0) + +#define CONCATENATE_IMPL(A, B) A ## B +#define CONCATENATE(A, B) CONCATENATE_IMPL(A, B) + +// Peek into the map impl to check if a rehash has taken place; good enough for a test... +typedef struct CARR_hash_map_probing_impl_data_struct { + void* key_data; + void* value_data; + + uint32_t probing_limit; + float load_factor; + void* zero_key_slot; // points to the all-zero key if one exists (to distinguish from a missing key) + + CARR_equals_fp equals; + CARR_hash_fp hash; +} CARR_hash_map_probing_impl_data_t; + +#endif //TEST_H diff --git a/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_array.c b/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_array.c new file mode 100644 index 000000000000..abbe751bfeb1 --- /dev/null +++ b/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_array.c @@ -0,0 +1,182 @@ +/* + * Copyright 2025 JetBrains s.r.o. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation.
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "test.h" + +#define TYPE uint8_t +#include "test_array.h" +#define TYPE uint16_t +#include "test_array.h" +#define TYPE uint32_t +#include "test_array.h" +#define TYPE uint64_t +#include "test_array.h" + +static void test_array_pchar() { + ARRAY(pchar) a = {0}; + ARRAY_ENSURE_CAPACITY(a, 10); + + if (a.capacity != 10) fail(); + + ARRAY_PUSH_BACK(a) = "0"; + ARRAY_PUSH_BACK(a) = "1"; + ARRAY_PUSH_BACK(a) = "2"; + ARRAY_PUSH_BACK(a) = "3"; + + if (a.size != 4) fail(); + + for (size_t i = 0; i < a.size; i++) { + char str[21]; + sprintf(str, "%zu", i); + if (strcmp(a.data[i], str) != 0) fail(); + } + + ARRAY_FREE(a); +} + +static void test_array_null_safe() { + ARRAY(pchar) a = {0}; + + if (a.size != 0) fail(); + if (a.capacity != 0) fail(); + ARRAY_FREE(a); // check that free is NULL-safe + + ARRAY_ENSURE_CAPACITY(a, 1); + ARRAY_PUSH_BACK(a) = "test"; + if (a.size != 1) fail(); + if (a.capacity < 1) fail(); + + ARRAY_FREE(a); +} + +static void test_array_shrink_to_fit() { + ARRAY(pchar) a = {0}; + ARRAY_ENSURE_CAPACITY(a, 10); + const pchar* initial_data = a.data; + + if (a.capacity != 10) fail(); + + ARRAY_PUSH_BACK(a) = "0"; + ARRAY_PUSH_BACK(a) = "1"; + ARRAY_PUSH_BACK(a) = "2"; + ARRAY_PUSH_BACK(a) = "3"; + + if (a.size != 4) fail(); + + ARRAY_SHRINK_TO_FIT(a); + + if (a.data == initial_data) fail(); + + if (a.capacity != 4) fail(); + if (a.size != 4) fail(); + + for (size_t i = 0; i < a.size; i++) { + char str[21]; + sprintf(str, "%zu", i); + if (strcmp(a.data[i], str) != 0) fail(); + } + + ARRAY_FREE(a); +} + +static void test_array_expand() { + ARRAY(pchar) a = {0}; + ARRAY_ENSURE_CAPACITY(a, 3); + + if (a.capacity != 3) fail(); + + ARRAY_PUSH_BACK(a) = "0"; + ARRAY_PUSH_BACK(a) = "1"; + ARRAY_PUSH_BACK(a) = "2"; + ARRAY_PUSH_BACK(a) = "3"; + + if (a.size != 4) fail(); + if (a.capacity <= 3) fail(); + + for (size_t i = 0; i < a.size; i++) { + char str[21]; + sprintf(str, "%zu", i); + if (strcmp(a.data[i], str) != 0) fail(); + } + + ARRAY_FREE(a); +} + +static void test_array_ensure_capacity() { + ARRAY(pchar) a = {0}; + + ARRAY_ENSURE_CAPACITY(a, 1); + if (a.capacity < 1) fail(); + + size_t expanded_capacity = a.capacity + 1; + ARRAY_ENSURE_CAPACITY(a, expanded_capacity); + if (a.capacity < expanded_capacity) fail(); + + ARRAY_FREE(a); +} + +static void test_array_resize() { + ARRAY(pchar) a = {0}; + + ARRAY_RESIZE(a, 10); + if (a.size != 10) fail(); + if (a.capacity < 10) fail(); + + ARRAY_RESIZE(a, 20); + if (a.size != 20) fail(); + if (a.capacity < 20) fail(); + + ARRAY_FREE(a); +} + +static void test_array_struct() { + typedef struct { + size_t data[123]; + } struct_t; + ARRAY(struct_t) a = {0}; + + for (size_t i = 0; i < 1000; i++) { + ARRAY_PUSH_BACK(a) = (struct_t){{i}}; + } + if (a.size != 1000) fail(); + for (size_t i = 0; i < 1000; i++) { + if 
(a.data[i].data[0] != i) fail(); + } + + ARRAY_FREE(a); +} + +void test_array() { + RUN_TEST(array_uint8_t); + RUN_TEST(array_uint16_t); + RUN_TEST(array_uint32_t); + RUN_TEST(array_uint64_t); + RUN_TEST(array_pchar); + + RUN_TEST(array_null_safe); + RUN_TEST(array_shrink_to_fit); + RUN_TEST(array_expand); + RUN_TEST(array_ensure_capacity); + RUN_TEST(array_resize); + RUN_TEST(array_struct); +} diff --git a/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_array.h b/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_array.h new file mode 100644 index 000000000000..f54a6d09af0e --- /dev/null +++ b/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_array.h @@ -0,0 +1,43 @@ +/* + * Copyright 2025 JetBrains s.r.o. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +static void CONCATENATE(test_array_, TYPE)() { + ARRAY(TYPE) a = {0}; + ARRAY_ENSURE_CAPACITY(a, 10); + + if (a.capacity != 10) fail(); + + ARRAY_PUSH_BACK(a) = 0; + ARRAY_PUSH_BACK(a) = 1; + ARRAY_PUSH_BACK(a) = 2; + ARRAY_PUSH_BACK(a) = 3; + + if (a.size != 4) fail(); + + for (TYPE i = 0; i < a.size; i++) { + if (a.data[i] != i) fail(); + } + ARRAY_FREE(a); +} + +#undef TYPE diff --git a/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_map.c b/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_map.c new file mode 100644 index 000000000000..a86eb3d040c5 --- /dev/null +++ b/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_map.c @@ -0,0 +1,95 @@ +/* + * Copyright 2025 JetBrains s.r.o. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "test.h" + +#define map_key_t uint8_t +#define map_value_t uint8_t +#include "test_map.h" +#define map_key_t uint8_t +#define map_value_t uint16_t +#include "test_map.h" +#define map_key_t uint8_t +#define map_value_t uint32_t +#include "test_map.h" +#define map_key_t uint8_t +#define map_value_t uint64_t +#include "test_map.h" +#define map_key_t uint16_t +#define map_value_t uint8_t +#include "test_map.h" +#define map_key_t uint16_t +#define map_value_t uint16_t +#include "test_map.h" +#define map_key_t uint16_t +#define map_value_t uint32_t +#include "test_map.h" +#define map_key_t uint16_t +#define map_value_t uint64_t +#include "test_map.h" +#define map_key_t uint32_t +#define map_value_t uint8_t +#include "test_map.h" +#define map_key_t uint32_t +#define map_value_t uint16_t +#include "test_map.h" +#define map_key_t uint32_t +#define map_value_t uint32_t +#include "test_map.h" +#define map_key_t uint32_t +#define map_value_t uint64_t +#include "test_map.h" +#define map_key_t uint64_t +#define map_value_t uint8_t +#include "test_map.h" +#define map_key_t uint64_t +#define map_value_t uint16_t +#include "test_map.h" +#define map_key_t uint64_t +#define map_value_t uint32_t +#include "test_map.h" +#define map_key_t uint64_t +#define map_value_t uint64_t +#include "test_map.h" + +#include "test_map_struct.h" + +void test_map() { + RUN_TEST(map_linear_probing_uint8_t_uint8_t); + RUN_TEST(map_linear_probing_uint8_t_uint16_t); + RUN_TEST(map_linear_probing_uint8_t_uint32_t); + RUN_TEST(map_linear_probing_uint8_t_uint64_t); + RUN_TEST(map_linear_probing_uint16_t_uint8_t); + RUN_TEST(map_linear_probing_uint16_t_uint16_t); + RUN_TEST(map_linear_probing_uint16_t_uint32_t); + RUN_TEST(map_linear_probing_uint16_t_uint64_t); + RUN_TEST(map_linear_probing_uint32_t_uint8_t); + RUN_TEST(map_linear_probing_uint32_t_uint16_t); + RUN_TEST(map_linear_probing_uint32_t_uint32_t); + RUN_TEST(map_linear_probing_uint32_t_uint64_t); + RUN_TEST(map_linear_probing_uint64_t_uint8_t); + RUN_TEST(map_linear_probing_uint64_t_uint16_t); + RUN_TEST(map_linear_probing_uint64_t_uint32_t); + RUN_TEST(map_linear_probing_uint64_t_uint64_t); + RUN_TEST(map_linear_probing_struct); +} diff --git a/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_map.h b/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_map.h new file mode 100644 index 000000000000..c99b80c47d0d --- /dev/null +++ b/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_map.h @@ -0,0 +1,210 @@ +/* + * Copyright 2025 JetBrains s.r.o. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+#define TYPE CONCATENATE(CONCATENATE(CONCATENATE(_, map_key_t), _), map_value_t)
+
+#define map_t CONCATENATE(map, TYPE)
+typedef MAP(map_key_t, map_value_t) map_t;
+
+#define equals CONCATENATE(equals, TYPE)
+#define good_hash CONCATENATE(good_hash, TYPE)
+#define bad_hash CONCATENATE(bad_hash, TYPE)
+#define awful_hash CONCATENATE(awful_hash, TYPE)
+#define test_hash_map CONCATENATE(test_hash_map, TYPE)
+#define test_map_linear_probing CONCATENATE(test_map_linear_probing, TYPE)
+
+static bool equals(const void* a, const void* b) {
+    return *((map_key_t*) a) == *((map_key_t*) b);
+}
+static size_t good_hash(const void* data) {
+    return *((map_key_t*) data); // identity hash - no collisions
+}
+static size_t bad_hash(const void* data) {
+    return *((map_key_t*) data) / 8 * 8; // up to 8 keys share a bucket
+}
+static size_t awful_hash(const void* data) {
+    return 0; // all keys collide
+}
+
+// Test lookup, insertion, and deletion, finishing with a clear.
+static void test_hash_map(map_t* map) {
+    for (map_key_t i = 1; i < 100; i++) {
+        map_key_t* k;
+        map_value_t* v;
+
+        v = MAP_FIND(*map, i);
+        if (v != NULL) fail();
+
+        k = &i;
+        v = MAP_RESOLVE(*map, k);
+        if (v != NULL || k != NULL) fail();
+
+        if (i % 2 == 0) MAP_AT(*map, i) = (map_value_t)i;
+        else {
+            k = &i;
+            v = MAP_RESOLVE_OR_INSERT(*map, k);
+            if (k == NULL) fail();
+            if (v != NULL) fail();
+            v = MAP_FIND(*map, *k);
+            if (v == NULL) fail();
+            *v = (map_value_t)i;
+        }
+
+        v = MAP_FIND(*map, i);
+        if (v == NULL) fail();
+        if (*v != i) fail();
+
+        k = &i;
+        v = MAP_RESOLVE(*map, k);
+        if (v == NULL || k == NULL) fail();
+        if (MAP_RESOLVE(*map, k) != v) fail();
+        if (*v != i) fail();
+
+        k = &i;
+        v = MAP_RESOLVE_OR_INSERT(*map, k);
+        if (v == NULL || k == NULL) fail();
+        if (MAP_FIND(*map, *k) != v) fail();
+        if (*v != i) fail();
+    }
+
+    if (MAP_FIND(*map, (map_key_t){0}) != NULL) fail();
+    MAP_AT(*map, (map_key_t){0}) = 0;
+
+    for (map_key_t i = 0; i < 200; i++) {
+        if ((MAP_FIND(*map, i) != NULL) != (i < 100)) fail();
+    }
+    for (map_key_t i = 100; i < 200; i++) MAP_AT(*map, i) = (map_value_t)i;
+    for (map_key_t i = 0; i < 250; i++) {
+        if ((MAP_FIND(*map, i) != NULL) != (i < 200)) fail();
+    }
+
+    int count = 0;
+    int64_t sum = 0;
+    for (const map_key_t* k = NULL; (k = MAP_NEXT_KEY(*map, k)) != NULL;) {
+        map_value_t* v = MAP_FIND(*map, *k);
+        if (v == NULL || *v != *k) fail();
+        count++;
+        sum += *v;
+    }
+    if (count != 200) fail();
+    if (sum != (199 * 200) / 2) fail();
+
+    for (map_key_t i = 0; i < 250; i += 2) {
+        if (MAP_REMOVE(*map, i) != (i < 200)) fail();
+    }
+
+    count = 0;
+    sum = 0;
+    for (const map_key_t* k = NULL; (k = MAP_NEXT_KEY(*map, k)) != NULL;) {
+        map_value_t* v = MAP_FIND(*map, *k);
+        if (v == NULL || *v != *k) fail();
+        count++;
+        sum += *v;
+    }
+    if (count != 100) fail();
+    if (sum != 100 * 100) fail();
+
+    MAP_CLEAR(*map);
+    if (MAP_NEXT_KEY(*map, NULL) != NULL) fail();
+}
+
+static void test_map_linear_probing() {
+    map_t map = {0};
+    map_value_t* value_data_before = NULL;
+
+    // Test a fresh map, expanding from the smallest size, rehashing only when full.
+    HASH_MAP_REHASH(map, linear_probing, &equals, &good_hash, 0, -1, 1.0f);
+    value_data_before = ((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data;
+    test_hash_map(&map);
+    if (((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data == value_data_before) fail();
+
+    // Run the same scenario again; capacity was retained, so expect no reallocations.
+    value_data_before = ((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data;
+    test_hash_map(&map);
+    if (((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data != value_data_before) fail();
+
+    // Prepare space in advance, permit no collisions, expect no reallocations.
+    HASH_MAP_REHASH(map, linear_probing, &equals, &good_hash, 200, 0, 0.0f);
+    value_data_before = ((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data;
+    test_hash_map(&map);
+    if (((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data != value_data_before) fail();
+
+    // Bad hash, allow up to 7 collisions, expect no reallocations.
+    HASH_MAP_REHASH(map, linear_probing, &equals, &bad_hash, 200, 7, 0.0f);
+    value_data_before = ((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data;
+    test_hash_map(&map);
+    if (((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data != value_data_before) fail();
+
+    // Bad hash, permit no collisions, but block reallocation entirely via the load factor; expect no reallocations.
+    HASH_MAP_REHASH(map, linear_probing, &equals, &bad_hash, 200, 0, 1.0f);
+    value_data_before = ((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data;
+    test_hash_map(&map);
+    if (((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data != value_data_before) fail();
+
+    // Bad hash, permit no collisions, but restrict reallocation with a tight load factor; expect no reallocations.
+    HASH_MAP_REHASH(map, linear_probing, &equals, &bad_hash, 200, 0, 0.6f);
+    value_data_before = ((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data;
+    test_hash_map(&map);
+    if (((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data != value_data_before) fail();
+
+    // Bad hash, permit no collisions, with only a loose load factor limit; expect reallocation.
+    HASH_MAP_REHASH(map, linear_probing, &equals, &bad_hash, 200, 0, 0.3f);
+    value_data_before = ((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data;
+    test_hash_map(&map);
+    if (((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data == value_data_before) fail();
+
+    // Awful hash, expect a reallocation upon reaching 197 collisions.
+    HASH_MAP_REHASH(map, linear_probing, &equals, &awful_hash, 200, 197, 0.0f);
+    value_data_before = ((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data;
+    test_hash_map(&map);
+    if (((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data == value_data_before) fail();
+
+    // Awful hash, permit no collisions, but restrict reallocation with a tight load factor; expect no reallocations.
+    HASH_MAP_REHASH(map, linear_probing, &equals, &awful_hash, 200, 0, 0.6f);
+    value_data_before = ((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data;
+    test_hash_map(&map);
+    if (((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data != value_data_before) fail();
+
+    // Awful hash, permit no collisions, with only a loose load factor limit; expect reallocation.
+    HASH_MAP_REHASH(map, linear_probing, &equals, &awful_hash, 200, 0, 0.3f);
+    value_data_before = ((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data;
+    test_hash_map(&map);
+    if (((CARR_hash_map_probing_impl_data_t*)map.impl_data)->value_data == value_data_before) fail();
+
+    MAP_FREE(map);
+    if (map.impl_data != NULL) fail();
+
+    // Freeing must be NULL-safe.
+    MAP_FREE(map);
+}
+
+#undef map_key_t
+#undef map_value_t
+#undef map_t
+
+#undef equals
+#undef good_hash
+#undef bad_hash
+#undef awful_hash
+#undef test_hash_map
+#undef test_map_linear_probing
diff --git a/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_map_struct.h b/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_map_struct.h
new file mode 100644
index 000000000000..3e0effef8594
--- /dev/null
+++ b/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_map_struct.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2025 JetBrains s.r.o.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+static void test_map_linear_probing_struct() {
+    typedef struct {
+        uint64_t data[123];
+    } struct_t;
+    MAP(struct_t, uint8_t) big_key_map = {0};
+    MAP(uint8_t, struct_t) big_val_map = {0};
+    HASH_MAP_REHASH(big_key_map, linear_probing, &equals_uint64_t_uint8_t, &good_hash_uint64_t_uint8_t, 0, -1, 1.0f);
+    HASH_MAP_REHASH(big_val_map, linear_probing, &equals_uint8_t_uint64_t, &good_hash_uint8_t_uint64_t, 0, -1, 1.0f);
+
+    for (uint8_t i = 0;; i++) {
+        uint8_t key = ((i & 0xAA) >> 1) | ((i & 0x55) << 1); // swap adjacent bits to scramble insertion order
+        struct_t big_key = {{key}};
+        uint8_t *bkm_val, *bvm_key = &key;
+        struct_t *bvm_val, *bkm_key = &big_key;
+        bkm_val = MAP_RESOLVE_OR_INSERT(big_key_map, bkm_key);
+        bvm_val = MAP_RESOLVE_OR_INSERT(big_val_map, bvm_key);
+        if (bkm_key == NULL || bvm_key == NULL) fail();
+        if (bkm_val != NULL || bvm_val != NULL) fail();
+        bkm_val = MAP_FIND(big_key_map, *bkm_key);
+        bvm_val = MAP_FIND(big_val_map, *bvm_key);
+        if (bkm_val == NULL || bvm_val == NULL) fail();
+        *bkm_val = key;
+        *bvm_val = big_key;
+        if (i == 255) break;
+    }
+
+    uint32_t count = 0;
+    for (const struct_t* k = NULL; (k = MAP_NEXT_KEY(big_key_map, k)) != NULL;) {
+        count++;
+        if (k->data[0] != *MAP_FIND(big_key_map, *k)) fail();
+    }
+    if (count != 256) fail();
+
+    count = 0;
+    for (const uint8_t* k = NULL; (k = MAP_NEXT_KEY(big_val_map, k)) != NULL;) {
+        count++;
+        if (*k != MAP_FIND(big_val_map, *k)->data[0]) fail();
+    }
+    if (count != 256) fail();
+
+    for (uint8_t i = 255;; i--) {
+        struct_t big_key = {{i}};
+        uint8_t *bkm_val, *bvm_key = &i;
+        struct_t *bvm_val, *bkm_key = &big_key;
+        bkm_val = MAP_RESOLVE(big_key_map, bkm_key);
+        bvm_val = MAP_RESOLVE(big_val_map, bvm_key);
+        if (bkm_key == NULL || bvm_key == NULL) fail();
+        if (bkm_val == NULL || bvm_val == NULL) fail();
+        if (!MAP_REMOVE(big_key_map, *bkm_key)) fail();
+        if (!MAP_REMOVE(big_val_map, *bvm_key)) fail();
+        if (i == 0) break;
+    }
+
+    if (MAP_NEXT_KEY(big_key_map, NULL) != NULL) fail();
+    if (MAP_NEXT_KEY(big_val_map, NULL) != NULL) fail();
+
+    MAP_FREE(big_key_map);
+    MAP_FREE(big_val_map);
+}
diff --git a/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_ring_buffer.c b/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_ring_buffer.c
new file mode 100644
index 000000000000..7e7248912c8e
--- /dev/null
+++ b/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_ring_buffer.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2025 JetBrains s.r.o.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "test.h"
+
+#define TYPE uint32_t
+#include "test_ring_buffer.h"
+#define TYPE uint64_t
+#include "test_ring_buffer.h"
+
+static void test_ring_buffer_null_safe() {
+    RING_BUFFER(pchar) b = {0};
+
+    if (b.size != 0) fail();
+    if (b.capacity != 0) fail();
+    RING_BUFFER_FREE(b);
+
+    RING_BUFFER_PUSH_BACK(b) = "test";
+    if (b.size != 1) fail();
+    if (b.capacity < 1) fail();
+
+    RING_BUFFER_FREE(b);
+}
+
+static void test_ring_buffer_struct() {
+    typedef struct {
+        size_t data[123];
+    } struct_t;
+    RING_BUFFER(struct_t) b = {0};
+
+    for (size_t i = 0; i < 1000; i++) {
+        RING_BUFFER_PUSH_BACK(b) = (struct_t){{i}};
+    }
+    if (b.size != 1000) fail();
+    for (size_t i = 0;; i++) {
+        struct_t* s = RING_BUFFER_FRONT(b);
+        if (s == NULL) {
+            if (i != 1000) fail();
+            break;
+        }
+        if (s->data[0] != i) fail();
+        RING_BUFFER_POP_FRONT(b);
+    }
+
+    RING_BUFFER_FREE(b);
+}
+
+void test_ring_buffer() {
+    RUN_TEST(ring_buffer_wrap_uint32_t);
+    RUN_TEST(ring_buffer_wrap_uint64_t);
+    RUN_TEST(ring_buffer_null_safe);
+    RUN_TEST(ring_buffer_struct);
+}
diff --git a/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_ring_buffer.h b/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_ring_buffer.h
new file mode 100644
index 000000000000..465329764e64
--- /dev/null
+++ b/test/jdk/jb/java/awt/vulkan/CArrayUtil/native/test_ring_buffer.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2025 JetBrains s.r.o.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+static void CONCATENATE(test_ring_buffer_wrap_, TYPE)() {
+    const size_t EXPAND_COUNT = 1000;
+    const int INNER_COUNT = 1000;
+    RING_BUFFER(TYPE) b = {0};
+
+    TYPE read = 0;
+    TYPE write = 0;
+    for (size_t i = 0; i < EXPAND_COUNT; i++) {
+        for (int j = 0; j < INNER_COUNT; j++) {
+            RING_BUFFER_PUSH_BACK(b) = write;
+            write++;
+            TYPE* value = RING_BUFFER_FRONT(b);
+            if (value == NULL) fail();
+            if (*value != read) fail();
+            read++;
+            RING_BUFFER_POP_FRONT(b);
+        }
+        RING_BUFFER_PUSH_BACK(b) = write;
+        write++;
+    }
+    if (b.size != EXPAND_COUNT) fail();
+
+    for (size_t i = 0; i < EXPAND_COUNT; i++) {
+        TYPE* value = RING_BUFFER_FRONT(b);
+        if (value == NULL) fail();
+        if (*value != read) fail();
+        read++;
+        RING_BUFFER_POP_FRONT(b);
+    }
+    if (RING_BUFFER_FRONT(b) != NULL) fail();
+    if (b.size != 0) fail();
+
+    RING_BUFFER_FREE(b);
+}
+
+#undef TYPE
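Note for reviewers: the `test_array.h`, `test_map.h`, and `test_ring_buffer.h` headers above are deliberately guard-less. Each `#include` re-instantiates the header body for whatever `TYPE` (or `map_key_t`/`map_value_t`) is currently defined, and `CONCATENATE` pastes the type into the function name, which is how one body yields `test_ring_buffer_wrap_uint32_t`, `test_ring_buffer_wrap_uint64_t`, and so on. For readers unfamiliar with the trick, below is a minimal standalone sketch of the same pattern. The file names (`sum.h`, `main.c`) and the local `CONCATENATE` definition are illustrative only; the patch itself uses the `CONCATENATE` macro from `CArrayUtil.h`, which is assumed to do the same two-level token paste.

    /* sum.h - deliberately no include guard: included once per TYPE */
    static TYPE CONCATENATE(sum_, TYPE)(const TYPE* v, size_t n) {
        TYPE s = 0;
        for (size_t i = 0; i < n; i++) s += v[i]; /* sum n elements */
        return s;
    }
    #undef TYPE /* reset preprocessor state for the next instantiation */

    /* main.c */
    #include <inttypes.h>
    #include <stddef.h>
    #include <stdio.h>

    #define CONCATENATE_(a, b) a##b
    #define CONCATENATE(a, b) CONCATENATE_(a, b) /* expand args before pasting */

    #define TYPE uint32_t
    #include "sum.h" /* instantiates sum_uint32_t() */
    #define TYPE uint64_t
    #include "sum.h" /* instantiates sum_uint64_t() */

    int main(void) {
        uint32_t a[] = {1, 2, 3};
        uint64_t b[] = {4, 5};
        /* prints "6 9" */
        printf("%" PRIu32 " %" PRIu64 "\n", sum_uint32_t(a, 3), sum_uint64_t(b, 2));
        return 0;
    }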