Autism
License text changes, const and restrict keywords added, FreeType added as a git submodule
parent 31e37846b4
commit 08c58e719c
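
Editorial note on the two qualifiers this commit adds (a sketch, not part of the commit): top-level const on by-value parameters is purely an implementation-side annotation, while restrict on pointer parameters is a caller-side no-aliasing promise. Using declarations taken from the diff below, with placeholder typedefs:

typedef unsigned long long u64;   /* stand-in for the project's u64 */
typedef struct db_list db_list;   /* opaque here; defined in the project's headers */

/* Top-level const on a by-value parameter does not change the function's type,
   so the old and new declarations below are compatible and callers compile unchanged. */
void* create_darray(u64 capacity, u64 stride);
void* create_darray(const u64 capacity, const u64 stride);

/* restrict is a promise from the caller that, for the duration of the call,
   lst and data do not point into the same object. */
void db_list_pushfront(db_list* restrict lst, void* restrict data);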
.gitmodules (vendored) | 3 +++

@@ -4,3 +4,6 @@
 [submodule "external/stb"]
 path = external/stb
 url = https://github.com/nothings/stb
+[submodule "external/freetype"]
+path = external/freetype
+url = https://gitlab.freedesktop.org/freetype/freetype.git

external/freetype (vendored submodule) | 1 +

@@ -0,0 +1 @@
+Subproject commit 37cefe33b284d0bad4ec52bcccc1a8c2d8704340

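(Editorial note: after checking out this commit, the new submodule still has to be fetched, for example with git submodule update --init external/freetype, before the add_subdirectory(../external/freetype freetype) call added below can find its sources.)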
@@ -22,7 +22,7 @@ foreach(SHADER_FILE ${SHADER_FILES})
 endforeach()
 add_custom_target(shaders ALL DEPENDS ${SHADER_SPIRV_BINARIES})
 
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -O3")
 add_library(sofrenderer SHARED
 "src/logger.c"
 "src/renderer.c"
@@ -36,9 +36,7 @@
 LIBRARY_OUTPUT_DIRECTORY "${PROJECT_SOURCE_DIR}/lib")
 
 # GLFW
-add_subdirectory(../external/glfw build)
-target_include_directories(sofrenderer PRIVATE "${PROJECT_SOURCE_DIR}/../external/glfw/include")
-target_link_directories(sofrenderer PRIVATE "${PROJECT_SOURCE_DIR}/../external/glfw")
+add_subdirectory(../external/glfw glfw)
 target_link_libraries(sofrenderer PRIVATE glfw)
 
 # Vulkan
@@ -47,3 +45,7 @@ target_link_libraries(sofrenderer PRIVATE ${Vulkan_LIBRARIES})
 
 # stb
 target_include_directories(sofrenderer PRIVATE "${PROJECT_SOURCE_DIR}/../external/stb")
+
+# freetype
+add_subdirectory(../external/freetype freetype)
+target_link_libraries(sofrenderer PRIVATE freetype)

@@ -1,14 +1,20 @@
 /*
 
-Copyright (C) 2024 Frosch
-
 This file is part of Shape of Fantasy.
+Copyright (C) 2024 Frosch
 
-Shape of Fantasy is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
 
-Shape of Fantasy is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
 
-You should have received a copy of the GNU General Public License along with Shape of Fantasy. If not, see <https://www.gnu.org/licenses/>.
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 */
 
@@ -18,7 +24,7 @@ You should have received a copy of the GNU General Public License along with Sha
 #include <stdlib.h>
 #include <string.h>
 
-void* create_darray(u64 capacity, u64 stride)
+void* create_darray(const u64 capacity, const u64 stride)
 {
 if (!capacity)
 {
@@ -31,7 +37,7 @@ void* create_darray(u64 capacity, u64 stride)
 return NULL;
 }
 
-u64 byte_size = DARRAY_TOT_FIELDS * sizeof(u64) + capacity * stride;
+const u64 byte_size = DARRAY_TOT_FIELDS * sizeof(u64) + capacity * stride;
 u64* header = (u64*)malloc(byte_size);
 header[DARRAY_CAPACITY] = capacity;
 header[DARRAY_SIZE] = 0;
@@ -48,7 +54,7 @@ void destroy_darray(void* arr)
 free(header);
 }
 
-static void* change_darray_capacity(void* arr, u64 capacity)
+static void* change_darray_capacity(void* arr, const u64 capacity)
 {
 if (!capacity)
 {
@@ -57,9 +63,9 @@ static void* change_darray_capacity(void* arr, u64 capacity)
 }
 
 // get pointer to the first field
-u64* header = (u64*)arr - DARRAY_TOT_FIELDS;
+const u64* header = (u64*)arr - DARRAY_TOT_FIELDS;
 
-u64 stride = header[DARRAY_STRIDE];
+const u64 stride = header[DARRAY_STRIDE];
 void* result = create_darray(capacity, stride);
 memcpy(result, arr, header[DARRAY_CAPACITY] * stride);
 destroy_darray(arr);
@@ -67,7 +73,7 @@ static void* change_darray_capacity(void* arr, u64 capacity)
 return result;
 }
 
-void set_darray_field(void* arr, u64 field, u64 value)
+void set_darray_field(void* arr, const u64 field, const u64 value)
 {
 // get pointer to the first field
 u64* header = (u64*)arr - DARRAY_TOT_FIELDS;
@@ -75,7 +81,7 @@ void set_darray_field(void* arr, u64 field, u64 value)
 header[field] = value;
 }
 
-u64 get_darray_field(const void* arr, u8 field)
+u64 get_darray_field(const void* arr, const u8 field)
 {
 if (field >= DARRAY_TOT_FIELDS)
 {
@@ -89,7 +95,7 @@ u64 get_darray_field(const void* arr, u8 field)
 return header[field];
 }
 
-void* darray_pushback(void* arr, const void* src, u64 src_size)
+void* darray_pushback(void* arr, const void* src, const u64 src_size)
 {
 if (!src_size)
 {
@@ -100,8 +106,8 @@ void* darray_pushback(void* arr, const void* src, u64 src_size)
 // get pointer to the first field
 u64* header = (u64*)arr - DARRAY_TOT_FIELDS;
 
-u64 size = header[DARRAY_SIZE];
-u64 stride = header[DARRAY_STRIDE];
+const u64 size = header[DARRAY_SIZE];
+const u64 stride = header[DARRAY_STRIDE];
 
 // if the size is about to become larger than the capacity, increase the capacity
 if ((size + src_size) > header[DARRAY_CAPACITY])
@@ -112,7 +118,7 @@ void* darray_pushback(void* arr, const void* src, u64 src_size)
 header = (u64*)arr - DARRAY_TOT_FIELDS;
 }
 
-u64 address = (u64)arr;
+const u64 address = (u64)arr;
 void* dst = (void*)(address + size * stride);
 memcpy(dst, src, stride * src_size);
 header[DARRAY_SIZE] = size + src_size;
@@ -120,7 +126,7 @@ void* darray_pushback(void* arr, const void* src, u64 src_size)
 return arr;
 }
 
-void* darray_insert(void* arr, u64 index, const void* src, u64 src_size)
+void* darray_insert(void* arr, const u64 index, const void* src, const u64 src_size)
 {
 if (!src_size)
 {
@@ -131,13 +137,13 @@ void* darray_insert(void* arr, u64 index, const void* src, u64 src_size)
 // get pointer to the first field
 u64* header = (u64*)arr - DARRAY_TOT_FIELDS;
 
-u64 size = header[DARRAY_SIZE];
+const u64 size = header[DARRAY_SIZE];
 if (index >= size)
 {
 trace_log(LOG_ERROR, "Darray data insertion failed; index '%d' out of bounds.", index);
 return NULL;
 }
-u64 stride = header[DARRAY_STRIDE];
+const u64 stride = header[DARRAY_STRIDE];
 
 // if the size is about to become larger than the capacity, increase the capacity
 if ((size + src_size) > header[DARRAY_CAPACITY])
@@ -146,7 +152,7 @@ void* darray_insert(void* arr, u64 index, const void* src, u64 src_size)
 header = (u64*)arr - DARRAY_TOT_FIELDS; //recalc header, changing capacity changes arr
 }
 
-u64 address = (u64)arr;
+const u64 address = (u64)arr;
 {
 void* dst = (void*)(address + (index + src_size) * stride);
 void* shift_from = (void*)(address + index * stride);
@@ -162,20 +168,20 @@ void* darray_insert(void* arr, u64 index, const void* src, u64 src_size)
 return arr;
 }
 
-u8 darray_pop(void* arr, void* dst, u64 dst_size)
+u8 darray_pop(void* arr, void* dst, const u64 dst_size)
 {
-u64 address = (u64)arr;
+const u64 address = (u64)arr;
 
 // get pointer to the first field
 u64* header = (u64*)arr - DARRAY_TOT_FIELDS;
 
-u64 size = header[DARRAY_SIZE];
+const u64 size = header[DARRAY_SIZE];
 if (!dst_size || !(size - dst_size))
 {
 trace_log(LOG_ERROR, "Darray pop failed; destination size '%d' must be nonzero and smaller than the darray size '%d'.", dst_size, size);
 return 0;
 }
-u64 stride = header[DARRAY_STRIDE];
+const u64 stride = header[DARRAY_STRIDE];
 
 if (dst != NULL)
 {
@@ -187,14 +193,14 @@ u8 darray_pop(void* arr, void* dst, u64 dst_size)
 return 1;
 }
 
-u8 darray_remove(void* arr, u64 index, void* dst, u64 dst_size)
+u8 darray_remove(void* arr, const u64 index, void* dst, const u64 dst_size)
 {
-u64 address = (u64)arr;
+const u64 address = (u64)arr;
 
 // get pointer to the first field
 u64* header = (u64*)arr - DARRAY_TOT_FIELDS;
 
-u64 size = header[DARRAY_SIZE];
+const u64 size = header[DARRAY_SIZE];
 if (!dst_size && !(size - dst_size))
 {
 trace_log(LOG_ERROR, "Darray data removal failed; destination size '%d' must be nonzero and smaller than the darray size '%d'.", dst_size, size);
@@ -205,7 +211,7 @@ u8 darray_remove(void* arr, u64 index, void* dst, u64 dst_size)
 trace_log(LOG_ERROR, "Darray data removal failed; index '%d' out of bounds with darray size '%d' and removal size '%d'.", index, dst_size, size);
 return 0;
 }
-u64 stride = header[DARRAY_STRIDE];
+const u64 stride = header[DARRAY_STRIDE];
 
 if (dst != NULL)
 {

@@ -23,17 +23,17 @@ enum
 DARRAY_TOT_FIELDS
 };
 
-void* create_darray(u64 capacity, u64 stride);
+void* create_darray(const u64 capacity, const u64 stride);
 void destroy_darray(void* arr);
 
-void set_darray_field(void* arr, u64 field, u64 value);
-u64 get_darray_field(const void* arr, u8 field);
+void set_darray_field(void* arr, const u64 field, const u64 value);
+u64 get_darray_field(const void* arr, const u8 field);
 static inline u64 get_darray_capacity(const void* arr) { return get_darray_field(arr, DARRAY_CAPACITY); }
 static inline u64 get_darray_size(const void* arr) { return get_darray_field(arr, DARRAY_SIZE); }
 static inline u64 get_darray_stride(const void* arr) { return get_darray_field(arr, DARRAY_STRIDE); }
 
-void* darray_pushback(void* arr, const void* src, u64 src_size);
-void* darray_insert(void* arr, u64 index, const void* src, u64 src_size);
-u8 darray_pop(void* arr, void* dst, u64 dst_size);
-u8 darray_remove(void* arr, u64 index, void* kdst, u64 dst_size);
+void* darray_pushback(void* arr, const void* src, const u64 src_size);
+void* darray_insert(void* arr, const u64 index, const void* src, const u64 src_size);
+u8 darray_pop(void* arr, void* dst, const u64 dst_size);
+u8 darray_remove(void* arr, const u64 index, void* dst, const u64 dst_size);
 void clear_darray(void* arr);

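Editorial sketch of how the darray API declared above is used; the header name and the u32/u64 typedefs are assumptions, while the function names and the convention of keeping the pointer returned by darray_pushback come from the diff itself:

#include "darray.h"   /* assumed header name */

void darray_usage_example(void)
{
    /* room for 4 elements of 4 bytes each */
    u32* numbers = (u32*)create_darray(4, sizeof(u32));

    for (u32 i = 0; i < 10; i++)
        numbers = (u32*)darray_pushback(numbers, &i, 1);  /* may reallocate, so keep the returned pointer */

    const u64 count = get_darray_size(numbers);           /* 10 */
    (void)count;
    destroy_darray(numbers);
}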
@@ -1,14 +1,20 @@
 /*
 
-Copyright (C) 2024 Frosch
-
 This file is part of Shape of Fantasy.
+Copyright (C) 2024 Frosch
 
-Shape of Fantasy is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
 
-Shape of Fantasy is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
 
-You should have received a copy of the GNU General Public License along with Shape of Fantasy. If not, see <https://www.gnu.org/licenses/>.
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 */
 
@@ -17,7 +23,7 @@ You should have received a copy of the GNU General Public License along with Sha
 #include <stdlib.h>
 #include <string.h>
 
-static db_item* create_db_item(db_list* lst, db_item* prev, void* data)
+static db_item* create_db_item(db_list* restrict lst, db_item* restrict prev, void* restrict data)
 {
 db_item* itm = (db_item*)malloc(sizeof(db_item));
 
@@ -34,20 +40,20 @@ static db_item* create_db_item(db_list* lst, db_item* prev, void* data)
 return itm;
 }
 
-static void destroy_db_item(db_list* lst, db_item* itm)
+static void destroy_db_item(const db_list* restrict lst, db_item* restrict itm)
 {
 if (lst->data_byte_size) free(itm->data);
 free(itm);
 }
 
-void init_db_list(db_list* lst, u64 data_byte_size)
+void init_db_list(db_list* restrict lst, const u64 data_byte_size)
 {
 lst->head = NULL;
 lst->tail = NULL;
 lst->data_byte_size = data_byte_size;
 }
 
-void shutdown_db_list(db_list* lst)
+void shutdown_db_list(db_list* restrict lst)
 {
 db_item* next;
 for (db_item* current = lst->head; current != NULL; current = next)
@@ -57,7 +63,7 @@ void shutdown_db_list(db_list* lst)
 }
 }
 
-void db_list_pushfront(db_list* lst, void* data)
+void db_list_pushfront(db_list* restrict lst, void* restrict data)
 {
 db_item* itm = create_db_item(lst, lst->tail, data);
 
@@ -67,7 +73,7 @@ void db_list_pushfront(db_list* lst, void* data)
 lst->head = itm;
 }
 
-void db_list_popfront(db_list* lst, void** dst)
+void db_list_popfront(db_list* restrict lst, void** restrict dst)
 {
 if (lst->head == NULL)
 {
@@ -88,7 +94,7 @@ void db_list_popfront(db_list* lst, void** dst)
 else lst->head->prev = NULL;
 }
 
-void db_list_pushback(db_list* lst, void* data)
+void db_list_pushback(db_list* restrict lst, void* restrict data)
 {
 db_item* itm = create_db_item(lst, lst->tail, data);
 
@@ -98,7 +104,7 @@ void db_list_pushback(db_list* lst, void* data)
 lst->tail = itm;
 }
 
-void db_list_popback(db_list* lst, void** dst)
+void db_list_popback(db_list* restrict lst, void** restrict dst)
 {
 if (lst->head == NULL)
 {

@@ -50,9 +50,9 @@ typedef struct db_list
 
 } db_list;
 
-void init_db_list(db_list* lst, u64 data_byte_size);
-void shutdown_db_list(db_list* lst);
-void db_list_pushfront(db_list* lst, void* data);
-void db_list_popfront(db_list* lst, void** dst);
-void db_list_pushback(db_list* lst, void* data);
-void db_list_popback(db_list* lst, void** dst);
+void init_db_list(db_list* restrict lst, const u64 data_byte_size);
+void shutdown_db_list(db_list* restrict lst);
+void db_list_pushfront(db_list* restrict lst, void* restrict data);
+void db_list_popfront(db_list* restrict lst, void** restrict dst);
+void db_list_pushback(db_list* restrict lst, void* restrict data);
+void db_list_popback(db_list* restrict lst, void** restrict dst);

@@ -1,14 +1,20 @@
 /*
 
-Copyright (C) 2024 Frosch
-
 This file is part of Shape of Fantasy.
+Copyright (C) 2024 Frosch
 
-Shape of Fantasy is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
 
-Shape of Fantasy is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
 
-You should have received a copy of the GNU General Public License along with Shape of Fantasy. If not, see <https://www.gnu.org/licenses/>.
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 */
 
@@ -27,7 +33,7 @@ u32 basic_hash(const hash_table* ht, const char* key)
 return hashval & (ht->slot_count - 1); // slot count must be a power of 2
 }
 
-static hash_table_entry* create_hash_table_entry(hash_table* ht, const char* key, void* data)
+static hash_table_entry* create_hash_table_entry(hash_table* restrict ht, const char* restrict key, void* restrict data)
 {
 hash_table_entry* entry = (hash_table_entry*)malloc(sizeof(hash_table_entry));
 
@@ -44,14 +50,14 @@ static hash_table_entry* create_hash_table_entry(hash_table* ht, const char* key
 return entry;
 }
 
-static void destroy_hash_table_entry(hash_table* ht, hash_table_entry* entry)
+static void destroy_hash_table_entry(hash_table* restrict ht, hash_table_entry* restrict entry)
 {
 if (ht->data_byte_size) free(entry->data);
 free(entry);
 entry = NULL;
 }
 
-void init_hash_table(hash_table* ht, u64 slot_count, u64 data_byte_size, hash_fn hash)
+void init_hash_table(hash_table* restrict ht, const u64 slot_count, const u64 data_byte_size, hash_fn hash)
 {
 if (!slot_count || (slot_count & (slot_count - 1)))
 {
@@ -66,7 +72,7 @@ void init_hash_table(hash_table* ht, u64 slot_count, u64 data_byte_size, hash_fn
 for (u32 i = 0; i < slot_count; i++) ht->entries[i] = NULL;
 }
 
-void shutdown_hash_table(hash_table* ht)
+void shutdown_hash_table(hash_table* restrict ht)
 {
 hash_table_entry* next;
 for (u32 i = 0; i < ht->slot_count; i++)
@@ -79,7 +85,7 @@ void shutdown_hash_table(hash_table* ht)
 free(ht->entries);
 }
 
-void* hash_table_lookup(const hash_table* ht, const char* key)
+void* hash_table_lookup(const hash_table* restrict ht, const char* restrict key)
 {
 if (key == NULL || (strlen(key) > HASH_TABLE_KEY_LEN - 1)) return NULL;
 
@@ -91,7 +97,7 @@ void* hash_table_lookup(const hash_table* ht, const char* key)
 return NULL;
 }
 
-void* hash_table_insert(hash_table* ht, const char* key, void* data)
+void* hash_table_insert(hash_table* restrict ht, const char* restrict key, void* restrict data)
 {
 if (strlen(key) > HASH_TABLE_KEY_LEN - 1)
 {
@@ -125,7 +131,7 @@ void* hash_table_insert(hash_table* ht, const char* key, void* data)
 return prev->next->data;
 }
 
-void* hash_table_insert_or_replace(hash_table* ht, const char* key, void* data)
+void* hash_table_insert_or_replace(hash_table* restrict ht, const char* restrict key, void* restrict data)
 {
 if (strlen(key) > HASH_TABLE_KEY_LEN - 1)
 {
@@ -160,7 +166,7 @@ void* hash_table_insert_or_replace(hash_table* ht, const char* key, void* data)
 return prev->next->data;
 }
 
-u8 hash_table_remove(hash_table* ht, const char* key, void** dst)
+u8 hash_table_remove(hash_table* restrict ht, const char* restrict key, void** restrict dst)
 {
 if (strlen(key) > HASH_TABLE_KEY_LEN - 1)
 {
@@ -197,7 +203,7 @@ u8 hash_table_remove(hash_table* ht, const char* key, void** dst)
 return 0;
 }
 
-u32 get_hash_table_entry_count(const hash_table* ht)
+u32 get_hash_table_entry_count(const hash_table* restrict ht)
 {
 u32 count = 0;
 for (u32 i = 0; i < ht->slot_count; i++)

@@ -60,12 +60,12 @@ typedef struct hash_table
 u32 basic_hash(const hash_table* ht, const char* key);
 
 // The data_byte_size is the size of the data you pass in via the insert function.
-void init_hash_table(hash_table* ht, u64 slot_count, u64 data_byte_size, hash_fn hash);
+void init_hash_table(hash_table* restrict ht, const u64 slot_count, const u64 data_byte_size, hash_fn hash);
 // If the hash table is externally handling memory and the pointer is not stored somewhere else, you should never call this before deallocating those entries.
-void shutdown_hash_table(hash_table* ht);
+void shutdown_hash_table(hash_table* restrict ht);
 
-void* hash_table_lookup(const hash_table* ht, const char* key);
-void* hash_table_insert(hash_table* ht, const char* key, void* data);
-void* hash_table_insert_or_replace(hash_table* ht, const char* key, void* data);
-u8 hash_table_remove(hash_table* ht, const char* key, void** dst); // passing NULL to dst is allowed
-u32 get_hash_table_entry_count(const hash_table* ht);
+void* hash_table_lookup(const hash_table* restrict ht, const char* restrict key);
+void* hash_table_insert(hash_table* restrict ht, const char* restrict key, void* restrict data);
+void* hash_table_insert_or_replace(hash_table* restrict ht, const char* restrict key, void* restrict data);
+u8 hash_table_remove(hash_table* restrict ht, const char* restrict key, void** restrict dst); // passing NULL to dst is allowed
+u32 get_hash_table_entry_count(const hash_table* restrict ht);

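Editorial sketch of the hash table API above; that values are stored by the table is an inference from hash_table.c, while the power-of-two slot count, basic_hash and the NULL-dst rule come directly from the diff:

#include "hash_table.h"   /* assumed header name */

void hash_table_usage_example(void)
{
    hash_table table;
    init_hash_table(&table, 64, sizeof(int), basic_hash); /* slot_count must be a power of 2 */

    int health = 100;
    hash_table_insert(&table, "player_health", &health);  /* key length is bounded by HASH_TABLE_KEY_LEN */

    int* found = (int*)hash_table_lookup(&table, "player_health");
    if (found != NULL) { /* *found == 100 */ }

    hash_table_remove(&table, "player_health", NULL);     /* NULL dst is allowed per the header comment */
    shutdown_hash_table(&table);
}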
@@ -1,14 +1,20 @@
 /*
 
-Copyright (C) 2024 Frosch
-
 This file is part of Shape of Fantasy.
+Copyright (C) 2024 Frosch
 
-Shape of Fantasy is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
 
-Shape of Fantasy is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
 
-You should have received a copy of the GNU General Public License along with Shape of Fantasy. If not, see <https://www.gnu.org/licenses/>.
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 */
 
@@ -16,7 +22,7 @@ You should have received a copy of the GNU General Public License along with Sha
 #include "logger.h"
 #include <string.h>
 
-u8 init_ring_queue(ring_queue* q, void* buffer, u64 capacity, u64 stride)
+u8 init_ring_queue(ring_queue* restrict q, void* restrict buffer, u64 capacity, u64 stride)
 {
 if (!stride)
 {
@@ -37,7 +43,7 @@ u8 init_ring_queue(ring_queue* q, void* buffer, u64 capacity, u64 stride)
 return 1;
 }
 
-void shutdown_ring_queue(ring_queue* q)
+void shutdown_ring_queue(ring_queue* restrict q)
 {
 q->buffer = NULL;
 q->in = 0;
@@ -46,7 +52,7 @@ void shutdown_ring_queue(ring_queue* q)
 q->stride = 0;
 }
 
-u8 ring_queue_pushback(ring_queue* q, const void* src)
+u8 ring_queue_pushback(ring_queue* restrict q, const void* restrict src)
 {
 if (is_ring_queue_full(q))
 {
@@ -54,7 +60,7 @@ u8 ring_queue_pushback(ring_queue* q, const void* src)
 return 0;
 }
 
-u64 address = (u64)q->buffer;
+const u64 address = (u64)q->buffer;
 void* dst = (void*)(address + q->in * q->stride);
 memcpy(dst, src, q->stride);
 
@@ -62,7 +68,7 @@ u8 ring_queue_pushback(ring_queue* q, const void* src)
 return 1;
 }
 
-u8 ring_queue_popfront(ring_queue* q, void** dst)
+u8 ring_queue_popfront(ring_queue* restrict q, void** restrict dst)
 {
 if (is_ring_queue_empty(q))
 {
@@ -72,7 +78,7 @@ u8 ring_queue_popfront(ring_queue* q, void** dst)
 
 if (dst != NULL)
 {
-u64 address = (u64)q->buffer;
+const u64 address = (u64)q->buffer;
 void* src = (void*)(address + q->out * q->stride);
 *dst = src;
 }

@@ -23,10 +23,10 @@ typedef struct ring_queue
 
 } ring_queue;
 
-u8 init_ring_queue(ring_queue* q, void* buffer, u64 capacity, u64 stride);
-void shutdown_ring_queue(ring_queue* q);
-u8 ring_queue_pushback(ring_queue* q, const void* src);
-u8 ring_queue_popfront(ring_queue* q, void** dst);
-static inline u8 is_ring_queue_full(const ring_queue* q) { return q->in == ((q->out + q->capacity) & q->capacity); }
-static inline u8 is_ring_queue_empty(const ring_queue* q) { return q->in == q->out; }
-static inline void* ring_queue_peak(ring_queue* q) { return is_ring_queue_empty(q) ? NULL : (void*)((u64)q->buffer + q->out * q->stride); }
+u8 init_ring_queue(ring_queue* restrict q, void* restrict buffer, u64 capacity, u64 stride);
+void shutdown_ring_queue(ring_queue* restrict q);
+u8 ring_queue_pushback(ring_queue* restrict q, const void* restrict src);
+u8 ring_queue_popfront(ring_queue* restrict q, void** restrict dst);
+static inline u8 is_ring_queue_full(const ring_queue* restrict q) { return q->in == ((q->out + q->capacity) & q->capacity); }
+static inline u8 is_ring_queue_empty(const ring_queue* restrict q) { return q->in == q->out; }
+static inline void* ring_queue_peak(ring_queue* restrict q) { return is_ring_queue_empty(q) ? NULL : (void*)((u64)q->buffer + q->out * q->stride); }

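Editorial sketch of the ring queue API above, mirroring how renderer.c feeds window events through it; the element type and capacity value here are placeholders (the masking in is_ring_queue_full suggests a power-of-two capacity is expected):

#include "ring_queue.h"   /* assumed header name */

#define EXAMPLE_CAPACITY 64

static int example_buffer[EXAMPLE_CAPACITY];
static ring_queue example_queue;

void ring_queue_usage_example(void)
{
    /* the caller owns the backing buffer; the queue only keeps bookkeeping indices */
    init_ring_queue(&example_queue, example_buffer, EXAMPLE_CAPACITY, sizeof(int));

    int event = 7;
    ring_queue_pushback(&example_queue, &event);      /* copies stride bytes into the buffer */

    void* out = NULL;
    if (!is_ring_queue_empty(&example_queue))
        ring_queue_popfront(&example_queue, &out);    /* out points at the slot inside example_buffer */

    shutdown_ring_queue(&example_queue);
}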
@@ -1,15 +1,21 @@
 /*
 
-Copyright (C) 2024 Frosch
-
 This file is part of Shape of Fantasy.
+Copyright (C) 2024 Frosch
 
-Shape of Fantasy is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
 
-Shape of Fantasy is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with Shape of Fantasy. If not, see <https://www.gnu.org/licenses/>.
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 */
 
 #include "logger.h"

@@ -1,14 +1,20 @@
 /*
 
-Copyright (C) 2024 Frosch
-
 This file is part of Shape of Fantasy.
+Copyright (C) 2024 Frosch
 
-Shape of Fantasy is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
 
-Shape of Fantasy is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
 
-You should have received a copy of the GNU General Public License along with Shape of Fantasy. If not, see <https://www.gnu.org/licenses/>.
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 */
 
@@ -28,7 +34,10 @@ You should have received a copy of the GNU General Public License along with Sha
 #include <GLFW/glfw3.h>
 
 #define STB_IMAGE_IMPLEMENTATION
-#include <stb_image.h>
+#include <stb_image.h>
+
+#include <ft2build.h>
+#include FT_FREETYPE_H
 
 #define MAX_FRAMES_IN_FLIGHT 2 // must be a power of two!
 #define MAX_TEXTURES 256
@@ -221,7 +230,7 @@ u8 can_pop_window_event()
 return !is_ring_queue_empty(&window_event_queue);
 }
 
-void pop_window_event(window_event* event)
+void pop_window_event(window_event* restrict event)
 {
 void* ptr;
 ring_queue_popfront(&window_event_queue, &ptr);
@@ -266,7 +275,7 @@ static void window_key_callback(GLFWwindow* window, int key, int scancode, int a
 ring_queue_pushback(&window_event_queue, &event);
 }
 
-u8 init_window(const char* name, int width, int height)
+u8 init_window(const char* restrict name, const int width, const int height)
 {
 init_ring_queue(&window_event_queue, window_event_buffer, MAX_WINDOW_EVENTS_QUEUE_CAPACITY, sizeof(window_event));
 
@@ -304,7 +313,7 @@ void poll_window_input()
 // Vulkan
 //
 
-static VkDeviceSize aligned_size(VkDeviceSize old_size, VkDeviceSize alignment)
+static VkDeviceSize aligned_size(const VkDeviceSize old_size, const VkDeviceSize alignment)
 {
 return (old_size + alignment - 1) & ~(alignment - 1);
 }
@@ -541,7 +550,7 @@ static u8 init_surface()
 }
 
 // return queue family indices of a physical device; prefer graphics and presentation in the same queue
-static queue_family_indices find_queue_families(VkPhysicalDevice physical_device)
+static queue_family_indices find_queue_families(const VkPhysicalDevice physical_device)
 {
 queue_family_indices indices = {.is_graphics = 0, .is_present = 0};
 u32 queue_family_count = 0;
@@ -596,7 +605,7 @@ static queue_family_indices find_queue_families(VkPhysicalDevice physical_device
 return indices;
 }
 
-static u8 check_physical_device_extension_support(VkPhysicalDevice physical_device)
+static u8 check_physical_device_extension_support(const VkPhysicalDevice physical_device)
 {
 u32 available_extension_size = 0;
 if (vkEnumerateDeviceExtensionProperties(physical_device, NULL, &available_extension_size, NULL) != VK_SUCCESS)
@@ -633,7 +642,7 @@ static u8 check_physical_device_extension_support(VkPhysicalDevice physical_devi
 return 1;
 }
 
-static u8 init_swap_chain_support_details(swap_chain_support_details* support_details, VkPhysicalDevice physical_device)
+static u8 init_swap_chain_support_details(swap_chain_support_details* restrict support_details, const VkPhysicalDevice physical_device)
 {
 support_details->formats = NULL;
 support_details->format_size = 0;
@@ -679,7 +688,7 @@ static u8 init_swap_chain_support_details(swap_chain_support_details* support_de
 return 1;
 }
 
-static void shutdown_swap_chain_support_details(swap_chain_support_details* support_details)
+static void shutdown_swap_chain_support_details(swap_chain_support_details* restrict support_details)
 {
 free(support_details->formats);
 free(support_details->present_modes);
@@ -807,9 +816,9 @@ static u8 init_logical_device()
 }
 
 VkDeviceQueueCreateInfo queue_infos[2];
-f32 priority = 1.0f;
+const f32 priority = 1.0f;
 for (u32 i = 0; i < unique_queue_family_size; i++)
-queue_infos[i] =(VkDeviceQueueCreateInfo)
+queue_infos[i] = (VkDeviceQueueCreateInfo)
 {
 .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
 .queueFamilyIndex = unique_queue_families[i],
@@ -860,7 +869,7 @@ static u8 init_logical_device()
 return 1;
 }
 
-static u8 allocate_command_buffers(VkCommandPool command_pool, VkCommandBuffer* command_buffers, u32 command_buffer_count)
+static u8 allocate_command_buffers(const VkCommandPool command_pool, VkCommandBuffer* restrict command_buffers, const u32 command_buffer_count)
 {
 const VkCommandBufferAllocateInfo alloc_info =
 {
@@ -880,7 +889,7 @@ static u8 allocate_command_buffers(VkCommandPool command_pool, VkCommandBuffer*
 return 1;
 }
 
-static u8 begin_command_recording(VkCommandBuffer command_buffer, VkCommandBufferUsageFlags flags)
+static u8 begin_command_recording(const VkCommandBuffer command_buffer, const VkCommandBufferUsageFlags flags)
 {
 const VkCommandBufferBeginInfo info =
 {
@@ -897,7 +906,7 @@ static u8 begin_command_recording(VkCommandBuffer command_buffer, VkCommandBuffe
 return 1;
 }
 
-static u8 end_command_recording(VkCommandBuffer command_buffer)
+static u8 end_command_recording(const VkCommandBuffer command_buffer)
 {
 if (vkEndCommandBuffer(command_buffer) != VK_SUCCESS)
 {
@@ -916,7 +925,7 @@ void shutdown_single_submit_context()
 
 u8 init_single_submit_context()
 {
-queue_family_indices family_indices = find_queue_families(context.device.physical);
+const queue_family_indices family_indices = find_queue_families(context.device.physical);
 const VkCommandPoolCreateInfo create_info =
 {
 .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
@@ -948,12 +957,12 @@ u8 init_single_submit_context()
 return 1;
 }
 
-static u8 begin_single_submit_command_recording(VkCommandBuffer command_buffer)
+static u8 begin_single_submit_command_recording(const VkCommandBuffer command_buffer)
 {
 return begin_command_recording(command_buffer, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT);
 }
 
-static u8 end_single_submit_command_recording(VkCommandBuffer command_buffer)
+static u8 end_single_submit_command_recording(const VkCommandBuffer command_buffer)
 {
 end_command_recording(command_buffer);
 
@@ -982,7 +991,7 @@ static u8 end_single_submit_command_recording(VkCommandBuffer command_buffer)
 return 1;
 }
 
-static u32 buffer_find_memory_type(VkPhysicalDevice physical_device, u32 type_filter, VkMemoryPropertyFlags properties)
+static u32 buffer_find_memory_type(const VkPhysicalDevice physical_device, const u32 type_filter, const VkMemoryPropertyFlags properties)
 {
 VkPhysicalDeviceMemoryProperties device_mem_properties;
 vkGetPhysicalDeviceMemoryProperties(physical_device, &device_mem_properties);
@@ -997,9 +1006,9 @@ static u32 buffer_find_memory_type(VkPhysicalDevice physical_device, u32 type_fi
 return 0;
 }
 
-static u8 init_vulkan_buffer(vulkan_buffer* buffer, VkDeviceSize byte_size, VkBufferUsageFlags usage, VkMemoryPropertyFlags properties)
+static u8 init_vulkan_buffer(vulkan_buffer* restrict buffer, const VkDeviceSize byte_size, const VkBufferUsageFlags usage, const VkMemoryPropertyFlags properties)
 {
-VkBufferCreateInfo buffer_info =
+const VkBufferCreateInfo buffer_info =
 {
 .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
 .size = byte_size,
@@ -1018,7 +1027,7 @@ static u8 init_vulkan_buffer(vulkan_buffer* buffer, VkDeviceSize byte_size, VkBu
 
 VkMemoryRequirements mem_reqs;
 vkGetBufferMemoryRequirements(context.device.logical, buffer->handle, &mem_reqs);
-VkMemoryAllocateInfo mem_alloc_info =
+const VkMemoryAllocateInfo mem_alloc_info =
 {
 .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
 .allocationSize = mem_reqs.size,
@@ -1042,36 +1051,36 @@ static u8 init_vulkan_buffer(vulkan_buffer* buffer, VkDeviceSize byte_size, VkBu
 }
 
 // staging buffer, CPU accessible memory
-static u8 init_vulkan_staging_buffer(vulkan_buffer* buffer, VkDeviceSize byte_size)
+static u8 init_vulkan_staging_buffer(vulkan_buffer* restrict buffer, const VkDeviceSize byte_size)
 {
 return init_vulkan_buffer(buffer, byte_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
 }
 
 // uniform buffer, CPU accessible memory
-static u8 init_vulkan_uniform_buffer(vulkan_buffer* buffer, VkDeviceSize byte_size)
+static u8 init_vulkan_uniform_buffer(vulkan_buffer* restrict buffer, const VkDeviceSize byte_size)
 {
 return init_vulkan_buffer(buffer, byte_size, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
 }
 
 // vertex buffer, device memory
-static u8 init_vulkan_vertex_buffer(vulkan_buffer* buffer, VkDeviceSize byte_size)
+static u8 init_vulkan_vertex_buffer(vulkan_buffer* restrict buffer, const VkDeviceSize byte_size)
 {
 return init_vulkan_buffer(buffer, byte_size, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
 }
 
 // index buffer, device memory
-static u8 init_vulkan_index_buffer(vulkan_buffer* buffer, VkDeviceSize byte_size)
+static u8 init_vulkan_index_buffer(vulkan_buffer* restrict buffer, const VkDeviceSize byte_size)
 {
 return init_vulkan_buffer(buffer, byte_size, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
 }
 
-static void shutdown_vulkan_buffer(vulkan_buffer* buffer)
+static void shutdown_vulkan_buffer(const vulkan_buffer* restrict buffer)
 {
 vkFreeMemory(context.device.logical, buffer->memory, context.allocator);
 vkDestroyBuffer(context.device.logical, buffer->handle, context.allocator);
 }
 
-static u8 map_vulkan_buffer(vulkan_buffer* buffer, VkDeviceSize offset, VkDeviceSize byte_size, VkMemoryMapFlags flags)
+static u8 map_vulkan_buffer(vulkan_buffer* restrict buffer, const VkDeviceSize offset, const VkDeviceSize byte_size, const VkMemoryMapFlags flags)
 {
 if (vkMapMemory(context.device.logical, buffer->memory, offset, byte_size, flags, &buffer->mapped) != VK_SUCCESS)
 {
@@ -1081,12 +1090,12 @@ static u8 map_vulkan_buffer(vulkan_buffer* buffer, VkDeviceSize offset, VkDevice
 return 1;
 }
 
-static void unmap_vulkan_buffer(vulkan_buffer* buffer)
+static void unmap_vulkan_buffer(const vulkan_buffer* restrict buffer)
 {
 vkUnmapMemory(context.device.logical, buffer->memory);
 }
 
-static u8 load_vulkan_buffer(vulkan_buffer* buffer, VkDeviceSize offset, VkDeviceSize byte_size, VkMemoryMapFlags flags, const void* data)
+static u8 load_vulkan_buffer(vulkan_buffer* restrict buffer, const VkDeviceSize offset, const VkDeviceSize byte_size, const VkMemoryMapFlags flags, const void* restrict data)
 {
 if (!map_vulkan_buffer(buffer, offset, byte_size, flags))
 {
@@ -1099,14 +1108,14 @@ static u8 load_vulkan_buffer(vulkan_buffer* buffer, VkDeviceSize offset, VkDevic
 return 1;
 }
 
-static u8 copy_vulkan_buffer(vulkan_buffer* dst, vulkan_buffer* src, VkDeviceSize dst_offset, VkDeviceSize src_offset, VkDeviceSize byte_size)
+static u8 copy_vulkan_buffer(const vulkan_buffer* restrict dst, const vulkan_buffer* restrict src, const VkDeviceSize dst_offset, const VkDeviceSize src_offset, const VkDeviceSize byte_size)
 {
 VkCommandBuffer command_buffer;
 if (!allocate_command_buffers(context.single_submit_context.command_pool, &command_buffer, 1)) return 0;
 
 if (!begin_single_submit_command_recording(command_buffer)) return 0;
 
-VkBufferCopy copy_region =
+const VkBufferCopy copy_region =
 {
 .dstOffset = dst_offset,
 .srcOffset = src_offset,
@@ -1120,7 +1129,7 @@ static u8 copy_vulkan_buffer(vulkan_buffer* dst, vulkan_buffer* src, VkDeviceSiz
 return 1;
 }
 
-static u8 upload_vulkan_gpu_buffer(vulkan_buffer* buffer, const void* data, const VkDeviceSize byte_size)
+static u8 upload_vulkan_gpu_buffer(const vulkan_buffer* restrict buffer, const void* restrict data, const VkDeviceSize byte_size)
 {
 vulkan_buffer staging_buffer;
 if (!init_vulkan_staging_buffer(&staging_buffer, byte_size)) return 0;
@@ -1130,7 +1139,7 @@ static u8 upload_vulkan_gpu_buffer(vulkan_buffer* buffer, const void* data, cons
 return 1;
 }
 
-static u8 find_vulkan_memory_type(u32* type_index, u32 type_filter, VkMemoryPropertyFlags properties)
+static u8 find_vulkan_memory_type(u32* restrict type_index, const u32 type_filter, const VkMemoryPropertyFlags properties)
 {
 VkPhysicalDeviceMemoryProperties physical_properties;
 vkGetPhysicalDeviceMemoryProperties(context.device.physical, &physical_properties);
@@ -1145,7 +1154,7 @@ static u8 find_vulkan_memory_type(u32* type_index, u32 type_filter, VkMemoryProp
 return 0;
 }
 
-static u8 init_image_view(VkImageView* view, VkImage image, VkImageViewType type, VkFormat format, VkImageAspectFlags flags, u32 layer_count)
+static u8 init_image_view(VkImageView* restrict view, const VkImage image, const VkImageViewType type, const VkFormat format, const VkImageAspectFlags flags, const u32 layer_count)
 {
 const VkImageSubresourceRange subresource_range =
 {
@@ -1175,14 +1184,14 @@ static u8 init_image_view(VkImageView* view, VkImage image, VkImageViewType type
 return 1;
 }
 
-static void shutdown_image_view(VkImageView view)
+static void shutdown_image_view(const VkImageView view)
 {
 vkDestroyImageView(context.device.logical, view, context.allocator);
 }
 
-static u8 init_vulkan_image(vulkan_image* image, u32 width, u32 height, u32 layer_count, VkFormat format,
-VkImageTiling tiling, VkImageUsageFlags usage, VkMemoryPropertyFlags properties,
-VkImageViewType view_type, VkImageAspectFlags aspect_flags)
+static u8 init_vulkan_image(vulkan_image* restrict image, const u32 width, const u32 height, const u32 layer_count, const VkFormat format,
+const VkImageTiling tiling, const VkImageUsageFlags usage, const VkMemoryPropertyFlags properties,
+const VkImageViewType view_type, const VkImageAspectFlags aspect_flags)
 {
 image->width = width;
 image->height = height;
@@ -1224,7 +1233,7 @@ static u8 init_vulkan_image(vulkan_image* image, u32 width, u32 height, u32 laye
 trace_log(LOG_ERROR, "Failed to create Vulkan image; failed to find a suitable memory type.");
 return 0;
 }
-VkMemoryAllocateInfo mem_alloc_info =
+const VkMemoryAllocateInfo mem_alloc_info =
 {
 .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
 .allocationSize = mem_reqs.size,
@@ -1246,14 +1255,14 @@ static u8 init_vulkan_image(vulkan_image* image, u32 width, u32 height, u32 laye
 return init_image_view(&image->view_handle, image->handle, view_type, format, aspect_flags, layer_count);
 }
 
-static void shutdown_vulkan_image(vulkan_image* image)
+static void shutdown_vulkan_image(const vulkan_image* restrict image)
 {
 shutdown_image_view(image->view_handle);
 vkFreeMemory(context.device.logical, image->memory, context.allocator);
 vkDestroyImage(context.device.logical, image->handle, context.allocator);
 }
 
-static u8 transition_image_layout(vulkan_image* image, VkImageLayout old_layout, VkImageLayout new_layout)
+static u8 transition_image_layout(const vulkan_image* restrict image, const VkImageLayout old_layout, const VkImageLayout new_layout)
 {
 VkCommandBuffer command_buffer;
 if (!allocate_command_buffers(context.single_submit_context.command_pool, &command_buffer, 1)) return 0;
@@ -1313,7 +1322,7 @@ static u8 transition_image_layout(vulkan_image* image, VkImageLayout old_layout,
 return 1;
 }
 
-static u8 load_image_2D(vulkan_image* image, u32 offset_x, u32 offset_y, u32 size_x, u32 size_y, const void* pixels, u64 byte_size)
+static u8 load_image_2D(const vulkan_image* restrict image, const u32 offset_x, const u32 offset_y, const u32 size_x, const u32 size_y, const void* restrict pixels, const u64 byte_size)
 {
 vulkan_buffer staging_buffer;
 if (!init_vulkan_staging_buffer(&staging_buffer, byte_size)) return 0;
@@ -1355,7 +1364,7 @@ static u8 load_image_2D(vulkan_image* image, u32 offset_x, u32 offset_y, u32 siz
 return 1;
 }
 
-static u8 load_image_2D_array(vulkan_image* image, u32 size_x, u32 size_y, const void* pixels, u64 byte_size)
+static u8 load_image_2D_array(const vulkan_image* restrict image, const u32 size_x, const u32 size_y, const void* restrict pixels, const u64 byte_size)
 {
 vulkan_buffer staging_buffer;
 if (!init_vulkan_staging_buffer(&staging_buffer, byte_size)) return 0;
@@ -1402,7 +1411,7 @@ static u8 load_image_2D_array(vulkan_image* image, u32 size_x, u32 size_y, const
 return 1;
 }
 
-static VkSurfaceFormatKHR choose_surface_format(const VkSurfaceFormatKHR* available_formats, u32 available_format_size)
+static VkSurfaceFormatKHR choose_surface_format(const VkSurfaceFormatKHR* restrict available_formats, const u32 available_format_size)
 {
 for (u32 i = 0; i < available_format_size; i++)
 if (available_formats[i].format == VK_FORMAT_B8G8R8A8_SRGB && available_formats[i].colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR)
@@ -1411,7 +1420,7 @@ static VkSurfaceFormatKHR choose_surface_format(const VkSurfaceFormatKHR* availa
 return available_formats[0];
 }
 
-static VkPresentModeKHR choose_present_mode(const VkPresentModeKHR* available_present_modes, u32 available_present_mode_size)
+static VkPresentModeKHR choose_present_mode(const VkPresentModeKHR* restrict available_present_modes, const u32 available_present_mode_size)
 {
 for (u32 i = 0; i < available_present_mode_size; i++)
 if (available_present_modes[i] == VK_PRESENT_MODE_MAILBOX_KHR)
@@ -1420,7 +1429,7 @@ static VkPresentModeKHR choose_present_mode(const VkPresentModeKHR* available_pr
 return VK_PRESENT_MODE_FIFO_KHR;
 }
 
-static VkExtent2D choose_extent(const VkSurfaceCapabilitiesKHR* capabilities)
+static VkExtent2D choose_extent(const VkSurfaceCapabilitiesKHR* restrict capabilities)
 {
 if (capabilities->currentExtent.width != UINT32_MAX) return capabilities->currentExtent;
 
@@ -1449,7 +1458,7 @@ static void shutdown_swap_chain()
 trace_log(LOG_DEBUG, "Vulkan swap chain successfully destroyed.");
 }
 
-static u8 init_swap_chain(u8 do_signal_shutdown)
+static u8 init_swap_chain(const u8 do_signal_shutdown)
 {
 swap_chain_support_details support_details;
 init_swap_chain_support_details(&support_details, context.device.physical);
@@ -1563,9 +1572,9 @@ void shutdown_depth()
 trace_log(LOG_DEBUG, "Depth image successfully destroyed.");
 }
 
-static u8 init_depth(u8 do_signal_shutdown)
+static u8 init_depth(const u8 do_signal_shutdown)
 {
-VkFormat format = find_depth_format();
+const VkFormat format = find_depth_format();
 if (format == VK_FORMAT_UNDEFINED)
 {
 trace_log(LOG_ERROR, "Failed to create depth image; failed to find a supported format.");
@@ -1667,7 +1676,7 @@ static u8 init_render_pass()
 return 1;
 }
 
-static void begin_render_pass(VkCommandBuffer command_buffer, u32 image_index)
+static void begin_render_pass(const VkCommandBuffer command_buffer, const u32 image_index)
 {
 const VkRect2D render_area =
 {
@@ -1692,7 +1701,7 @@ static void begin_render_pass(VkCommandBuffer command_buffer, u32 image_index)
 vkCmdBeginRenderPass(command_buffer, &info, VK_SUBPASS_CONTENTS_INLINE);
 }
 
-static void end_render_pass(VkCommandBuffer command_buffer)
+static void end_render_pass(const VkCommandBuffer command_buffer)
 {
 vkCmdEndRenderPass(command_buffer);
 }
@@ -1705,7 +1714,7 @@ static void shutdown_swap_chain_framebuffers()
 trace_log(LOG_DEBUG, "Swap chain framebuffers successfully destroyed.");
 }
 
-static u8 init_swap_chain_framebuffers(u8 do_signal_shutdown)
+static u8 init_swap_chain_framebuffers(const u8 do_signal_shutdown)
 {
 context.swap_chain_framebuffers = malloc(context.swap_chain.image_count * sizeof(VkFramebuffer));
 for (u32 i = 0; i < context.swap_chain.image_count; i++)
@@ -1743,7 +1752,7 @@ static void shutdown_command_pool()
 
 static u8 init_command_pool()
 {
-queue_family_indices family_indices = find_queue_families(context.device.physical);
+const queue_family_indices family_indices = find_queue_families(context.device.physical);
 const VkCommandPoolCreateInfo create_info =
 {
 .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
@@ -1912,11 +1921,11 @@ static u8 init_camera()
 };
 const VkWriteDescriptorSet write =
 {
-.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
 .dstSet = context.camera_descriptor_sets[i],
 .dstBinding = 0,
 .dstArrayElement = 0,
-.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
 .descriptorCount = 1,
 .pBufferInfo = &buffer_info,
 .pImageInfo = NULL,
@ -1933,25 +1942,25 @@ static u8 init_camera()
|
||||
return 1;
|
||||
}
|
||||
|
||||
void set_camera_view(const f32* view)
|
||||
void set_camera_view(const f32* restrict view)
|
||||
{
|
||||
memcpy(context.camera.view, view, sizeof(f32) * 16);
|
||||
context.update_camera_uniform_counter= MAX_FRAMES_IN_FLIGHT;
|
||||
}
|
||||
|
||||
void set_camera_perspective_projection(const f32* perspective)
|
||||
void set_camera_perspective_projection(const f32* restrict perspective)
|
||||
{
|
||||
memcpy(context.camera.perspective, perspective, sizeof(f32) * 16);
|
||||
context.update_camera_uniform_counter = MAX_FRAMES_IN_FLIGHT;
|
||||
}
|
||||
|
||||
void set_camera_orthographic_projection(const f32* orthographic)
|
||||
void set_camera_orthographic_projection(const f32* restrict orthographic)
|
||||
{
|
||||
memcpy(context.camera.orthographic, orthographic, sizeof(f32) * 16);
|
||||
context.update_camera_uniform_counter = MAX_FRAMES_IN_FLIGHT;
|
||||
}
|
||||
|
||||
static u8 init_texture(texture* t, u32 width, u32 height, VkImageViewType view_type, VkFormat format, u32 layer_count)
|
||||
static u8 init_texture(texture* restrict t, const u32 width, const u32 height, const VkImageViewType view_type, const VkFormat format, const u32 layer_count)
|
||||
{
|
||||
if (!init_vulkan_image(&t->image, width, height, layer_count, format, VK_IMAGE_TILING_OPTIMAL,
|
||||
VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
|
||||
@ -2011,7 +2020,7 @@ static u8 init_texture(texture* t, u32 width, u32 height, VkImageViewType view_t
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void shutdown_texture(texture* t)
|
||||
static void shutdown_texture(const texture* restrict t)
|
||||
{
|
||||
vkFreeDescriptorSets(context.device.logical, context.texture_descriptor_pool, MAX_FRAMES_IN_FLIGHT, t->descriptor_sets);
|
||||
shutdown_vulkan_image(&t->image);
|
||||
@ -2041,7 +2050,7 @@ static u8 init_textures()
|
||||
VkPhysicalDeviceProperties properties;
|
||||
vkGetPhysicalDeviceProperties(context.device.physical, &properties);
|
||||
|
||||
VkSamplerCreateInfo sampler_info =
|
||||
const VkSamplerCreateInfo sampler_info =
|
||||
{
|
||||
.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
|
||||
.magFilter = VK_FILTER_LINEAR,
|
||||
@ -2118,7 +2127,7 @@ static u8 init_textures()
|
||||
return 1;
|
||||
}
|
||||
|
||||
u8 insert_image_2d_texture_from_file(const char* name)
u8 insert_image_2d_texture_from_file(const char* restrict name)
{
char path[512];
snprintf(path, 512, "resources/textures/%s.png", name);
@ -2142,7 +2151,7 @@ u8 insert_image_2d_texture_from_file(const char* name)
return (hash_table_insert(&context.textures, name, &t) != NULL);
}

u8 remove_texture(const char* name)
u8 remove_texture(const char* restrict name)
{
texture t;
void* ptexture = &t;
@ -2168,7 +2177,7 @@ static VkDeviceSize get_vertex_input_byte_size(const vertex_input_type t, const
}
}

static u8 init_mesh(mesh* msh, const mesh_geometry* geometry, vertex_input_type geometry_type, u64 max_instances, vertex_input_type instance_type)
static u8 init_mesh(mesh* restrict msh, const mesh_geometry* restrict geometry, const vertex_input_type geometry_type, const u64 max_instances, const vertex_input_type instance_type)
{
if (!geometry) return 0;

@ -2183,22 +2192,22 @@ static u8 init_mesh(mesh* msh, const mesh_geometry* geometry, vertex_input_type

if (!max_instances) return 0;

const VkDeviceSize byte_size = get_vertex_input_byte_size(instance_type, max_instances);
if (!init_vulkan_vertex_buffer(&msh->instance_buffer, byte_size)) return 0;
const VkDeviceSize instance_byte_size = get_vertex_input_byte_size(instance_type, max_instances);
if (!init_vulkan_vertex_buffer(&msh->instance_buffer, instance_byte_size)) return 0;
msh->max_instances = max_instances;
msh->instance_count = 0;

return 1;
}

static void shutdown_mesh(mesh* msh)
static void shutdown_mesh(const mesh* restrict msh)
{
shutdown_vulkan_buffer(&msh->index_buffer);
shutdown_vulkan_buffer(&msh->instance_buffer);
shutdown_vulkan_buffer(&msh->vertex_buffer);
}

static u8 init_shader_module(VkShaderModule* module, const char* path)
static u8 init_shader_module(VkShaderModule* restrict module, const char* restrict path)
{
*module = VK_NULL_HANDLE;

@ -2209,7 +2218,7 @@ static u8 init_shader_module(VkShaderModule* module, const char* path)
return 0;
}
fseek(file, 0L, SEEK_END);
long int size = ftell(file);
const long int size = ftell(file);
rewind(file);
char* byte_code = (char*)malloc(size + 1);
fread(byte_code, size, 1, file);
@ -2233,12 +2242,12 @@ static u8 init_shader_module(VkShaderModule* module, const char* path)
return 1;
}
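The elided middle of init_shader_module presumably wraps the loaded bytes in a VkShaderModuleCreateInfo before creating the module; the sketch below shows the usual shape of that step. Only byte_code and size come from the visible lines; everything else is an assumption about code the hunk does not show:

    const VkShaderModuleCreateInfo module_info =
    {
        .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
        .codeSize = (size_t)size,              /* byte count from ftell() */
        .pCode = (const uint32_t*)byte_code    /* SPIR-V words; malloc'd storage is suitably aligned */
    };
    if (vkCreateShaderModule(context.device.logical, &module_info, context.allocator, module) != VK_SUCCESS)
    {
        free(byte_code);
        return 0;
    }
    free(byte_code);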
static void shutdown_shader_module(VkShaderModule module)
static void shutdown_shader_module(const VkShaderModule module)
{
vkDestroyShaderModule(context.device.logical, module, context.allocator);
}

static u8 init_mesh_pipeline(mesh_pipeline* pipeline, const mesh_pipeline_template* template)
static u8 init_mesh_pipeline(mesh_pipeline* restrict pipeline, const mesh_pipeline_template* restrict template)
{
// device features
VkPhysicalDeviceFeatures device_features;
@ -2410,7 +2419,7 @@ static u8 init_mesh_pipeline(mesh_pipeline* pipeline, const mesh_pipeline_templa
};

// depth and stencil; reverse z convention means higher depth brings you closer
VkPipelineDepthStencilStateCreateInfo depth_stencil =
const VkPipelineDepthStencilStateCreateInfo depth_stencil =
{
.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
.depthTestEnable = VK_TRUE,
@ -2535,7 +2544,7 @@ static u8 init_mesh_pipeline(mesh_pipeline* pipeline, const mesh_pipeline_templa
destroy_darray(stage_infos);
for (u8 i = 0; i < 2; i++) shutdown_shader_module(modules[i]);

// meshes
// prep for meshes
pipeline->geometry_type = template->geometry_type;
pipeline->instance_type = template->instance_type;
init_hash_table(&pipeline->meshes, 8, sizeof(mesh), basic_hash);
@ -2547,7 +2556,7 @@ static u8 init_mesh_pipeline(mesh_pipeline* pipeline, const mesh_pipeline_templa
return 1;
}
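The reverse-z comment above implies the depth test favours larger values and the depth attachment is cleared to 0.0; the hunk cuts off before either setting, so the sketch below is an assumption about what that convention usually requires rather than a quote from the file:

    /* Assumed reverse-z settings, not shown in the hunk above. */
    const VkPipelineDepthStencilStateCreateInfo reverse_z_depth_stencil =
    {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
        .depthTestEnable = VK_TRUE,
        .depthWriteEnable = VK_TRUE,
        .depthCompareOp = VK_COMPARE_OP_GREATER   /* larger depth value means closer to the camera */
    };

    /* Matching clear value: the far plane sits at 0.0 under reverse-z. */
    const VkClearValue reverse_z_depth_clear = { .depthStencil = { .depth = 0.0f, .stencil = 0 } };

Reverse-z pairs a greater-than compare with a floating-point depth buffer so that precision is spent near the camera instead of near the far plane.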
static void shutdown_mesh_pipeline(mesh_pipeline* pipeline)
static void shutdown_mesh_pipeline(mesh_pipeline* restrict pipeline)
{
// clean up meshes
for (u32 i = 0; i < pipeline->meshes.slot_count; i++)
@ -2563,11 +2572,11 @@ static void shutdown_mesh_pipeline(mesh_pipeline* pipeline)
vkDestroyPipelineLayout(context.device.logical, pipeline->layout, context.allocator);
}

u8 draw_mesh_pipeline(const char* name)
u8 draw_mesh_pipeline(const char* restrict name)
{
// bind pipeline
VkCommandBuffer command_buffer = context.frames[context.current_frame].command_buffer;
mesh_pipeline* pipeline = hash_table_lookup(&context.mesh_pipelines, name);
const VkCommandBuffer command_buffer = context.frames[context.current_frame].command_buffer;
const mesh_pipeline* pipeline = hash_table_lookup(&context.mesh_pipelines, name);
if (!pipeline)
{
trace_log(LOG_ERROR, "Failed to draw mesh pipeline '%s'; pipeline lookup failed.", name);
@ -2581,7 +2590,7 @@ u8 draw_mesh_pipeline(const char* name)
// bind texture descriptor set (set 1)
if (pipeline->texture_name[0] != '\0')
{
texture* t = hash_table_lookup(&context.textures, pipeline->texture_name);
const texture* t = hash_table_lookup(&context.textures, pipeline->texture_name);
if (!t)
{
trace_log(LOG_ERROR, "Failed to draw mesh pipeline '%s'; texture '%s' lookup failed.", name, pipeline->texture_name);
@ -2593,7 +2602,7 @@ u8 draw_mesh_pipeline(const char* name)
for (u32 i = 0; i < pipeline->meshes.slot_count; i++)
for (hash_table_entry* entry = pipeline->meshes.entries[i]; entry != NULL; entry = entry->next)
{
mesh* msh = entry->data;
const mesh* msh = entry->data;

// bind vertex, instance, and index buffers
const VkDeviceSize offsets[] = { 0 };
@ -2610,9 +2619,9 @@ u8 draw_mesh_pipeline(const char* name)
return 1;
}
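The per-mesh loop in draw_mesh_pipeline above presumably binds the three per-mesh buffers and issues one instanced, indexed draw; a hedged sketch of those calls follows. The .handle member and the index_count field are assumptions about the engine's buffer and mesh structs, not names taken from the diff:

    /* Sketch of the elided per-mesh draw; struct member names are assumed. */
    vkCmdBindVertexBuffers(command_buffer, 0, 1, &msh->vertex_buffer.handle, offsets);
    vkCmdBindVertexBuffers(command_buffer, 1, 1, &msh->instance_buffer.handle, offsets);
    vkCmdBindIndexBuffer(command_buffer, msh->index_buffer.handle, 0, VK_INDEX_TYPE_UINT32);
    vkCmdDrawIndexed(command_buffer, msh->index_count, msh->instance_count, 0, 0, 0);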
u8 insert_mesh_pipeline(const char* name, vertex_input_type geometry_type, vertex_input_type instance_type, const char* texture_name)
u8 insert_mesh_pipeline(const char* restrict name, const vertex_input_type geometry_type, const vertex_input_type instance_type, const char* restrict texture_name)
{
mesh_pipeline_template template =
const mesh_pipeline_template template =
{
.name = name,
.geometry_type = geometry_type,
@ -2630,7 +2639,7 @@ u8 insert_mesh_pipeline(const char* name, vertex_input_type geometry_type, verte
return 1;
}

u8 remove_mesh_pipeline(const char* name)
u8 remove_mesh_pipeline(const char* restrict name)
{
mesh_pipeline* pipeline = hash_table_lookup(&context.mesh_pipelines, name);
if (!pipeline)
@ -2643,7 +2652,7 @@ u8 remove_mesh_pipeline(const char* name)
return hash_table_remove(&context.mesh_pipelines, name, NULL);
}

u8 change_mesh_pipeline_mode(const char* name, mesh_pipeline_mode mode)
u8 change_mesh_pipeline_mode(const char* restrict name, const mesh_pipeline_mode mode)
{
mesh_pipeline* pipeline = hash_table_lookup(&context.mesh_pipelines, name);
if (!pipeline)
@ -2672,7 +2681,7 @@ u8 change_mesh_pipeline_mode(const char* name, mesh_pipeline_mode mode)
}
}

u8 insert_mesh(const char* pipeline_name, const char* mesh_name, const mesh_geometry* geometry, const u64 max_instances)
u8 insert_mesh(const char* restrict pipeline_name, const char* restrict mesh_name, const mesh_geometry* restrict geometry, const u64 max_instances)
{
mesh_pipeline* pipeline = hash_table_lookup(&context.mesh_pipelines, pipeline_name);
if (!pipeline)
@ -2692,7 +2701,7 @@ u8 insert_mesh(const char* pipeline_name, const char* mesh_name, const mesh_geom
return 1;
}

u8 remove_mesh(const char* pipeline_name, const char* mesh_name)
u8 remove_mesh(const char* restrict pipeline_name, const char* restrict mesh_name)
{
mesh_pipeline* pipeline = hash_table_lookup(&context.mesh_pipelines, pipeline_name);
if (!pipeline)
@ -2712,7 +2721,7 @@ u8 remove_mesh(const char* pipeline_name, const char* mesh_name)
return hash_table_remove(&pipeline->meshes, mesh_name, NULL);
}

u8 load_mesh_instance_data(const char* pipeline_name, const char* mesh_name, const mesh_instance_data* instance_data)
u8 load_mesh_instance_data(const char* restrict pipeline_name, const char* restrict mesh_name, const mesh_instance_data* restrict instance_data)
{
mesh_pipeline* pipeline = hash_table_lookup(&context.mesh_pipelines, pipeline_name);
if (!pipeline)
@ -2734,8 +2743,8 @@ u8 load_mesh_instance_data(const char* pipeline_name, const char* mesh_name, con
return 0;
}

const VkDeviceSize byte_size = get_vertex_input_byte_size(pipeline->instance_type, instance_data->instance_count);
if (!upload_vulkan_gpu_buffer(&msh->instance_buffer, instance_data->instances, byte_size)) return 0;
const VkDeviceSize instance_byte_size = get_vertex_input_byte_size(pipeline->instance_type, instance_data->instance_count);
if (!upload_vulkan_gpu_buffer(&msh->instance_buffer, instance_data->instances, instance_byte_size)) return 0;
msh->instance_count = instance_data->instance_count;

return 1;
@ -2819,7 +2828,7 @@ static u8 recreate_swap_chain()

u8 begin_render_frame()
{
vulkan_frame_data* frame = context.frames + context.current_frame;
const vulkan_frame_data* frame = context.frames + context.current_frame;

vkWaitForFences(context.device.logical, 1, &frame->in_flight_fence, VK_TRUE, UINT64_MAX);

@ -2844,11 +2853,11 @@ u8 begin_render_frame()

vkResetFences(context.device.logical, 1, &frame->in_flight_fence);

VkCommandBuffer command_buffer = frame->command_buffer;
const VkCommandBuffer command_buffer = frame->command_buffer;
vkResetCommandBuffer(command_buffer, 0);
begin_command_recording(command_buffer, 0);

VkViewport viewport =
const VkViewport viewport =
{
.x = 0.0f,
.y = 0.0f,
@ -2859,7 +2868,7 @@ u8 begin_render_frame()
};
vkCmdSetViewport(command_buffer, 0, 1, &viewport);

VkRect2D scissor =
const VkRect2D scissor =
{
.offset = {0, 0},
.extent = context.swap_chain.extent
@ -2873,8 +2882,8 @@ u8 begin_render_frame()

u8 end_render_frame()
{
vulkan_frame_data* frame = context.frames + context.current_frame;
VkCommandBuffer command_buffer = frame->command_buffer;
const vulkan_frame_data* frame = context.frames + context.current_frame;
const VkCommandBuffer command_buffer = frame->command_buffer;
end_render_pass(command_buffer);
end_command_recording(command_buffer);
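Taken together with the header changes below, the public frame API reads as a begin/draw/end sequence; a caller-side sketch using only the declared functions (the pipeline name is a placeholder, not a name from the engine):

    while (is_window_open())
    {
        poll_window_input();
        if (!begin_render_frame()) continue;
        draw_mesh_pipeline("example_pipeline");   /* placeholder name */
        end_render_frame();
    }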
@ -116,26 +116,26 @@ typedef enum
//

u8 can_pop_window_event();
void pop_window_event(window_event* event);
u8 init_window(const char* name, int width, int height);
void pop_window_event(window_event* restrict event);
u8 init_window(const char* restrict name, const int width, const int height);
void shutdown_window();
u8 is_window_open();
void poll_window_input();

void set_camera_view(const f32* view);
void set_camera_perspective_projection(const f32* perspective);
void set_camera_orthographic_projection(const f32* orthographic);
void set_camera_view(const f32* restrict view);
void set_camera_perspective_projection(const f32* restrict perspective);
void set_camera_orthographic_projection(const f32* restrict orthographic);

u8 insert_image_2d_texture_from_file(const char* name);
u8 remove_texture(const char* name);
u8 insert_image_2d_texture_from_file(const char* restrict name);
u8 remove_texture(const char* restrict name);

u8 draw_mesh_pipeline(const char* name);
u8 insert_mesh_pipeline(const char* name, vertex_input_type geometry_type, vertex_input_type instance_type, const char* texture_name);
u8 remove_mesh_pipeline(const char* name);
u8 change_mesh_pipeline_mode(const char* name, mesh_pipeline_mode mode);
u8 insert_mesh(const char* pipeline_name, const char* mesh_name, const mesh_geometry* geometry, const u64 max_instances);
u8 remove_mesh(const char* pipeline_name, const char* mesh_name);
u8 load_mesh_instance_data(const char* pipeline_name, const char* mesh_name, const mesh_instance_data* instance_data);
u8 draw_mesh_pipeline(const char* restrict name);
u8 insert_mesh_pipeline(const char* restrict name, const vertex_input_type geometry_type, const vertex_input_type instance_type, const char* restrict texture_name);
u8 remove_mesh_pipeline(const char* restrict name);
u8 change_mesh_pipeline_mode(const char* restrict name, const mesh_pipeline_mode mode);
u8 insert_mesh(const char* restrict pipeline_name, const char* restrict mesh_name, const mesh_geometry* restrict geometry, const u64 max_instances);
u8 remove_mesh(const char* restrict pipeline_name, const char* restrict mesh_name);
u8 load_mesh_instance_data(const char* restrict pipeline_name, const char* restrict mesh_name, const mesh_instance_data* restrict instance_data);

u8 init_vulkan();
void shutdown_vulkan();
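Two notes on these signature changes. const on a by-value parameter (for example const int width) is an implementation-side detail: it does not change the function type, so the header and the source file stay compatible either way. restrict, by contrast, is a promise from the caller that the pointed-to storage is not reached through any other pointer the function uses; the generic sketch below (not code from the engine, only reusing its f32/u64 typedefs) shows the kind of optimisation that promise enables:

    /* With restrict the compiler may assume out[] and in[] never overlap,
       so the loop can be vectorised without reload/store ordering hazards. */
    static void scale_into(f32* restrict out, const f32* restrict in, const u64 count, const f32 factor)
    {
        for (u64 i = 0; i < count; i++)
            out[i] = in[i] * factor;
    }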