From 16957726f71b9b8124b7e5f24c9ac145aaf60fc3 Mon Sep 17 00:00:00 2001 From: Andre Weissflog Date: Sun, 7 Feb 2021 18:36:39 +0100 Subject: [PATCH 1/8] update changelog and readme --- CHANGELOG.md | 2 ++ README.md | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index da531452b..53c70d6d5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ > NOTE: this list will usually only be updated with changes that affect the public APIs +- **07-Feb-2021**: A PSA about upcoming breaking changes in (mainly) sokol_gfx.h: https://floooh.github.io/2021/02/07/sokol-api-overhaul.html + - **20-Dec-2020**: A couple of minor breaking changes in the sokol_gfx.h and sokol_app.h APIs as preparation for the upcoming automatic language binding generation: diff --git a/README.md b/README.md index b8f7b12e5..28bc2f2fa 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ Simple [STB-style](https://github.com/nothings/stb/blob/master/docs/stb_howto.txt) cross-platform libraries for C and C++, written in C. -[**See what's new**](https://github.com/floooh/sokol/blob/master/CHANGELOG.md) (**20-Dec-2020**: minor but breaking API changes in sokol_gfx.h/sokol_app.h) +[**See what's new**](https://github.com/floooh/sokol/blob/master/CHANGELOG.md) (**07-Feb-2021**: new blog post about upcoming breaking API changes in sokol_gfx.h) ## Examples and Related Projects From 78061e2a9a5286b9fab19fd461dfd22c1f3c803c Mon Sep 17 00:00:00 2001 From: spaceface Date: Mon, 8 Feb 2021 19:46:57 +0100 Subject: [PATCH 2/8] Fix #pragma compilation with tcc on windows tcc does not like the `.lib` suffix inside `#pragma comment (lib, "foo.lib")` directives - it fails with `tcc: error: library 'foo.lib' not found` errors. Removing the extension fixes tcc, and all major compilers (msvc/clang/gcc/mingw) compile fine without it, so this change should simply allow the headers to be compiled with a different compiler. 
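For reference, a minimal standalone sketch of the pragma form this patch settles on (illustrative only; the real headers keep their existing platform/backend `#if` guards around these lines):

```c
/* sketch: auto-link Windows system libraries without the ".lib" extension.
   MSVC's linker still resolves "user32" to user32.lib, while tcc no longer
   fails with "tcc: error: library 'user32.lib' not found". */
#pragma comment (lib, "kernel32")
#pragma comment (lib, "user32")

int main(void) { return 0; }
```

The diff below applies the same extension-less form to every `#pragma comment (lib, ...)` line in sokol_app.h, sokol_audio.h and sokol_gfx.h.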
We've needed this change over at vlang/v, and have made it several times already, ever since we added tcc as a supported C compiler on Windows, but it keeps getting reverted every time we update the headers :) --- sokol_app.h | 18 +++++++++--------- sokol_audio.h | 8 ++++---- sokol_gfx.h | 12 ++++++------ 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/sokol_app.h b/sokol_app.h index 1f2b3a8ec..893049a09 100644 --- a/sokol_app.h +++ b/sokol_app.h @@ -1455,16 +1455,16 @@ inline void sapp_run(const sapp_desc& desc) { return sapp_run(&desc); } #pragma comment (linker, "/subsystem:windows") #endif - #pragma comment (lib, "kernel32.lib") - #pragma comment (lib, "user32.lib") - #pragma comment (lib, "shell32.lib") /* CommandLineToArgvW, DragQueryFileW, DragFinished */ + #pragma comment (lib, "kernel32") + #pragma comment (lib, "user32") + #pragma comment (lib, "shell32") /* CommandLineToArgvW, DragQueryFileW, DragFinished */ #if defined(SOKOL_D3D11) - #pragma comment (lib, "dxgi.lib") - #pragma comment (lib, "d3d11.lib") - #pragma comment (lib, "dxguid.lib") + #pragma comment (lib, "dxgi") + #pragma comment (lib, "d3d11") + #pragma comment (lib, "dxguid") #endif #if defined(SOKOL_GLCORE33) - #pragma comment (lib, "gdi32.lib") + #pragma comment (lib, "gdi32") #endif #if defined(SOKOL_D3D11) @@ -1508,8 +1508,8 @@ inline void sapp_run(const sapp_desc& desc) { return sapp_run(&desc); } #include #include - #pragma comment (lib, "WindowsApp.lib") - #pragma comment (lib, "dxguid.lib") + #pragma comment (lib, "WindowsApp") + #pragma comment (lib, "dxguid") #elif defined(_SAPP_ANDROID) #include #include diff --git a/sokol_audio.h b/sokol_audio.h index 8a6f8db74..2e02ae8ac 100644 --- a/sokol_audio.h +++ b/sokol_audio.h @@ -495,12 +495,12 @@ inline void saudio_setup(const saudio_desc& desc) { return saudio_setup(&desc); #include #if (defined(WINAPI_FAMILY_PARTITION) && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)) #define SOKOL_WIN32_NO_MMDEVICE - #pragma comment (lib, "WindowsApp.lib") + #pragma comment (lib, "WindowsApp") #else - #pragma comment (lib, "kernel32.lib") - #pragma comment (lib, "ole32.lib") + #pragma comment (lib, "kernel32") + #pragma comment (lib, "ole32") #if defined(SOKOL_WIN32_NO_MMDEVICE) - #pragma comment (lib, "mmdevapi.lib") + #pragma comment (lib, "mmdevapi") #endif #endif #endif diff --git a/sokol_gfx.h b/sokol_gfx.h index 63f2e0af4..139c3aebd 100644 --- a/sokol_gfx.h +++ b/sokol_gfx.h @@ -2565,13 +2565,13 @@ inline void sg_init_pass(sg_pass pass_id, const sg_pass_desc& desc) { return sg_ #include #ifdef _MSC_VER #if (defined(WINAPI_FAMILY_PARTITION) && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)) - #pragma comment (lib, "WindowsApp.lib") + #pragma comment (lib, "WindowsApp") #else - #pragma comment (lib, "kernel32.lib") - #pragma comment (lib, "user32.lib") - #pragma comment (lib, "dxgi.lib") - #pragma comment (lib, "d3d11.lib") - #pragma comment (lib, "dxguid.lib") + #pragma comment (lib, "kernel32") + #pragma comment (lib, "user32") + #pragma comment (lib, "dxgi") + #pragma comment (lib, "d3d11") + #pragma comment (lib, "dxguid") #endif #endif #elif defined(SOKOL_METAL) From d2826d75c0931bc48a4b474f6db1ae301552a423 Mon Sep 17 00:00:00 2001 From: Andre Weissflog Date: Wed, 10 Feb 2021 18:36:43 +0100 Subject: [PATCH 3/8] Language-binding-friendly API changes.
(#458) See: https://floooh.github.io/2021/02/07/sokol-api-overhaul.html --- README.md | 25 +- bindgen/gen_all.py | 22 +- bindgen/gen_ir.py | 34 +- bindgen/gen_zig.py | 292 ++++-- sokol_app.h | 153 +-- sokol_args.h | 4 +- sokol_audio.h | 120 +-- sokol_fetch.h | 8 +- sokol_gfx.h | 2239 +++++++++++++++++++++------------------- sokol_time.h | 4 +- util/sokol_debugtext.h | 129 ++- util/sokol_fontstash.h | 38 +- util/sokol_gfx_imgui.h | 261 ++--- util/sokol_gl.h | 82 +- util/sokol_imgui.h | 79 +- util/sokol_memtrack.h | 2 +- util/sokol_shape.h | 82 +- 17 files changed, 1976 insertions(+), 1598 deletions(-) diff --git a/README.md b/README.md index 28bc2f2fa..f2340e98d 100644 --- a/README.md +++ b/README.md @@ -86,7 +86,7 @@ int main() { glfwInit(); glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3); glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3); - glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); + glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE); glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); GLFWwindow* w = glfwCreateWindow(640, 480, "Sokol Triangle GLFW", 0, 0); glfwMakeContextCurrent(w); @@ -104,8 +104,7 @@ int main() { -0.5f, -0.5f, 0.5f, 0.0f, 0.0f, 1.0f, 1.0f }; sg_buffer vbuf = sg_make_buffer(&(sg_buffer_desc){ - .size = sizeof(vertices), - .content = vertices, + .data = SG_RANGE(vertices) }); /* a shader */ @@ -140,7 +139,7 @@ int main() { }); /* resource bindings */ - sg_bindings binds = { + sg_bindings bind = { .vertex_buffers[0] = vbuf }; @@ -153,7 +152,7 @@ int main() { glfwGetFramebufferSize(w, &cur_width, &cur_height); sg_begin_default_pass(&pass_action, cur_width, cur_height); sg_apply_pipeline(pip); - sg_apply_bindings(&binds); + sg_apply_bindings(&bind); sg_draw(0, 3, 1); sg_end_pass(); sg_commit(); @@ -183,9 +182,9 @@ A simple clear-loop sample using sokol_app.h and sokol_gfx.h (does not include separate sokol.c/.m implementation file which is necessary to split the Objective-C code from the C code of the sample): -```cpp -#include "sokol_app.h" +```c #include "sokol_gfx.h" +#include "sokol_app.h" #include "sokol_glue.h" sg_pass_action pass_action; @@ -195,13 +194,13 @@ void init(void) { .context = sapp_sgcontext() }); pass_action = (sg_pass_action) { - .colors[0] = { .action=SG_ACTION_CLEAR, .val={1.0f, 0.0f, 0.0f, 1.0f} } + .colors[0] = { .action=SG_ACTION_CLEAR, .value={1.0f, 0.0f, 0.0f, 1.0f} } }; } void frame(void) { - float g = pass_action.colors[0].val[1] + 0.01f; - pass_action.colors[0].val[1] = (g > 1.0f) ? 0.0f : g; + float g = pass_action.colors[0].value.g + 0.01f; + pass_action.colors[0].value.g = (g > 1.0f) ? 
0.0f : g; sg_begin_default_pass(&pass_action, sapp_width(), sapp_height()); sg_end_pass(); sg_commit(); @@ -218,7 +217,7 @@ sapp_desc sokol_main(int argc, char* argv[]) { .cleanup_cb = cleanup, .width = 400, .height = 300, - .window_title = "Clear (sokol app)", + .window_title = "Clear Sample", }; } ``` @@ -240,7 +239,7 @@ A minimal audio-streaming API: A simple mono square-wave generator using the callback model: -```cpp +```c // the sample callback, running in audio thread static void stream_cb(float* buffer, int num_frames, int num_channels) { assert(1 == num_channels); @@ -266,7 +265,7 @@ int main() { The same code using the push-model -```cpp +```c #define BUF_SIZE (32) int main() { // init sokol-audio with default params, no callback diff --git a/bindgen/gen_all.py b/bindgen/gen_all.py index 0cbaa5caa..11b6240e8 100644 --- a/bindgen/gen_all.py +++ b/bindgen/gen_all.py @@ -1,19 +1,19 @@ -import gen_ir, gen_zig +import os, gen_zig tasks = [ - [ '../sokol_gfx.h', 'sg_', 'gfx' ], - [ '../sokol_app.h', 'sapp_', 'app' ], - [ '../sokol_time.h', 'stm_', 'time' ], - [ '../sokol_audio.h', 'saudio_', 'audio' ] + [ '../sokol_gfx.h', 'sg_', [] ], + [ '../sokol_app.h', 'sapp_', [] ], + [ '../sokol_time.h', 'stm_', [] ], + [ '../sokol_audio.h', 'saudio_', [] ], + [ '../util/sokol_gl.h', 'sgl_', ['sg_'] ], + [ '../util/sokol_debugtext.h', 'sdtx_', ['sg_'] ], + [ '../util/sokol_shape.h', 'sshape_', ['sg_'] ], ] # Zig -print('> generating Zig bindings...') gen_zig.prepare() for task in tasks: c_header_path = task[0] - c_prefix = task[1] - module_name = task[2] - print(f' {c_header_path} => {module_name}.zig') - ir = gen_ir.gen(c_header_path, module_name, c_prefix, ["-DSOKOL_ZIG_BINDINGS"]) - gen_zig.gen(c_header_path, ir) + main_prefix = task[1] + dep_prefixes = task[2] + gen_zig.gen(c_header_path, main_prefix, dep_prefixes) diff --git a/bindgen/gen_ir.py b/bindgen/gen_ir.py index 7e026add9..c1e2ab508 100644 --- a/bindgen/gen_ir.py +++ b/bindgen/gen_ir.py @@ -12,6 +12,18 @@ def is_api_decl(decl, prefix): else: return False +def is_dep_decl(decl, dep_prefixes): + for prefix in dep_prefixes: + if is_api_decl(decl, prefix): + return True + return False + +def dep_prefix(decl, dep_prefixes): + for prefix in dep_prefixes: + if is_api_decl(decl, prefix): + return prefix + return None + def filter_types(str): return str.replace('_Bool', 'bool') @@ -67,7 +79,8 @@ def parse_func(decl): if 'inner' in decl: for param in decl['inner']: if param['kind'] != 'ParmVarDecl': - sys.exit(f"ERROR: func param kind must be 'ParmVarDecl' ({decl['name']})") + print(f"warning: ignoring func {decl['name']} (unsupported parameter type)") + return None outp_param = {} outp_param['name'] = param['name'] outp_param['type'] = filter_types(param['type']['qualType']) @@ -85,22 +98,25 @@ def parse_decl(decl): else: return None -def clang(header_path, additional_options): - cmd = ['clang', '-Xclang', '-ast-dump=json' ] - cmd.extend(additional_options) - cmd.append(header_path) +def clang(csrc_path): + cmd = ['clang', '-Xclang', '-ast-dump=json', '-c' ] + cmd.append(csrc_path) return subprocess.check_output(cmd) -def gen(header_path, module, prefix, clang_options): - ast = clang(header_path, clang_options) +def gen(header_path, source_path, module, main_prefix, dep_prefixes): + ast = clang(source_path) inp = json.loads(ast) outp = {} outp['module'] = module - outp['prefix'] = prefix + outp['prefix'] = main_prefix + outp['dep_prefixes'] = dep_prefixes outp['decls'] = [] for decl in inp['inner']: - if is_api_decl(decl, prefix): + is_dep = 
is_dep_decl(decl, dep_prefixes) + if is_api_decl(decl, main_prefix) or is_dep: outp_decl = parse_decl(decl) if outp_decl is not None: + outp_decl['is_dep'] = is_dep + outp_decl['dep_prefix'] = dep_prefix(decl, dep_prefixes) outp['decls'].append(outp_decl) return outp diff --git a/bindgen/gen_zig.py b/bindgen/gen_zig.py index 9dcc46fe4..6c19fb03a 100644 --- a/bindgen/gen_zig.py +++ b/bindgen/gen_zig.py @@ -6,29 +6,49 @@ # - functions are camelCase # - otherwise snake_case #------------------------------------------------------------------------------- +import gen_ir import json, re, os, shutil -struct_types = [] -enum_types = [] -enum_items = {} -out_lines = '' +module_names = { + 'sg_': 'gfx', + 'sapp_': 'app', + 'stm_': 'time', + 'saudio_': 'audio', + 'sgl_': 'gl', + 'sdtx_': 'debugtext', + 'sshape_': 'shape', +} -def reset_globals(): - global struct_types - global enum_types - global enum_items - global out_lines - struct_types = [] - enum_types = [] - enum_items = {} - out_lines = '' +c_source_paths = { + 'sg_': 'sokol-zig/src/sokol/c/sokol_app_gfx.c', + 'sapp_': 'sokol-zig/src/sokol/c/sokol_app_gfx.c', + 'stm_': 'sokol-zig/src/sokol/c/sokol_time.c', + 'saudio_': 'sokol-zig/src/sokol/c/sokol_audio.c', + 'sgl_': 'sokol-zig/src/sokol/c/sokol_gl.c', + 'sdtx_': 'sokol-zig/src/sokol/c/sokol_debugtext.c', + 'sshape_': 'sokol-zig/src/sokol/c/sokol_shape.c', +} -re_1d_array = re.compile("^(?:const )?\w*\s\*?\[\d*\]$") -re_2d_array = re.compile("^(?:const )?\w*\s\*?\[\d*\]\[\d*\]$") +func_name_ignores = [ + 'sdtx_printf', + 'sdtx_vprintf', +] + +func_name_overrides = { + 'sgl_error': 'sgl_get_error', # 'error' is reserved in Zig + 'sgl_deg': 'sgl_as_degrees', + 'sgl_rad': 'sgl_as_radians' +} + +struct_field_type_overrides = { + 'sg_context_desc.color_format': 'int', + 'sg_context_desc.depth_format': 'int', +} prim_types = { 'int': 'i32', 'bool': 'bool', + 'char': 'u8', 'int8_t': 'i8', 'uint8_t': 'u8', 'int16_t': 'i16', @@ -40,7 +60,8 @@ def reset_globals(): 'float': 'f32', 'double': 'f64', 'uintptr_t': 'usize', - 'intptr_t': 'isize' + 'intptr_t': 'isize', + 'size_t': 'usize' } prim_defaults = { @@ -57,13 +78,27 @@ def reset_globals(): 'float': '0.0', 'double': '0.0', 'uintptr_t': '0', - 'intptr_t': '0' + 'intptr_t': '0', + 'size_t': '0' } -struct_field_type_overrides = { - 'sg_context_desc.color_format': 'int', - 'sg_context_desc.depth_format': 'int', -} +struct_types = [] +enum_types = [] +enum_items = {} +out_lines = '' + +def reset_globals(): + global struct_types + global enum_types + global enum_items + global out_lines + struct_types = [] + enum_types = [] + enum_items = {} + out_lines = '' + +re_1d_array = re.compile("^(?:const )?\w*\s\*?\[\d*\]$") +re_2d_array = re.compile("^(?:const )?\w*\s\*?\[\d*\]\[\d*\]$") def l(s): global out_lines @@ -72,6 +107,24 @@ def l(s): def as_zig_prim_type(s): return prim_types[s] +# prefix_bla_blub(_t) => (dep.)BlaBlub +def as_zig_struct_type(s, prefix): + parts = s.lower().split('_') + outp = '' if s.startswith(prefix) else f'{parts[0]}.' + for part in parts[1:]: + if (part != 't'): + outp += part.capitalize() + return outp + +# prefix_bla_blub(_t) => (dep.)BlaBlub +def as_zig_enum_type(s, prefix): + parts = s.lower().split('_') + outp = '' if s.startswith(prefix) else f'{parts[0]}.' 
+ for part in parts[1:]: + if (part != 't'): + outp += part.capitalize() + return outp + def check_struct_field_type_override(struct_name, field_name, orig_type): s = f"{struct_name}.{field_name}" if s in struct_field_type_overrides: @@ -79,6 +132,15 @@ def check_struct_field_type_override(struct_name, field_name, orig_type): else: return orig_type +def check_func_name_ignore(func_name): + return func_name in func_name_ignores + +def check_func_name_override(func_name): + if func_name in func_name_overrides: + return func_name_overrides[func_name] + else: + return func_name + # PREFIX_BLA_BLUB to bla_blub def as_snake_case(s, prefix): outp = s.lower() @@ -86,14 +148,6 @@ def as_snake_case(s, prefix): outp = outp[len(prefix):] return outp -# prefix_bla_blub => BlaBlub -def as_title_case(s): - parts = s.lower().split('_')[1:] - outp = '' - for part in parts: - outp += part.capitalize() - return outp - # prefix_bla_blub => blaBlub def as_camel_case(s): parts = s.lower().split('_')[1:] @@ -177,15 +231,15 @@ def extract_ptr_type(s): else: return tokens[0] -def as_extern_c_arg_type(arg_type): +def as_extern_c_arg_type(arg_type, prefix): if arg_type == "void": return "void" elif is_prim_type(arg_type): return as_zig_prim_type(arg_type) elif is_struct_type(arg_type): - return as_title_case(arg_type) + return as_zig_struct_type(arg_type, prefix) elif is_enum_type(arg_type): - return as_title_case(arg_type) + return as_zig_enum_type(arg_type, prefix) elif is_void_ptr(arg_type): return "?*c_void" elif is_const_void_ptr(arg_type): @@ -193,7 +247,7 @@ def as_extern_c_arg_type(arg_type): elif is_string_ptr(arg_type): return "[*c]const u8" elif is_const_struct_ptr(arg_type): - return f"[*c]const {as_title_case(extract_ptr_type(arg_type))}" + return f"[*c]const {as_zig_struct_type(extract_ptr_type(arg_type), prefix)}" elif is_prim_ptr(arg_type): return f"[*c] {as_zig_prim_type(extract_ptr_type(arg_type))}" elif is_const_prim_ptr(arg_type): @@ -201,7 +255,7 @@ def as_extern_c_arg_type(arg_type): else: return '??? (as_extern_c_arg_type)' -def as_zig_arg_type(arg_prefix, arg_type): +def as_zig_arg_type(arg_prefix, arg_type, prefix): # NOTE: if arg_prefix is None, the result is used as return value pre = "" if arg_prefix is None else arg_prefix if arg_type == "void": @@ -212,18 +266,18 @@ def as_zig_arg_type(arg_prefix, arg_type): elif is_prim_type(arg_type): return pre + as_zig_prim_type(arg_type) elif is_struct_type(arg_type): - return pre + as_title_case(arg_type) + return pre + as_zig_struct_type(arg_type, prefix) elif is_enum_type(arg_type): - return pre + as_title_case(arg_type) + return pre + as_zig_enum_type(arg_type, prefix) elif is_void_ptr(arg_type): return pre + "?*c_void" elif is_const_void_ptr(arg_type): return pre + "?*const c_void" elif is_string_ptr(arg_type): - return pre + "[]const u8" + return pre + "[:0]const u8" elif is_const_struct_ptr(arg_type): # not a bug, pass const structs by value - return pre + f"{as_title_case(extract_ptr_type(arg_type))}" + return pre + f"{as_zig_struct_type(extract_ptr_type(arg_type), prefix)}" elif is_prim_ptr(arg_type): return pre + f"* {as_zig_prim_type(extract_ptr_type(arg_type))}" elif is_const_prim_ptr(arg_type): @@ -232,14 +286,14 @@ def as_zig_arg_type(arg_prefix, arg_type): return arg_prefix + "??? 
(as_zig_arg_type)" # get C-style arguments of a function pointer as string -def funcptr_args_c(field_type): +def funcptr_args_c(field_type, prefix): tokens = field_type[field_type.index('(*)')+4:-1].split(',') s = "" for token in tokens: - arg_type = token.strip(); + arg_type = token.strip() if s != "": s += ", " - c_arg = as_extern_c_arg_type(arg_type) + c_arg = as_extern_c_arg_type(arg_type, prefix) if (c_arg == "void"): return "" else: @@ -256,43 +310,42 @@ def funcptr_res_c(field_type): else: return '???' -def funcdecl_args_c(decl): +def funcdecl_args_c(decl, prefix): s = "" for param_decl in decl['params']: if s != "": s += ", " arg_type = param_decl['type'] - s += as_extern_c_arg_type(arg_type) + s += as_extern_c_arg_type(arg_type, prefix) return s -def funcdecl_args_zig(decl): +def funcdecl_args_zig(decl, prefix): s = "" for param_decl in decl['params']: if s != "": s += ", " arg_name = param_decl['name'] arg_type = param_decl['type'] - s += f"{as_zig_arg_type(f'{arg_name}: ', arg_type)}" + s += f"{as_zig_arg_type(f'{arg_name}: ', arg_type, prefix)}" return s -def funcdecl_res_c(decl): +def funcdecl_res_c(decl, prefix): decl_type = decl['type'] res_type = decl_type[:decl_type.index('(')].strip() - return as_extern_c_arg_type(res_type) + return as_extern_c_arg_type(res_type, prefix) -def funcdecl_res_zig(decl): +def funcdecl_res_zig(decl, prefix): decl_type = decl['type'] res_type = decl_type[:decl_type.index('(')].strip() - zig_res_type = as_zig_arg_type(None, res_type) + zig_res_type = as_zig_arg_type(None, res_type, prefix) if zig_res_type == "": zig_res_type = "void" return zig_res_type def gen_struct(decl, prefix, callconvc_funcptrs = True, use_raw_name=False, use_extern=True): struct_name = decl['name'] - zig_type = struct_name if use_raw_name else as_title_case(struct_name) + zig_type = struct_name if use_raw_name else as_zig_struct_type(struct_name, prefix) l(f"pub const {zig_type} = {'extern ' if use_extern else ''}struct {{") - #l(f" pub fn init(options: anytype) {zig_type} {{ var item: {zig_type} = .{{ }}; init_with(&item, options); return item; }}") for field in decl['fields']: field_name = field['name'] field_type = field['type'] @@ -300,9 +353,9 @@ def gen_struct(decl, prefix, callconvc_funcptrs = True, use_raw_name=False, use_ if is_prim_type(field_type): l(f" {field_name}: {as_zig_prim_type(field_type)} = {type_default_value(field_type)},") elif is_struct_type(field_type): - l(f" {field_name}: {as_title_case(field_type)} = .{{ }},") + l(f" {field_name}: {as_zig_struct_type(field_type, prefix)} = .{{ }},") elif is_enum_type(field_type): - l(f" {field_name}: {as_title_case(field_type)} = .{enum_default_item(field_type)},") + l(f" {field_name}: {as_zig_enum_type(field_type, prefix)} = .{enum_default_item(field_type)},") elif is_string_ptr(field_type): l(f" {field_name}: [*c]const u8 = null,") elif is_const_void_ptr(field_type): @@ -313,9 +366,9 @@ def gen_struct(decl, prefix, callconvc_funcptrs = True, use_raw_name=False, use_ l(f" {field_name}: ?[*]const {as_zig_prim_type(extract_ptr_type(field_type))} = null,") elif is_func_ptr(field_type): if callconvc_funcptrs: - l(f" {field_name}: ?fn({funcptr_args_c(field_type)}) callconv(.C) {funcptr_res_c(field_type)} = null,") + l(f" {field_name}: ?fn({funcptr_args_c(field_type, prefix)}) callconv(.C) {funcptr_res_c(field_type)} = null,") else: - l(f" {field_name}: ?fn({funcptr_args_c(field_type)}) {funcptr_res_c(field_type)} = null,") + l(f" {field_name}: ?fn({funcptr_args_c(field_type, prefix)}) {funcptr_res_c(field_type)} = 
null,") elif is_1d_array_type(field_type): array_type = extract_array_type(field_type) array_nums = extract_array_nums(field_type) @@ -323,9 +376,15 @@ def gen_struct(decl, prefix, callconvc_funcptrs = True, use_raw_name=False, use_ if is_prim_type(array_type): zig_type = as_zig_prim_type(array_type) def_val = type_default_value(array_type) + elif is_struct_type(array_type): + zig_type = as_zig_struct_type(array_type, prefix) + def_val = '.{}' + elif is_enum_type(array_type): + zig_type = as_zig_enum_type(array_type, prefix) + def_val = '.{}' else: - zig_type = as_title_case(array_type) - def_val = ".{}" + zig_type = '??? (array type)' + def_val = '???' t0 = f"[{array_nums[0]}]{zig_type}" t0_slice = f"[]const {zig_type}" t1 = f"[_]{zig_type}" @@ -338,11 +397,16 @@ def gen_struct(decl, prefix, callconvc_funcptrs = True, use_raw_name=False, use_ array_type = extract_array_type(field_type) array_nums = extract_array_nums(field_type) if is_prim_type(array_type): - l(f"// FIXME: 2D array with primitive type: {field_name}") + zig_type = as_zig_prim_type(array_type) + def_val = type_default_value(array_type) elif is_struct_type(array_type): - zig_type = as_title_case(array_type) - t0 = f"[{array_nums[0]}][{array_nums[1]}]{zig_type}" - l(f" {field_name}: {t0} = [_][{array_nums[1]}]{zig_type}{{[_]{zig_type}{{ .{{ }} }}**{array_nums[1]}}}**{array_nums[0]},") + zig_type = as_zig_struct_type(array_type, prefix) + def_val = ".{ }" + else: + zig_type = "???" + def_val = "???" + t0 = f"[{array_nums[0]}][{array_nums[1]}]{zig_type}" + l(f" {field_name}: {t0} = [_][{array_nums[1]}]{zig_type}{{[_]{zig_type}{{ {def_val} }}**{array_nums[1]}}}**{array_nums[0]},") else: l(f"// FIXME: {field_name}: {field_type};") l("};") @@ -352,7 +416,7 @@ def gen_consts(decl, prefix): l(f"pub const {as_snake_case(item['name'], prefix)} = {item['value']};") def gen_enum(decl, prefix): - l(f"pub const {as_title_case(decl['name'])} = extern enum(i32) {{") + l(f"pub const {as_zig_enum_type(decl['name'], prefix)} = extern enum(i32) {{") for item in decl['items']: item_name = as_enum_item_name(item['name']) if item_name != "FORCE_U32": @@ -363,13 +427,13 @@ def gen_enum(decl, prefix): l("};") def gen_func_c(decl, prefix): - l(f"pub extern fn {decl['name']}({funcdecl_args_c(decl)}) {funcdecl_res_c(decl)};") + l(f"pub extern fn {decl['name']}({funcdecl_args_c(decl, prefix)}) {funcdecl_res_c(decl, prefix)};") def gen_func_zig(decl, prefix): c_func_name = decl['name'] - zig_func_name = as_camel_case(decl['name']) - zig_res_type = funcdecl_res_zig(decl) - l(f"pub fn {zig_func_name}({funcdecl_args_zig(decl)}) {funcdecl_res_zig(decl)} {{") + zig_func_name = as_camel_case(check_func_name_override(decl['name'])) + zig_res_type = funcdecl_res_zig(decl, prefix) + l(f"pub inline fn {zig_func_name}({funcdecl_args_zig(decl, prefix)}) {funcdecl_res_zig(decl, prefix)} {{") if zig_res_type != 'void': s = f" return {c_func_name}(" else: @@ -380,7 +444,9 @@ def gen_func_zig(decl, prefix): arg_name = param_decl['name'] arg_type = param_decl['type'] if is_const_struct_ptr(arg_type): - s += "&" + arg_name + s += f"&{arg_name}" + elif is_string_ptr(arg_type): + s += f"@ptrCast([*c]const u8,{arg_name})" else: s += arg_name s += ");" @@ -401,31 +467,93 @@ def pre_parse(inp): for item in decl['items']: enum_items[enum_name].append(as_enum_item_name(item['name'])) -def gen_module(inp): +def gen_imports(inp, dep_prefixes): + for dep_prefix in dep_prefixes: + dep_module_name = module_names[dep_prefix] + l(f'const {dep_prefix[:-1]} = 
@import("{dep_module_name}.zig");') + l('') + +def gen_helpers(inp): + if inp['prefix'] in ['sg_', 'sdtx_', 'sshape_']: + l('// helper function to convert "anything" to a Range struct') + l('pub fn asRange(val: anytype) Range {') + l(' const type_info = @typeInfo(@TypeOf(val));') + l(' switch (type_info) {') + l(' .Pointer => {') + l(' switch (type_info.Pointer.size) {') + l(' .One => return .{ .ptr = val, .size = @sizeOf(type_info.Pointer.child) },') + l(' .Slice => return .{ .ptr = val.ptr, .size = @sizeOf(type_info.Pointer.child) * val.len },') + l(' else => @compileError("FIXME: Pointer type!"),') + l(' }') + l(' },') + l(' .Struct, .Array => {') + l(' return .{ .ptr = &val, .size = @sizeOf(@TypeOf(val)) };') + l(' },') + l(' else => {') + l(' @compileError("Cannot convert to range!");') + l(' }') + l(' }') + l('}') + l('') + if inp['prefix'] == 'sdtx_': + l('// std.fmt compatible Writer') + l('pub const Writer = struct {') + l(' pub const Error = error { };') + l(' pub fn writeAll(self: Writer, bytes: []const u8) Error!void {') + l(' for (bytes) |byte| {') + l(' putc(byte);') + l(' }') + l(' }') + l(' pub fn writeByteNTimes(self: Writer, byte: u8, n: u64) Error!void {') + l(' var i: u64 = 0;') + l(' while (i < n): (i += 1) {') + l(' putc(byte);') + l(' }') + l(' }') + l('};') + l('// std.fmt-style formatted print') + l('pub fn print(comptime fmt: anytype, args: anytype) void {') + l(' var writer: Writer = .{};') + l(' @import("std").fmt.format(writer, fmt, args) catch {};') + l('}') + l('') + +def gen_module(inp, dep_prefixes): l('// machine generated, do not edit') l('') + gen_imports(inp, dep_prefixes) + gen_helpers(inp) pre_parse(inp) prefix = inp['prefix'] for decl in inp['decls']: - kind = decl['kind'] - if kind == 'struct': - gen_struct(decl, prefix) - elif kind == 'consts': - gen_consts(decl, prefix) - elif kind == 'enum': - gen_enum(decl, prefix) - elif kind == 'func': - gen_func_c(decl, prefix) - gen_func_zig(decl, prefix) + if not decl['is_dep']: + kind = decl['kind'] + if kind == 'struct': + gen_struct(decl, prefix) + elif kind == 'consts': + gen_consts(decl, prefix) + elif kind == 'enum': + gen_enum(decl, prefix) + elif kind == 'func': + if not check_func_name_ignore(decl['name']): + gen_func_c(decl, prefix) + gen_func_zig(decl, prefix) def prepare(): + print('Generating zig bindings:') if not os.path.isdir('sokol-zig/src/sokol'): os.makedirs('sokol-zig/src/sokol') + if not os.path.isdir('sokol-zig/src/sokol/c'): + os.makedirs('sokol-zig/src/sokol/c') -def gen(c_header_path, input_ir): +def gen(c_header_path, c_prefix, dep_c_prefixes): + module_name = module_names[c_prefix] + c_source_path = c_source_paths[c_prefix] + print(f' {c_header_path} => {module_name}') reset_globals() - gen_module(input_ir) - shutil.copyfile(c_header_path, f'sokol-zig/src/sokol/{os.path.basename(c_header_path)}') - output_path = f"sokol-zig/src/sokol/{input_ir['module']}.zig" + shutil.copyfile(c_header_path, f'sokol-zig/src/sokol/c/{os.path.basename(c_header_path)}') + ir = gen_ir.gen(c_header_path, c_source_path, module_name, c_prefix, dep_c_prefixes) + gen_module(ir, dep_c_prefixes) + output_path = f"sokol-zig/src/sokol/{ir['module']}.zig" with open(output_path, 'w', newline='\n') as f_outp: f_outp.write(out_lines) diff --git a/sokol_app.h b/sokol_app.h index 893049a09..011597058 100644 --- a/sokol_app.h +++ b/sokol_app.h @@ -250,6 +250,13 @@ this may change from one frame to the next, and it may be different from the initial size provided in the sapp_desc struct. 
+ float sapp_widthf(void) + float sapp_heightf(void) + These are alternatives to sapp_width() and sapp_height() which return + the default framebuffer size as float values instead of integer. This + may help to prevent casting back and forth between int and float + in more strongly typed languages than C and C++. + int sapp_color_format(void) int sapp_depth_format(void) The color and depth-stencil pixelformats of the default framebuffer, @@ -340,6 +347,7 @@ - the application was suspended or restored (on mobile platforms) - the user or application code has asked to quit the application - a string was pasted to the system clipboard + - one or more files have been dropped onto the application window To explicitly 'consume' an event and prevent that the event is forwarded for further handling to the operating system, call @@ -1155,8 +1163,12 @@ extern sapp_desc sokol_main(int argc, char* argv[]); SOKOL_APP_API_DECL bool sapp_isvalid(void); /* returns the current framebuffer width in pixels */ SOKOL_APP_API_DECL int sapp_width(void); +/* same as sapp_width(), but returns float */ +SOKOL_APP_API_DECL float sapp_widthf(void); /* returns the current framebuffer height in pixels */ SOKOL_APP_API_DECL int sapp_height(void); +/* same as sapp_height(), but returns float */ +SOKOL_APP_API_DECL float sapp_heightf(void); /* get default framebuffer color pixel format */ SOKOL_APP_API_DECL int sapp_color_format(void); /* get default framebuffer depth pixel format */ @@ -1277,7 +1289,8 @@ inline void sapp_run(const sapp_desc& desc) { return sapp_run(&desc); } #ifdef SOKOL_APP_IMPL #define SOKOL_APP_IMPL_INCLUDED (1) -#include /* memset */ +#include // memset +#include // size_t /* check if the config defines are alright */ #if defined(__APPLE__) @@ -2355,6 +2368,7 @@ typedef int GLint; _SAPP_XMACRO(glUniform1i, void, (GLint location, GLint v0)) \ _SAPP_XMACRO(glDisable, void, (GLenum cap)) \ _SAPP_XMACRO(glColorMask, void, (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha)) \ + _SAPP_XMACRO(glColorMaski, void, (GLuint buf, GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha)) \ _SAPP_XMACRO(glBindBuffer, void, (GLenum target, GLuint buffer)) \ _SAPP_XMACRO(glDeleteVertexArrays, void, (GLsizei n, const GLuint * arrays)) \ _SAPP_XMACRO(glDepthMask, void, (GLboolean flag)) \ @@ -2555,14 +2569,14 @@ _SOKOL_PRIVATE void _sapp_init_state(const sapp_desc* desc) { _sapp.clipboard.enabled = _sapp.desc.enable_clipboard; if (_sapp.clipboard.enabled) { _sapp.clipboard.buf_size = _sapp.desc.clipboard_size; - _sapp.clipboard.buffer = (char*) SOKOL_CALLOC(1, _sapp.clipboard.buf_size); + _sapp.clipboard.buffer = (char*) SOKOL_CALLOC(1, (size_t)_sapp.clipboard.buf_size); } _sapp.drop.enabled = _sapp.desc.enable_dragndrop; if (_sapp.drop.enabled) { _sapp.drop.max_files = _sapp.desc.max_dropped_files; _sapp.drop.max_path_length = _sapp.desc.max_dropped_file_path_length; _sapp.drop.buf_size = _sapp.drop.max_files * _sapp.drop.max_path_length; - _sapp.drop.buffer = (char*) SOKOL_CALLOC(1, _sapp.drop.buf_size); + _sapp.drop.buffer = (char*) SOKOL_CALLOC(1, (size_t)_sapp.drop.buf_size); } _sapp_strcpy(_sapp.desc.window_title, _sapp.window_title, sizeof(_sapp.window_title)); _sapp.desc.window_title = _sapp.window_title; @@ -2615,7 +2629,7 @@ _SOKOL_PRIVATE sapp_keycode _sapp_translate_key(int scan_code) { _SOKOL_PRIVATE void _sapp_clear_drop_buffer(void) { if (_sapp.drop.enabled) { SOKOL_ASSERT(_sapp.drop.buffer); - memset(_sapp.drop.buffer, 0, _sapp.drop.buf_size); + memset(_sapp.drop.buffer, 0, 
(size_t)_sapp.drop.buf_size); } } @@ -3009,7 +3023,7 @@ _SOKOL_PRIVATE void _sapp_macos_frame(void) { _sapp.macos.view.device = _sapp.macos.mtl_device; _sapp.macos.view.colorPixelFormat = MTLPixelFormatBGRA8Unorm; _sapp.macos.view.depthStencilPixelFormat = MTLPixelFormatDepth32Float_Stencil8; - _sapp.macos.view.sampleCount = _sapp.sample_count; + _sapp.macos.view.sampleCount = (NSUInteger) _sapp.sample_count; _sapp.macos.view.autoResizeDrawable = false; _sapp.macos.window.contentView = _sapp.macos.view; [_sapp.macos.window makeFirstResponder:_sapp.macos.view]; @@ -3027,7 +3041,7 @@ _SOKOL_PRIVATE void _sapp_macos_frame(void) { if (_sapp.sample_count > 1) { attrs[i++] = NSOpenGLPFAMultisample; attrs[i++] = NSOpenGLPFASampleBuffers; attrs[i++] = 1; - attrs[i++] = NSOpenGLPFASamples; attrs[i++] = _sapp.sample_count; + attrs[i++] = NSOpenGLPFASamples; attrs[i++] = (NSOpenGLPixelFormatAttribute)_sapp.sample_count; } else { attrs[i++] = NSOpenGLPFASampleBuffers; attrs[i++] = 0; @@ -3162,7 +3176,7 @@ _SOKOL_PRIVATE void _sapp_macos_frame(void) { _sapp.drop.num_files = ((int)pboard.pasteboardItems.count > _sapp.drop.max_files) ? _sapp.drop.max_files : pboard.pasteboardItems.count; bool drop_failed = false; for (int i = 0; i < _sapp.drop.num_files; i++) { - NSURL *fileUrl = [NSURL fileURLWithPath:[pboard.pasteboardItems[i] stringForType:NSPasteboardTypeFileURL]]; + NSURL *fileUrl = [NSURL fileURLWithPath:[pboard.pasteboardItems[(NSUInteger)i] stringForType:NSPasteboardTypeFileURL]]; if (!_sapp_strcpy(fileUrl.standardizedURL.path.UTF8String, _sapp_dropped_file_path_ptr(i), _sapp.drop.max_path_length)) { SOKOL_LOG("sokol_app.h: dropped file path too long (sapp_desc.max_dropped_file_path_length)\n"); drop_failed = true; @@ -4297,7 +4311,7 @@ _SOKOL_PRIVATE EM_BOOL _sapp_emsc_key_cb(int emsc_type, const EmscriptenKeyboard } } else { - _sapp.event.key_code = _sapp_translate_key(emsc_event->keyCode); + _sapp.event.key_code = _sapp_translate_key((int)emsc_event->keyCode); /* Special hack for macOS: if the Super key is pressed, macOS doesn't send keyUp events. 
As a workaround, to prevent keys from "sticking", we'll send a keyup event following a keydown @@ -4436,7 +4450,7 @@ _SOKOL_PRIVATE EM_BOOL _sapp_emsc_touch_cb(int emsc_type, const EmscriptenTouchE for (int i = 0; i < _sapp.event.num_touches; i++) { const EmscriptenTouchPoint* src = &emsc_event->touches[i]; sapp_touchpoint* dst = &_sapp.event.touches[i]; - dst->identifier = src->identifier; + dst->identifier = (uintptr_t)src->identifier; dst->pos_x = src->targetX * _sapp.dpi_scale; dst->pos_y = src->targetY * _sapp.dpi_scale; dst->changed = src->isChanged; @@ -4888,14 +4902,13 @@ _SOKOL_PRIVATE void _sapp_gl_init_fbconfig(_sapp_gl_fbconfig* fbconfig) { fbconfig->samples = -1; } -_SOKOL_PRIVATE const _sapp_gl_fbconfig* _sapp_gl_choose_fbconfig(const _sapp_gl_fbconfig* desired, const _sapp_gl_fbconfig* alternatives, unsigned int count) { - unsigned int i; - unsigned int missing, least_missing = 1000000; - unsigned int color_diff, least_color_diff = 10000000; - unsigned int extra_diff, least_extra_diff = 10000000; +_SOKOL_PRIVATE const _sapp_gl_fbconfig* _sapp_gl_choose_fbconfig(const _sapp_gl_fbconfig* desired, const _sapp_gl_fbconfig* alternatives, int count) { + int missing, least_missing = 1000000; + int color_diff, least_color_diff = 10000000; + int extra_diff, least_extra_diff = 10000000; const _sapp_gl_fbconfig* current; - const _sapp_gl_fbconfig* closest = NULL; - for (i = 0; i < count; i++) { + const _sapp_gl_fbconfig* closest = 0; + for (int i = 0; i < count; i++) { current = alternatives + i; if (desired->doublebuffer != current->doublebuffer) { continue; @@ -4976,8 +4989,8 @@ _SOKOL_PRIVATE const _sapp_gl_fbconfig* _sapp_gl_choose_fbconfig(const _sapp_gl_ #if defined(_SAPP_WIN32) || defined(_SAPP_UWP) _SOKOL_PRIVATE bool _sapp_win32_uwp_utf8_to_wide(const char* src, wchar_t* dst, int dst_num_bytes) { SOKOL_ASSERT(src && dst && (dst_num_bytes > 1)); - memset(dst, 0, dst_num_bytes); - const int dst_chars = dst_num_bytes / sizeof(wchar_t); + memset(dst, 0, (size_t)dst_num_bytes); + const int dst_chars = dst_num_bytes / (int)sizeof(wchar_t); const int dst_needed = MultiByteToWideChar(CP_UTF8, 0, src, -1, 0, 0); if ((dst_needed > 0) && (dst_needed < dst_chars)) { MultiByteToWideChar(CP_UTF8, 0, src, -1, dst, dst_chars); @@ -5190,8 +5203,8 @@ static inline HRESULT _sapp_dxgi_Present(IDXGISwapChain* self, UINT SyncInterval _SOKOL_PRIVATE void _sapp_d3d11_create_device_and_swapchain(void) { DXGI_SWAP_CHAIN_DESC* sc_desc = &_sapp.d3d11.swap_chain_desc; - sc_desc->BufferDesc.Width = _sapp.framebuffer_width; - sc_desc->BufferDesc.Height = _sapp.framebuffer_height; + sc_desc->BufferDesc.Width = (UINT)_sapp.framebuffer_width; + sc_desc->BufferDesc.Height = (UINT)_sapp.framebuffer_height; sc_desc->BufferDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM; sc_desc->BufferDesc.RefreshRate.Numerator = 60; sc_desc->BufferDesc.RefreshRate.Denominator = 1; @@ -5208,7 +5221,7 @@ _SOKOL_PRIVATE void _sapp_d3d11_create_device_and_swapchain(void) { sc_desc->SampleDesc.Count = 1; sc_desc->SampleDesc.Quality = 0; sc_desc->BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT; - int create_flags = D3D11_CREATE_DEVICE_SINGLETHREADED | D3D11_CREATE_DEVICE_BGRA_SUPPORT; + UINT create_flags = D3D11_CREATE_DEVICE_SINGLETHREADED | D3D11_CREATE_DEVICE_BGRA_SUPPORT; #if defined(SOKOL_DEBUG) create_flags |= D3D11_CREATE_DEVICE_DEBUG; #endif @@ -5259,14 +5272,14 @@ _SOKOL_PRIVATE void _sapp_d3d11_create_default_render_target(void) { /* common desc for MSAA and depth-stencil texture */ D3D11_TEXTURE2D_DESC tex_desc; memset(&tex_desc, 0, 
sizeof(tex_desc)); - tex_desc.Width = _sapp.framebuffer_width; - tex_desc.Height = _sapp.framebuffer_height; + tex_desc.Width = (UINT)_sapp.framebuffer_width; + tex_desc.Height = (UINT)_sapp.framebuffer_height; tex_desc.MipLevels = 1; tex_desc.ArraySize = 1; tex_desc.Usage = D3D11_USAGE_DEFAULT; tex_desc.BindFlags = D3D11_BIND_RENDER_TARGET; - tex_desc.SampleDesc.Count = _sapp.sample_count; - tex_desc.SampleDesc.Quality = _sapp.sample_count > 1 ? D3D11_STANDARD_MULTISAMPLE_PATTERN : 0; + tex_desc.SampleDesc.Count = (UINT) _sapp.sample_count; + tex_desc.SampleDesc.Quality = (UINT) (_sapp.sample_count > 1 ? D3D11_STANDARD_MULTISAMPLE_PATTERN : 0); /* create MSAA texture and view if antialiasing requested */ if (_sapp.sample_count > 1) { @@ -5298,7 +5311,7 @@ _SOKOL_PRIVATE void _sapp_d3d11_destroy_default_render_target(void) { _SOKOL_PRIVATE void _sapp_d3d11_resize_default_render_target(void) { if (_sapp.d3d11.swap_chain) { _sapp_d3d11_destroy_default_render_target(); - _sapp_dxgi_ResizeBuffers(_sapp.d3d11.swap_chain, _sapp.d3d11.swap_chain_desc.BufferCount, _sapp.framebuffer_width, _sapp.framebuffer_height, DXGI_FORMAT_B8G8R8A8_UNORM, 0); + _sapp_dxgi_ResizeBuffers(_sapp.d3d11.swap_chain, _sapp.d3d11.swap_chain_desc.BufferCount, (UINT)_sapp.framebuffer_width, (UINT)_sapp.framebuffer_height, DXGI_FORMAT_B8G8R8A8_UNORM, 0); _sapp_d3d11_create_default_render_target(); } } @@ -5310,7 +5323,7 @@ _SOKOL_PRIVATE void _sapp_d3d11_present(void) { SOKOL_ASSERT(_sapp.d3d11.msaa_rt); _sapp_d3d11_ResolveSubresource(_sapp.d3d11.device_context, (ID3D11Resource*)_sapp.d3d11.rt, 0, (ID3D11Resource*)_sapp.d3d11.msaa_rt, 0, DXGI_FORMAT_B8G8R8A8_UNORM); } - _sapp_dxgi_Present(_sapp.d3d11.swap_chain, _sapp.swap_interval, 0); + _sapp_dxgi_Present(_sapp.d3d11.swap_chain, (UINT)_sapp.swap_interval, 0); } #endif /* SOKOL_D3D11 */ @@ -5344,6 +5357,7 @@ _SOKOL_PRIVATE void _sapp_wgl_init(void) { if (!_sapp.wgl.msg_hwnd) { _sapp_fail("Win32: failed to create helper window!\n"); } + SOKOL_ASSERT(_sapp.wgl.msg_hwnd); ShowWindow(_sapp.wgl.msg_hwnd, SW_HIDE); MSG msg; while (PeekMessageW(&msg, _sapp.wgl.msg_hwnd, 0, 0, PM_REMOVE)) { @@ -5450,7 +5464,8 @@ _SOKOL_PRIVATE int _sapp_wgl_find_pixel_format(void) { const _sapp_gl_fbconfig* closest; int native_count = _sapp_wgl_attrib(1, WGL_NUMBER_PIXEL_FORMATS_ARB); - _sapp_gl_fbconfig* usable_configs = (_sapp_gl_fbconfig*) SOKOL_CALLOC(native_count, sizeof(_sapp_gl_fbconfig)); + _sapp_gl_fbconfig* usable_configs = (_sapp_gl_fbconfig*) SOKOL_CALLOC((size_t)native_count, sizeof(_sapp_gl_fbconfig)); + SOKOL_ASSERT(usable_configs); int usable_count = 0; for (int i = 0; i < native_count; i++) { const int n = i + 1; @@ -5477,7 +5492,7 @@ _SOKOL_PRIVATE int _sapp_wgl_find_pixel_format(void) { if (_sapp.wgl.arb_multisample) { u->samples = _sapp_wgl_attrib(n, WGL_SAMPLES_ARB); } - u->handle = n; + u->handle = (uintptr_t)n; usable_count++; } SOKOL_ASSERT(usable_count > 0); @@ -5563,7 +5578,7 @@ _SOKOL_PRIVATE void _sapp_wgl_swap_buffers(void) { _SOKOL_PRIVATE bool _sapp_win32_wide_to_utf8(const wchar_t* src, char* dst, int dst_num_bytes) { SOKOL_ASSERT(src && dst && (dst_num_bytes > 1)); - memset(dst, 0, dst_num_bytes); + memset(dst, 0, (size_t)dst_num_bytes); const int bytes_needed = WideCharToMultiByte(CP_UTF8, 0, src, -1, NULL, 0, NULL, NULL); if (bytes_needed <= dst_num_bytes) { WideCharToMultiByte(CP_UTF8, 0, src, -1, dst, dst_num_bytes, NULL, NULL); @@ -5796,11 +5811,11 @@ _SOKOL_PRIVATE void _sapp_win32_files_dropped(HDROP hdrop) { bool drop_failed = false; const int count = 
(int) DragQueryFileW(hdrop, 0xffffffff, NULL, 0); _sapp.drop.num_files = (count > _sapp.drop.max_files) ? _sapp.drop.max_files : count; - for (int i = 0; i < _sapp.drop.num_files; i++) { + for (UINT i = 0; i < (UINT)_sapp.drop.num_files; i++) { const UINT num_chars = DragQueryFileW(hdrop, i, NULL, 0) + 1; WCHAR* buffer = (WCHAR*) SOKOL_CALLOC(num_chars, sizeof(WCHAR)); DragQueryFileW(hdrop, i, buffer, num_chars); - if (!_sapp_win32_wide_to_utf8(buffer, _sapp_dropped_file_path_ptr(i), _sapp.drop.max_path_length)) { + if (!_sapp_win32_wide_to_utf8(buffer, _sapp_dropped_file_path_ptr((int)i), _sapp.drop.max_path_length)) { SOKOL_LOG("sokol_app.h: dropped file path too long (sapp_desc.max_dropped_file_path_length)\n"); drop_failed = true; } @@ -5931,7 +5946,8 @@ _SOKOL_PRIVATE LRESULT CALLBACK _sapp_win32_wndproc(HWND hWnd, UINT uMsg, WPARAM if (_sapp.mouse.locked) { HRAWINPUT ri = (HRAWINPUT) lParam; UINT size = sizeof(_sapp.win32.raw_input_data); - if (-1 == GetRawInputData(ri, RID_INPUT, &_sapp.win32.raw_input_data, &size, sizeof(RAWINPUTHEADER))) { + // see: https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-getrawinputdata + if ((UINT)-1 == GetRawInputData(ri, RID_INPUT, &_sapp.win32.raw_input_data, &size, sizeof(RAWINPUTHEADER))) { SOKOL_LOG("GetRawInputData() failed\n"); break; } @@ -6143,7 +6159,7 @@ _SOKOL_PRIVATE bool _sapp_win32_set_clipboard_string(const char* str) { SOKOL_ASSERT(_sapp.clipboard.enabled && (_sapp.clipboard.buf_size > 0)); wchar_t* wchar_buf = 0; - const int wchar_buf_size = _sapp.clipboard.buf_size * sizeof(wchar_t); + const SIZE_T wchar_buf_size = (SIZE_T)_sapp.clipboard.buf_size * sizeof(wchar_t); HANDLE object = GlobalAlloc(GMEM_MOVEABLE, wchar_buf_size); if (!object) { goto error; @@ -6152,7 +6168,7 @@ _SOKOL_PRIVATE bool _sapp_win32_set_clipboard_string(const char* str) { if (!wchar_buf) { goto error; } - if (!_sapp_win32_uwp_utf8_to_wide(str, wchar_buf, wchar_buf_size)) { + if (!_sapp_win32_uwp_utf8_to_wide(str, wchar_buf, (int)wchar_buf_size)) { goto error; } GlobalUnlock(wchar_buf); @@ -6260,7 +6276,7 @@ _SOKOL_PRIVATE void _sapp_win32_run(const sapp_desc* desc) { #if defined(SOKOL_D3D11) _sapp_d3d11_present(); if (IsIconic(_sapp.win32.hwnd)) { - Sleep(16 * _sapp.swap_interval); + Sleep((DWORD)(16 * _sapp.swap_interval)); } #endif #if defined(SOKOL_GLCORE33) @@ -6300,8 +6316,9 @@ _SOKOL_PRIVATE char** _sapp_win32_command_line_to_utf8_argv(LPWSTR w_command_lin _sapp_fail("Win32: failed to parse command line"); } else { size_t size = wcslen(w_command_line) * 4; - argv = (char**) SOKOL_CALLOC(1, (argc + 1) * sizeof(char*) + size); - args = (char*)&argv[argc + 1]; + argv = (char**) SOKOL_CALLOC(1, ((size_t)argc + 1) * sizeof(char*) + size); + SOKOL_ASSERT(argv); + args = (char*) &argv[argc + 1]; int n; for (int i = 0; i < argc; ++i) { n = WideCharToMultiByte(CP_UTF8, 0, w_argv[i], -1, args, (int)size, NULL, NULL); @@ -6310,7 +6327,7 @@ _SOKOL_PRIVATE char** _sapp_win32_command_line_to_utf8_argv(LPWSTR w_command_lin break; } argv[i] = args; - size -= n; + size -= (size_t)n; args += n; } LocalFree(w_argv); @@ -7595,19 +7612,19 @@ _SOKOL_PRIVATE bool _sapp_android_touch_event(const AInputEvent* e) { } int32_t idx = action_idx >> AMOTION_EVENT_ACTION_POINTER_INDEX_SHIFT; _sapp_init_event(type); - _sapp.event.num_touches = AMotionEvent_getPointerCount(e); + _sapp.event.num_touches = (int)AMotionEvent_getPointerCount(e); if (_sapp.event.num_touches > SAPP_MAX_TOUCHPOINTS) { _sapp.event.num_touches = SAPP_MAX_TOUCHPOINTS; } for (int32_t i = 0; i < 
_sapp.event.num_touches; i++) { sapp_touchpoint* dst = &_sapp.event.touches[i]; - dst->identifier = AMotionEvent_getPointerId(e, i); - dst->pos_x = (AMotionEvent_getRawX(e, i) / _sapp.window_width) * _sapp.framebuffer_width; - dst->pos_y = (AMotionEvent_getRawY(e, i) / _sapp.window_height) * _sapp.framebuffer_height; + dst->identifier = (uintptr_t)AMotionEvent_getPointerId(e, (size_t)i); + dst->pos_x = (AMotionEvent_getRawX(e, (size_t)i) / _sapp.window_width) * _sapp.framebuffer_width; + dst->pos_y = (AMotionEvent_getRawY(e, (size_t)i) / _sapp.window_height) * _sapp.framebuffer_height; if (action == AMOTION_EVENT_ACTION_POINTER_DOWN || action == AMOTION_EVENT_ACTION_POINTER_UP) { - dst->changed = i == idx; + dst->changed = (i == idx); } else { dst->changed = true; } @@ -9053,7 +9070,7 @@ _SOKOL_PRIVATE GLXFBConfig _sapp_glx_choosefbconfig() { _sapp_fail("GLX: No GLXFBConfigs returned"); } - usable_configs = (_sapp_gl_fbconfig*) SOKOL_CALLOC(native_count, sizeof(_sapp_gl_fbconfig)); + usable_configs = (_sapp_gl_fbconfig*) SOKOL_CALLOC((size_t)native_count, sizeof(_sapp_gl_fbconfig)); usable_count = 0; for (i = 0; i < native_count; i++) { const GLXFBConfig n = native_configs[i]; @@ -9233,7 +9250,7 @@ _SOKOL_PRIVATE void _sapp_x11_create_hidden_cursor(void) { SOKOL_ASSERT(img && (img->width == 16) && (img->height == 16) && img->pixels); img->xhot = 0; img->yhot = 0; - const size_t num_bytes = w * h * sizeof(XcursorPixel); + const size_t num_bytes = (size_t)(w * h) * sizeof(XcursorPixel); memset(img->pixels, 0, num_bytes); _sapp.x11.hidden_cursor = XcursorImageLoadCursor(_sapp.x11.display, img); XcursorImageDestroy(img); @@ -9329,8 +9346,8 @@ _SOKOL_PRIVATE void _sapp_x11_create_window(Visual* visual, int depth) { _sapp.x11.window = XCreateWindow(_sapp.x11.display, _sapp.x11.root, 0, 0, - _sapp.window_width, - _sapp.window_height, + (uint32_t)_sapp.window_width, + (uint32_t)_sapp.window_height, 0, /* border width */ depth, /* color depth */ InputOutput, @@ -9420,7 +9437,7 @@ _SOKOL_PRIVATE int _sapp_x11_get_window_state(void) { } *state = NULL; if (_sapp_x11_get_window_property(_sapp.x11.window, _sapp.x11.WM_STATE, _sapp.x11.WM_STATE, (unsigned char**)&state) >= 2) { - result = state->state; + result = (int)state->state; } if (state) { XFree(state); @@ -9428,7 +9445,7 @@ _SOKOL_PRIVATE int _sapp_x11_get_window_state(void) { return result; } -_SOKOL_PRIVATE uint32_t _sapp_x11_mod(int x11_mods) { +_SOKOL_PRIVATE uint32_t _sapp_x11_mod(uint32_t x11_mods) { uint32_t mods = 0; if (x11_mods & ShiftMask) { mods |= SAPP_MODIFIER_SHIFT; @@ -9798,7 +9815,7 @@ _SOKOL_PRIVATE void _sapp_x11_process_event(XEvent* event) { break; case KeyPress: { - int keycode = event->xkey.keycode; + int keycode = (int)event->xkey.keycode; const sapp_keycode key = _sapp_x11_translate_key(keycode); bool repeat = _sapp_x11_keycodes[keycode & 0xFF]; _sapp_x11_keycodes[keycode & 0xFF] = true; @@ -9816,7 +9833,7 @@ _SOKOL_PRIVATE void _sapp_x11_process_event(XEvent* event) { break; case KeyRelease: { - int keycode = event->xkey.keycode; + int keycode = (int)event->xkey.keycode; const sapp_keycode key = _sapp_x11_translate_key(keycode); _sapp_x11_keycodes[keycode & 0xFF] = false; if (key != SAPP_KEYCODE_INVALID) { @@ -9908,14 +9925,14 @@ _SOKOL_PRIVATE void _sapp_x11_process_event(XEvent* event) { return; } if (event->xclient.message_type == _sapp.x11.WM_PROTOCOLS) { - const Atom protocol = event->xclient.data.l[0]; + const Atom protocol = (Atom)event->xclient.data.l[0]; if (protocol == _sapp.x11.WM_DELETE_WINDOW) { 
_sapp.quit_requested = true; } } else if (event->xclient.message_type == _sapp.x11.xdnd.XdndEnter) { const bool is_list = 0 != (event->xclient.data.l[1] & 1); - _sapp.x11.xdnd.source = event->xclient.data.l[0]; + _sapp.x11.xdnd.source = (Window)event->xclient.data.l[0]; _sapp.x11.xdnd.version = event->xclient.data.l[1] >> 24; _sapp.x11.xdnd.format = None; if (_sapp.x11.xdnd.version > _SAPP_X11_XDND_VERSION) { @@ -9947,7 +9964,7 @@ _SOKOL_PRIVATE void _sapp_x11_process_event(XEvent* event) { Time time = CurrentTime; if (_sapp.x11.xdnd.format) { if (_sapp.x11.xdnd.version >= 1) { - time = event->xclient.data.l[2]; + time = (Time)event->xclient.data.l[2]; } XConvertSelection(_sapp.x11.display, _sapp.x11.xdnd.XdndSelection, @@ -9963,7 +9980,7 @@ _SOKOL_PRIVATE void _sapp_x11_process_event(XEvent* event) { reply.xclient.window = _sapp.x11.window; reply.xclient.message_type = _sapp.x11.xdnd.XdndFinished; reply.xclient.format = 32; - reply.xclient.data.l[0] = _sapp.x11.window; + reply.xclient.data.l[0] = (long)_sapp.x11.window; reply.xclient.data.l[1] = 0; // drag was rejected reply.xclient.data.l[2] = None; XSendEvent(_sapp.x11.display, _sapp.x11.xdnd.source, False, NoEventMask, &reply); @@ -9984,12 +10001,12 @@ _SOKOL_PRIVATE void _sapp_x11_process_event(XEvent* event) { reply.xclient.window = _sapp.x11.xdnd.source; reply.xclient.message_type = _sapp.x11.xdnd.XdndStatus; reply.xclient.format = 32; - reply.xclient.data.l[0] = _sapp.x11.window; + reply.xclient.data.l[0] = (long)_sapp.x11.window; if (_sapp.x11.xdnd.format) { /* reply that we are ready to copy the dragged data */ reply.xclient.data.l[1] = 1; // accept with no rectangle if (_sapp.x11.xdnd.version >= 2) { - reply.xclient.data.l[4] = _sapp.x11.xdnd.XdndActionCopy; + reply.xclient.data.l[4] = (long)_sapp.x11.xdnd.XdndActionCopy; } } XSendEvent(_sapp.x11.display, _sapp.x11.xdnd.source, False, NoEventMask, &reply); @@ -10018,9 +10035,9 @@ _SOKOL_PRIVATE void _sapp_x11_process_event(XEvent* event) { reply.xclient.window = _sapp.x11.window; reply.xclient.message_type = _sapp.x11.xdnd.XdndFinished; reply.xclient.format = 32; - reply.xclient.data.l[0] = _sapp.x11.window; + reply.xclient.data.l[0] = (long)_sapp.x11.window; reply.xclient.data.l[1] = result; - reply.xclient.data.l[2] = _sapp.x11.xdnd.XdndActionCopy; + reply.xclient.data.l[2] = (long)_sapp.x11.xdnd.XdndActionCopy; XSendEvent(_sapp.x11.display, _sapp.x11.xdnd.source, False, NoEventMask, &reply); XFlush(_sapp.x11.display); } @@ -10164,6 +10181,18 @@ SOKOL_API_IMPL int sapp_width(void) { return (_sapp.framebuffer_width > 0) ? _sapp.framebuffer_width : 1; } +SOKOL_API_IMPL float sapp_widthf(void) { + return (float)sapp_width(); +} + +SOKOL_API_IMPL int sapp_height(void) { + return (_sapp.framebuffer_height > 0) ? _sapp.framebuffer_height : 1; +} + +SOKOL_API_IMPL float sapp_heightf(void) { + return (float)sapp_height(); +} + SOKOL_API_IMPL int sapp_color_format(void) { #if defined(_SAPP_EMSCRIPTEN) && defined(SOKOL_WGPU) switch (_sapp.emsc.wgpu.render_format) { @@ -10190,10 +10219,6 @@ SOKOL_API_IMPL int sapp_sample_count(void) { return _sapp.sample_count; } -SOKOL_API_IMPL int sapp_height(void) { - return (_sapp.framebuffer_height > 0) ? 
_sapp.framebuffer_height : 1; -} - SOKOL_API_IMPL bool sapp_high_dpi(void) { return _sapp.desc.high_dpi && (_sapp.dpi_scale >= 1.5f); } diff --git a/sokol_args.h b/sokol_args.h index 994ba5932..e3639a8f3 100644 --- a/sokol_args.h +++ b/sokol_args.h @@ -665,8 +665,8 @@ SOKOL_API_IMPL void sargs_setup(const sargs_desc* desc) { _sargs.max_args = _sargs_def(desc->max_args, _SARGS_MAX_ARGS_DEF); _sargs.buf_size = _sargs_def(desc->buf_size, _SARGS_BUF_SIZE_DEF); SOKOL_ASSERT(_sargs.buf_size > 8); - _sargs.args = (_sargs_kvp_t*) SOKOL_CALLOC(_sargs.max_args, sizeof(_sargs_kvp_t)); - _sargs.buf = (char*) SOKOL_CALLOC(_sargs.buf_size, sizeof(char)); + _sargs.args = (_sargs_kvp_t*) SOKOL_CALLOC((size_t)_sargs.max_args, sizeof(_sargs_kvp_t)); + _sargs.buf = (char*) SOKOL_CALLOC((size_t)_sargs.buf_size, sizeof(char)); /* the first character in buf is reserved and always zero, this is the 'empty string' */ _sargs.buf_pos = 1; _sargs.valid = true; diff --git a/sokol_audio.h b/sokol_audio.h index 2e02ae8ac..5b22913d4 100644 --- a/sokol_audio.h +++ b/sokol_audio.h @@ -440,7 +440,8 @@ inline void saudio_setup(const saudio_desc& desc) { return saudio_setup(&desc); /*=== IMPLEMENTATION =========================================================*/ #ifdef SOKOL_AUDIO_IMPL #define SOKOL_AUDIO_IMPL_INCLUDED (1) -#include /* memset, memcpy */ +#include // memset, memcpy +#include // size_t #ifndef SOKOL_API_IMPL #define SOKOL_API_IMPL @@ -687,10 +688,10 @@ typedef struct { } _saudio_backend_t; /* a ringbuffer structure */ typedef struct { - uint32_t head; /* next slot to write to */ - uint32_t tail; /* next slot to read from */ - uint32_t num; /* number of slots in queue */ - uint32_t queue[SAUDIO_RING_MAX_SLOTS]; + int head; // next slot to write to + int tail; // next slot to read from + int num; // number of slots in queue + int queue[SAUDIO_RING_MAX_SLOTS]; } _saudio_ring_t; /* a packet FIFO structure */ @@ -787,11 +788,11 @@ _SOKOL_PRIVATE void _saudio_mutex_unlock(_saudio_mutex_t* m) { (void)m; } #endif /*=== RING-BUFFER QUEUE IMPLEMENTATION =======================================*/ -_SOKOL_PRIVATE uint16_t _saudio_ring_idx(_saudio_ring_t* ring, uint32_t i) { - return (uint16_t) (i % ring->num); +_SOKOL_PRIVATE int _saudio_ring_idx(_saudio_ring_t* ring, int i) { + return (i % ring->num); } -_SOKOL_PRIVATE void _saudio_ring_init(_saudio_ring_t* ring, uint32_t num_slots) { +_SOKOL_PRIVATE void _saudio_ring_init(_saudio_ring_t* ring, int num_slots) { SOKOL_ASSERT((num_slots + 1) <= SAUDIO_RING_MAX_SLOTS); ring->head = 0; ring->tail = 0; @@ -808,7 +809,7 @@ _SOKOL_PRIVATE bool _saudio_ring_empty(_saudio_ring_t* ring) { } _SOKOL_PRIVATE int _saudio_ring_count(_saudio_ring_t* ring) { - uint32_t count; + int count; if (ring->head >= ring->tail) { count = ring->head - ring->tail; } @@ -819,15 +820,15 @@ _SOKOL_PRIVATE int _saudio_ring_count(_saudio_ring_t* ring) { return count; } -_SOKOL_PRIVATE void _saudio_ring_enqueue(_saudio_ring_t* ring, uint32_t val) { +_SOKOL_PRIVATE void _saudio_ring_enqueue(_saudio_ring_t* ring, int val) { SOKOL_ASSERT(!_saudio_ring_full(ring)); ring->queue[ring->head] = val; ring->head = _saudio_ring_idx(ring, ring->head + 1); } -_SOKOL_PRIVATE uint32_t _saudio_ring_dequeue(_saudio_ring_t* ring) { +_SOKOL_PRIVATE int _saudio_ring_dequeue(_saudio_ring_t* ring) { SOKOL_ASSERT(!_saudio_ring_empty(ring)); - uint32_t val = ring->queue[ring->tail]; + int val = ring->queue[ring->tail]; ring->tail = _saudio_ring_idx(ring, ring->tail + 1); return val; } @@ -847,7 +848,7 @@ _SOKOL_PRIVATE void 
_saudio_fifo_init(_saudio_fifo_t* fifo, int packet_size, int SOKOL_ASSERT((packet_size > 0) && (num_packets > 0)); fifo->packet_size = packet_size; fifo->num_packets = num_packets; - fifo->base_ptr = (uint8_t*) SOKOL_MALLOC(packet_size * num_packets); + fifo->base_ptr = (uint8_t*) SOKOL_MALLOC((size_t)(packet_size * num_packets)); SOKOL_ASSERT(fifo->base_ptr); fifo->cur_packet = -1; fifo->cur_offset = 0; @@ -907,7 +908,7 @@ _SOKOL_PRIVATE int _saudio_fifo_write(_saudio_fifo_t* fifo, const uint8_t* ptr, to_copy = max_copy; } uint8_t* dst = fifo->base_ptr + fifo->cur_packet * fifo->packet_size + fifo->cur_offset; - memcpy(dst, ptr, to_copy); + memcpy(dst, ptr, (size_t)to_copy); ptr += to_copy; fifo->cur_offset += to_copy; all_to_copy -= to_copy; @@ -949,7 +950,7 @@ _SOKOL_PRIVATE int _saudio_fifo_read(_saudio_fifo_t* fifo, uint8_t* ptr, int num int packet_index = _saudio_ring_dequeue(&fifo->read_queue); _saudio_ring_enqueue(&fifo->write_queue, packet_index); const uint8_t* src = fifo->base_ptr + packet_index * fifo->packet_size; - memcpy(dst, src, fifo->packet_size); + memcpy(dst, src, (size_t)fifo->packet_size); dst += fifo->packet_size; num_bytes_copied += fifo->packet_size; } @@ -963,7 +964,7 @@ _SOKOL_PRIVATE int _saudio_fifo_read(_saudio_fifo_t* fifo, uint8_t* ptr, int num /*=== DUMMY BACKEND IMPLEMENTATION ===========================================*/ #if defined(SOKOL_DUMMY_BACKEND) _SOKOL_PRIVATE bool _saudio_backend_init(void) { - _saudio.bytes_per_frame = _saudio.num_channels * sizeof(float); + _saudio.bytes_per_frame = _saudio.num_channels * (int)sizeof(float); return true; }; _SOKOL_PRIVATE void _saudio_backend_shutdown(void) { }; @@ -975,7 +976,7 @@ _SOKOL_PRIVATE void _saudio_backend_shutdown(void) { }; _SOKOL_PRIVATE void _saudio_coreaudio_callback(void* user_data, AudioQueueRef queue, AudioQueueBufferRef buffer) { _SOKOL_UNUSED(user_data); if (_saudio_has_callback()) { - const int num_frames = buffer->mAudioDataByteSize / _saudio.bytes_per_frame; + const int num_frames = (int)buffer->mAudioDataByteSize / _saudio.bytes_per_frame; const int num_channels = _saudio.num_channels; _saudio_stream_callback((float*)buffer->mAudioData, num_frames, num_channels); } @@ -984,7 +985,7 @@ _SOKOL_PRIVATE void _saudio_coreaudio_callback(void* user_data, AudioQueueRef qu int num_bytes = (int) buffer->mAudioDataByteSize; if (0 == _saudio_fifo_read(&_saudio.fifo, ptr, num_bytes)) { /* not enough read data available, fill the entire buffer with silence */ - memset(ptr, 0, num_bytes); + memset(ptr, 0, (size_t)num_bytes); } } AudioQueueEnqueueBuffer(queue, buffer, 0, NULL); @@ -1000,8 +1001,8 @@ _SOKOL_PRIVATE bool _saudio_backend_init(void) { fmt.mFormatID = kAudioFormatLinearPCM; fmt.mFormatFlags = kLinearPCMFormatFlagIsFloat | kAudioFormatFlagIsPacked; fmt.mFramesPerPacket = 1; - fmt.mChannelsPerFrame = _saudio.num_channels; - fmt.mBytesPerFrame = sizeof(float) * _saudio.num_channels; + fmt.mChannelsPerFrame = (uint32_t) _saudio.num_channels; + fmt.mBytesPerFrame = (uint32_t)sizeof(float) * (uint32_t)_saudio.num_channels; fmt.mBytesPerPacket = fmt.mBytesPerFrame; fmt.mBitsPerChannel = 32; OSStatus res = AudioQueueNewOutput(&fmt, _saudio_coreaudio_callback, 0, NULL, NULL, 0, &_saudio.backend.ca_audio_queue); @@ -1010,7 +1011,7 @@ _SOKOL_PRIVATE bool _saudio_backend_init(void) { /* create 2 audio buffers */ for (int i = 0; i < 2; i++) { AudioQueueBufferRef buf = NULL; - const uint32_t buf_byte_size = _saudio.buffer_frames * fmt.mBytesPerFrame; + const uint32_t buf_byte_size = 
(uint32_t)_saudio.buffer_frames * fmt.mBytesPerFrame; res = AudioQueueAllocateBuffer(_saudio.backend.ca_audio_queue, buf_byte_size, &buf); SOKOL_ASSERT((res == 0) && buf); buf->mAudioDataByteSize = buf_byte_size; @@ -1019,7 +1020,7 @@ _SOKOL_PRIVATE bool _saudio_backend_init(void) { } /* init or modify actual playback parameters */ - _saudio.bytes_per_frame = fmt.mBytesPerFrame; + _saudio.bytes_per_frame = (int)fmt.mBytesPerFrame; /* ...and start playback */ res = AudioQueueStart(_saudio.backend.ca_audio_queue, NULL); @@ -1042,7 +1043,7 @@ _SOKOL_PRIVATE void* _saudio_alsa_cb(void* param) { _SOKOL_UNUSED(param); while (!_saudio.backend.thread_stop) { /* snd_pcm_writei() will be blocking until it needs data */ - int write_res = snd_pcm_writei(_saudio.backend.device, _saudio.backend.buffer, _saudio.backend.buffer_frames); + int write_res = snd_pcm_writei(_saudio.backend.device, _saudio.backend.buffer, (snd_pcm_uframes_t)_saudio.backend.buffer_frames); if (write_res < 0) { /* underrun occurred */ snd_pcm_prepare(_saudio.backend.device); @@ -1055,7 +1056,7 @@ _SOKOL_PRIVATE void* _saudio_alsa_cb(void* param) { else { if (0 == _saudio_fifo_read(&_saudio.fifo, (uint8_t*)_saudio.backend.buffer, _saudio.backend.buffer_byte_size)) { /* not enough read data available, fill the entire buffer with silence */ - memset(_saudio.backend.buffer, 0, _saudio.backend.buffer_byte_size); + memset(_saudio.backend.buffer, 0, (size_t)_saudio.backend.buffer_byte_size); } } } @@ -1064,7 +1065,7 @@ _SOKOL_PRIVATE void* _saudio_alsa_cb(void* param) { } _SOKOL_PRIVATE bool _saudio_backend_init(void) { - int dir; unsigned int rate; + int dir; uint32_t rate; int rc = snd_pcm_open(&_saudio.backend.device, "default", SND_PCM_STREAM_PLAYBACK, 0); if (rc < 0) { SOKOL_LOG("sokol_audio.h: snd_pcm_open() failed"); @@ -1083,16 +1084,16 @@ _SOKOL_PRIVATE bool _saudio_backend_init(void) { SOKOL_LOG("sokol_audio.h: float samples not supported"); goto error; } - if (0 > snd_pcm_hw_params_set_buffer_size(_saudio.backend.device, params, _saudio.buffer_frames)) { + if (0 > snd_pcm_hw_params_set_buffer_size(_saudio.backend.device, params, (snd_pcm_uframes_t)_saudio.buffer_frames)) { SOKOL_LOG("sokol_audio.h: requested buffer size not supported"); goto error; } - if (0 > snd_pcm_hw_params_set_channels(_saudio.backend.device, params, _saudio.num_channels)) { + if (0 > snd_pcm_hw_params_set_channels(_saudio.backend.device, params, (uint32_t)_saudio.num_channels)) { SOKOL_LOG("sokol_audio.h: requested channel count not supported"); goto error; } /* let ALSA pick a nearby sampling rate */ - rate = _saudio.sample_rate; + rate = (uint32_t) _saudio.sample_rate; dir = 0; if (0 > snd_pcm_hw_params_set_rate_near(_saudio.backend.device, params, &rate, &dir)) { SOKOL_LOG("sokol_audio.h: snd_pcm_hw_params_set_rate_near() failed"); @@ -1104,14 +1105,14 @@ _SOKOL_PRIVATE bool _saudio_backend_init(void) { } /* read back actual sample rate and channels */ - _saudio.sample_rate = rate; - _saudio.bytes_per_frame = _saudio.num_channels * sizeof(float); + _saudio.sample_rate = (int)rate; + _saudio.bytes_per_frame = _saudio.num_channels * (int)sizeof(float); /* allocate the streaming buffer */ _saudio.backend.buffer_byte_size = _saudio.buffer_frames * _saudio.bytes_per_frame; _saudio.backend.buffer_frames = _saudio.buffer_frames; - _saudio.backend.buffer = (float*) SOKOL_MALLOC(_saudio.backend.buffer_byte_size); - memset(_saudio.backend.buffer, 0, _saudio.backend.buffer_byte_size); + _saudio.backend.buffer = (float*) 
SOKOL_MALLOC((size_t)_saudio.backend.buffer_byte_size); + memset(_saudio.backend.buffer, 0, (size_t)_saudio.backend.buffer_byte_size); /* create the buffer-streaming start thread */ if (0 != pthread_create(&_saudio.backend.thread, 0, _saudio_alsa_cb, 0)) { @@ -1190,12 +1191,12 @@ _SOKOL_PRIVATE void _saudio_wasapi_fill_buffer(void) { else { if (0 == _saudio_fifo_read(&_saudio.fifo, (uint8_t*)_saudio.backend.thread.src_buffer, _saudio.backend.thread.src_buffer_byte_size)) { /* not enough read data available, fill the entire buffer with silence */ - memset(_saudio.backend.thread.src_buffer, 0, _saudio.backend.thread.src_buffer_byte_size); + memset(_saudio.backend.thread.src_buffer, 0, (size_t)_saudio.backend.thread.src_buffer_byte_size); } } } -_SOKOL_PRIVATE void _saudio_wasapi_submit_buffer(UINT32 num_frames) { +_SOKOL_PRIVATE void _saudio_wasapi_submit_buffer(int num_frames) { BYTE* wasapi_buffer = 0; if (FAILED(IAudioRenderClient_GetBuffer(_saudio.backend.render_client, num_frames, &wasapi_buffer))) { return; @@ -1205,8 +1206,8 @@ _SOKOL_PRIVATE void _saudio_wasapi_submit_buffer(UINT32 num_frames) { /* convert float samples to int16_t, refill float buffer if needed */ const int num_samples = num_frames * _saudio.num_channels; int16_t* dst = (int16_t*) wasapi_buffer; - uint32_t buffer_pos = _saudio.backend.thread.src_buffer_pos; - const uint32_t buffer_float_size = _saudio.backend.thread.src_buffer_byte_size / sizeof(float); + int buffer_pos = _saudio.backend.thread.src_buffer_pos; + const int buffer_float_size = _saudio.backend.thread.src_buffer_byte_size / (int)sizeof(float); float* src = _saudio.backend.thread.src_buffer; for (int i = 0; i < num_samples; i++) { if (0 == buffer_pos) { @@ -1234,7 +1235,7 @@ _SOKOL_PRIVATE DWORD WINAPI _saudio_wasapi_thread_fn(LPVOID param) { continue; } SOKOL_ASSERT(_saudio.backend.thread.dst_buffer_frames >= padding); - UINT32 num_frames = _saudio.backend.thread.dst_buffer_frames - padding; + int num_frames = (int)_saudio.backend.thread.dst_buffer_frames - (int)padding; if (num_frames > 0) { _saudio_wasapi_submit_buffer(num_frames); } @@ -1359,8 +1360,8 @@ _SOKOL_PRIVATE bool _saudio_backend_init(void) { #endif WAVEFORMATEX fmt; memset(&fmt, 0, sizeof(fmt)); - fmt.nChannels = (WORD) _saudio.num_channels; - fmt.nSamplesPerSec = _saudio.sample_rate; + fmt.nChannels = (WORD)_saudio.num_channels; + fmt.nSamplesPerSec = (DWORD)_saudio.sample_rate; fmt.wFormatTag = WAVE_FORMAT_PCM; fmt.wBitsPerSample = 16; fmt.nBlockAlign = (fmt.nChannels * fmt.wBitsPerSample) / 8; @@ -1390,13 +1391,13 @@ _SOKOL_PRIVATE bool _saudio_backend_init(void) { SOKOL_LOG("sokol_audio wasapi: audio client SetEventHandle failed"); goto error; } - _saudio.backend.si16_bytes_per_frame = _saudio.num_channels * sizeof(int16_t); - _saudio.bytes_per_frame = _saudio.num_channels * sizeof(float); + _saudio.backend.si16_bytes_per_frame = _saudio.num_channels * (int)sizeof(int16_t); + _saudio.bytes_per_frame = _saudio.num_channels * (int)sizeof(float); _saudio.backend.thread.src_buffer_frames = _saudio.buffer_frames; _saudio.backend.thread.src_buffer_byte_size = _saudio.backend.thread.src_buffer_frames * _saudio.bytes_per_frame; /* allocate an intermediate buffer for sample format conversion */ - _saudio.backend.thread.src_buffer = (float*) SOKOL_MALLOC(_saudio.backend.thread.src_buffer_byte_size); + _saudio.backend.thread.src_buffer = (float*) SOKOL_MALLOC((size_t)_saudio.backend.thread.src_buffer_byte_size); SOKOL_ASSERT(_saudio.backend.thread.src_buffer); /* create streaming thread */ @@ 
-1446,7 +1447,7 @@ EMSCRIPTEN_KEEPALIVE int _saudio_emsc_pull(int num_frames) { const int num_bytes = num_frames * _saudio.bytes_per_frame; if (0 == _saudio_fifo_read(&_saudio.fifo, _saudio.backend.buffer, num_bytes)) { /* not enough read data available, fill the entire buffer with silence */ - memset(_saudio.backend.buffer, 0, num_bytes); + memset(_saudio.backend.buffer, 0, (size_t)num_bytes); } } int res = (int) _saudio.backend.buffer; @@ -1551,10 +1552,10 @@ EM_JS(int, saudio_js_buffer_frames, (void), { _SOKOL_PRIVATE bool _saudio_backend_init(void) { if (saudio_js_init(_saudio.sample_rate, _saudio.num_channels, _saudio.buffer_frames)) { - _saudio.bytes_per_frame = sizeof(float) * _saudio.num_channels; + _saudio.bytes_per_frame = (int)sizeof(float) * _saudio.num_channels; _saudio.sample_rate = saudio_js_sample_rate(); _saudio.buffer_frames = saudio_js_buffer_frames(); - const int buf_size = _saudio.buffer_frames * _saudio.bytes_per_frame; + const size_t buf_size = (size_t) (_saudio.buffer_frames * _saudio.bytes_per_frame); _saudio.backend.buffer = (uint8_t*) SOKOL_MALLOC(buf_size); return true; } @@ -1637,10 +1638,10 @@ _SOKOL_PRIVATE void _saudio_opensles_fill_buffer(void) { _saudio_stream_callback(_saudio.backend.src_buffer, src_buffer_frames, _saudio.num_channels); } else { - const int src_buffer_byte_size = src_buffer_frames * _saudio.num_channels * sizeof(float); + const int src_buffer_byte_size = src_buffer_frames * _saudio.num_channels * (int)sizeof(float); if (0 == _saudio_fifo_read(&_saudio.fifo, (uint8_t*)_saudio.backend.src_buffer, src_buffer_byte_size)) { /* not enough read data available, fill the entire buffer with silence */ - memset(_saudio.backend.src_buffer, 0x0, src_buffer_byte_size); + memset(_saudio.backend.src_buffer, 0x0, (size_t)src_buffer_byte_size); } } } @@ -1662,8 +1663,8 @@ _SOKOL_PRIVATE void* _saudio_opensles_thread_fn(void* param) { int16_t* next_buffer = _saudio.backend.output_buffers[_saudio.backend.active_buffer]; /* queue this buffer */ - const int buffer_size_bytes = _saudio.buffer_frames * _saudio.num_channels * sizeof(short); - (*_saudio.backend.player_buffer_queue)->Enqueue(_saudio.backend.player_buffer_queue, out_buffer, buffer_size_bytes); + const int buffer_size_bytes = _saudio.buffer_frames * _saudio.num_channels * (int)sizeof(short); + (*_saudio.backend.player_buffer_queue)->Enqueue(_saudio.backend.player_buffer_queue, out_buffer, (SLuint32)buffer_size_bytes); /* fill the next buffer */ _saudio_opensles_fill_buffer(); @@ -1701,23 +1702,22 @@ _SOKOL_PRIVATE void _saudio_backend_shutdown(void) { } _SOKOL_PRIVATE bool _saudio_backend_init(void) { - _saudio.bytes_per_frame = sizeof(float) * _saudio.num_channels; + _saudio.bytes_per_frame = (int)sizeof(float) * _saudio.num_channels; for (int i = 0; i < SAUDIO_NUM_BUFFERS; ++i) { - const int buffer_size_bytes = sizeof(int16_t) * _saudio.num_channels * _saudio.buffer_frames; - _saudio.backend.output_buffers[i] = (int16_t*) SOKOL_MALLOC(buffer_size_bytes); + const int buffer_size_bytes = (int)sizeof(int16_t) * _saudio.num_channels * _saudio.buffer_frames; + _saudio.backend.output_buffers[i] = (int16_t*) SOKOL_MALLOC((size_t)buffer_size_bytes); SOKOL_ASSERT(_saudio.backend.output_buffers[i]); - memset(_saudio.backend.output_buffers[i], 0x0, buffer_size_bytes); + memset(_saudio.backend.output_buffers[i], 0x0, (size_t)buffer_size_bytes); } { const int buffer_size_bytes = _saudio.bytes_per_frame * _saudio.buffer_frames; - _saudio.backend.src_buffer = (float*) SOKOL_MALLOC(buffer_size_bytes); + 
_saudio.backend.src_buffer = (float*) SOKOL_MALLOC((size_t)buffer_size_bytes); SOKOL_ASSERT(_saudio.backend.src_buffer); - memset(_saudio.backend.src_buffer, 0x0, buffer_size_bytes); + memset(_saudio.backend.src_buffer, 0x0, (size_t)buffer_size_bytes); } - /* Create engine */ const SLEngineOption opts[] = { SL_ENGINEOPTION_THREADSAFE, SL_BOOLEAN_TRUE }; if (slCreateEngine(&_saudio.backend.engine_obj, 1, opts, 0, NULL, NULL ) != SL_RESULT_SUCCESS) { @@ -1758,8 +1758,8 @@ _SOKOL_PRIVATE bool _saudio_backend_init(void) { /* data format */ SLDataFormat_PCM format; format.formatType = SL_DATAFORMAT_PCM; - format.numChannels = _saudio.num_channels; - format.samplesPerSec = _saudio.sample_rate * 1000; + format.numChannels = (SLuint32)_saudio.num_channels; + format.samplesPerSec = (SLuint32) (_saudio.sample_rate * 1000); format.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16; format.containerSize = 16; format.endianness = SL_BYTEORDER_LITTLEENDIAN; @@ -1798,8 +1798,8 @@ _SOKOL_PRIVATE bool _saudio_backend_init(void) { /* begin */ { - const int buffer_size_bytes = sizeof(int16_t) * _saudio.num_channels * _saudio.buffer_frames; - (*_saudio.backend.player_buffer_queue)->Enqueue(_saudio.backend.player_buffer_queue, _saudio.backend.output_buffers[0], buffer_size_bytes); + const int buffer_size_bytes = (int)sizeof(int16_t) * _saudio.num_channels * _saudio.buffer_frames; + (*_saudio.backend.player_buffer_queue)->Enqueue(_saudio.backend.player_buffer_queue, _saudio.backend.output_buffers[0], (SLuint32)buffer_size_bytes); _saudio.backend.active_buffer = (_saudio.backend.active_buffer + 1) % SAUDIO_NUM_BUFFERS; (*_saudio.backend.player)->RegisterCallback(_saudio.backend.player, _saudio_opensles_play_cb, NULL); diff --git a/sokol_fetch.h b/sokol_fetch.h index 1a8ac54b5..a8c763157 100644 --- a/sokol_fetch.h +++ b/sokol_fetch.h @@ -1475,7 +1475,7 @@ _SOKOL_PRIVATE uint32_t _sfetch_file_size(_sfetch_file_handle_t h) { } _SOKOL_PRIVATE bool _sfetch_file_read(_sfetch_file_handle_t h, uint32_t offset, uint32_t num_bytes, void* ptr) { - fseek(h, offset, SEEK_SET); + fseek(h, (long)offset, SEEK_SET); return num_bytes == fread(ptr, 1, num_bytes, h); } @@ -1615,8 +1615,8 @@ _SOKOL_PRIVATE void _sfetch_thread_dequeue_outgoing(_sfetch_thread_t* thread, _s #if _SFETCH_PLATFORM_WINDOWS _SOKOL_PRIVATE bool _sfetch_win32_utf8_to_wide(const char* src, wchar_t* dst, int dst_num_bytes) { SOKOL_ASSERT(src && dst && (dst_num_bytes > 1)); - memset(dst, 0, dst_num_bytes); - const int dst_chars = dst_num_bytes / sizeof(wchar_t); + memset(dst, 0, (size_t)dst_num_bytes); + const int dst_chars = dst_num_bytes / (int)sizeof(wchar_t); const int dst_needed = MultiByteToWideChar(CP_UTF8, 0, src, -1, 0, 0); if ((dst_needed > 0) && (dst_needed < dst_chars)) { MultiByteToWideChar(CP_UTF8, 0, src, -1, dst, dst_chars); @@ -1935,7 +1935,7 @@ EM_JS(void, sfetch_js_send_head_request, (uint32_t slot_id, const char* path_cst }); /* if bytes_to_read != 0, a range-request will be sent, otherwise a normal request */ -EM_JS(void, sfetch_js_send_get_request, (uint32_t slot_id, const char* path_cstr, int offset, int bytes_to_read, void* buf_ptr, int buf_size), { +EM_JS(void, sfetch_js_send_get_request, (uint32_t slot_id, const char* path_cstr, uint32_t offset, uint32_t bytes_to_read, void* buf_ptr, uint32_t buf_size), { var path_str = UTF8ToString(path_cstr); var req = new XMLHttpRequest(); req.open('GET', path_str); diff --git a/sokol_gfx.h b/sokol_gfx.h index 139c3aebd..a1e369bd3 100644 --- a/sokol_gfx.h +++ b/sokol_gfx.h @@ -7,6 +7,8 @@ Project URL: 
https://github.com/floooh/sokol + Example code: https://github.com/floooh/sokol-samples + Do this: #define SOKOL_IMPL or #define SOKOL_GFX_IMPL @@ -75,16 +77,9 @@ on how the window and 3D-API context/device was created - provide a unified shader language, instead 3D-API-specific shader - source-code or shader-bytecode must be provided - - For complete code examples using the various backend 3D-APIs, see: - - https://github.com/floooh/sokol-samples - - For an optional shader-cross-compile solution, see: - - https://github.com/floooh/sokol-tools/blob/master/docs/sokol-shdc.md - + source-code or shader-bytecode must be provided (for the "official" + offline shader cross-compiler, see here: + https://github.com/floooh/sokol-tools/blob/master/docs/sokol-shdc.md) STEP BY STEP ============ @@ -106,6 +101,12 @@ sg_begin_default_pass(const sg_pass_action* actions, int width, int height) + ...or alternatively with: + + sg_begin_default_passf(const sg_pass_action* actions, float width, float height) + + ...which takes the framebuffer width and height as float values. + --- or start rendering to an offscreen framebuffer with: sg_begin_pass(sg_pass pass, const sg_pass_action* actions) @@ -131,10 +132,15 @@ sg_draw(int base_element, int num_elements, int num_instances) - In the case of no instancing: num_instances should be set to 1 and base_element/num_elements are - amounts of vertices. In the case of instancing (meaning num_instances > 1), num elements is the - number of vertices in one instance, while base_element remains unchanged. base_element is the index - of the first vertex to begin drawing from. + The sg_draw() function unifies all the different ways to render primitives + in a single call (indexed vs non-indexed rendering, and instanced vs non-instanced + rendering). In case of indexed rendering, base_element and num_elements specify + indices in the currently bound index buffer. In case of non-indexed rendering + base_element and num_elements specify vertices in the currently bound + vertex-buffer(s). To perform instanced rendering, the rendering pipeline + must be set up for instancing (see sg_pipeline_desc below), a separate vertex buffer + containing per-instance data must be bound, and the num_instances parameter + must be > 1. --- finish the current rendering pass with: @@ -160,28 +166,36 @@ sg_apply_viewport(int x, int y, int width, int height, bool origin_top_left) + ...or if you want to specify the viewport rectangle with float values: + + sg_apply_viewportf(float x, float y, float width, float height, bool origin_top_left) + --- to set a new scissor rect, call: sg_apply_scissor_rect(int x, int y, int width, int height, bool origin_top_left) - both sg_apply_viewport() and sg_apply_scissor_rect() must be called + ...or with float values: + + sg_apply_scissor_rectf(float x, float y, float width, float height, bool origin_top_left) + + Both sg_apply_viewport() and sg_apply_scissor_rect() must be called inside a rendering pass - beginning a pass will reset the viewport to the size of the framebuffer used - in the new pass, + Note that sg_begin_default_pass() and sg_begin_pass() will reset both the + viewport and scissor rectangles to cover the entire framebuffer.
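To make the new float-variant calls and the unified sg_draw() behaviour above concrete, here is a minimal per-frame sketch; the pipeline handle 'pip', the bindings struct 'bind' and the float framebuffer size values 'fb_width'/'fb_height' are assumed to exist elsewhere and are not part of the patch:

    /* assumed: sg_pipeline pip, sg_bindings bind, float fb_width, float fb_height */
    sg_pass_action pass_action = { 0 };     /* default-initialized: resolves to a clear action with the default clear color */
    sg_begin_default_passf(&pass_action, fb_width, fb_height);
    sg_apply_viewportf(0.0f, 0.0f, fb_width, fb_height, true);
    sg_apply_scissor_rectf(0.0f, 0.0f, fb_width, fb_height, true);
    sg_apply_pipeline(pip);
    sg_apply_bindings(&bind);
    sg_draw(0, 36, 1);      /* non-indexed, non-instanced: 36 vertices starting at vertex 0, one instance */
    sg_end_pass();
    sg_commit();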
--- to update (overwrite) the content of buffer and image resources, call: - sg_update_buffer(sg_buffer buf, const void* ptr, int num_bytes) - sg_update_image(sg_image img, const sg_image_content* content) + sg_update_buffer(sg_buffer buf, const sg_range* data) + sg_update_image(sg_image img, const sg_image_data* data) Buffers and images to be updated must have been created with SG_USAGE_DYNAMIC or SG_USAGE_STREAM - Only one update per frame is allowed for buffer and image resources. - The rationale is to have a simple countermeasure to avoid the CPU - scribbling over data the GPU is currently using, or the CPU having to - wait for the GPU + Only one update per frame is allowed for buffer and image resources when + using the sg_update_*() functions. The rationale is to have a simple + countermeasure to avoid the CPU scribbling over data the GPU is currently + using, or the CPU having to wait for the GPU Buffer and image updates can be partial, as long as a rendering operation only references the valid (updated) data in the @@ -189,7 +203,7 @@ --- to append a chunk of data to a buffer resource, call: - int sg_append_buffer(sg_buffer buf, const void* ptr, int num_bytes) + int sg_append_buffer(sg_buffer buf, const sg_range* data) The difference to sg_update_buffer() is that sg_append_buffer() can be called multiple times per frame to append new data to the @@ -206,7 +220,7 @@ for (...) { const void* data = ...; const int num_bytes = ...; - int offset = sg_append_buffer(buf, data, num_bytes); + int offset = sg_append_buffer(buf, &(sg_range) { .ptr=data, .size=num_bytes }); bindings.vertex_buffer_offsets[0] = offset; sg_apply_pipeline(pip); sg_apply_bindings(&bindings); @@ -565,6 +579,7 @@ distribution. */ #define SOKOL_GFX_INCLUDED (1) +#include // size_t #include #include @@ -615,11 +630,30 @@ typedef struct sg_pass { uint32_t id; } sg_pass; typedef struct sg_context { uint32_t id; } sg_context; /* - various compile-time constants - - FIXME: it may make sense to convert some of those into defines so - that the user code can override them. + sg_range is a pointer-size-pair struct used to pass memory blobs into + sokol-gfx. When initialized from a value type (array or struct), you can + use the SG_RANGE() macro to build an sg_range struct. For functions which + take either a sg_range pointer, or a (C++) sg_range reference, use the + SG_RANGE_REF macro as a solution which compiles both in C and C++. */ +typedef struct sg_range { + const void* ptr; + size_t size; +} sg_range; + +// disabling this for every includer isn't great, but the warning is also quite pointless +#if defined(_MSC_VER) +#pragma warning(disable:4221) /* /W4 only: nonstandard extension used: 'x': cannot be initialized using address of automatic variable 'y' */ +#endif +#if defined(__cplusplus) +#define SG_RANGE(x) sg_range{ &x, sizeof(x) } +#define SG_RANGE_REF(x) sg_range{ &x, sizeof(x) } +#else +#define SG_RANGE(x) (sg_range){ &x, sizeof(x) } +#define SG_RANGE_REF(x) &(sg_range){ &x, sizeof(x) } +#endif + +// various compile-time constants enum { SG_INVALID_ID = 0, SG_NUM_SHADER_STAGES = 2, @@ -634,16 +668,22 @@ enum { SG_MAX_TEXTUREARRAY_LAYERS = 128 }; +/* + sg_color + + An RGBA color value. +*/ +typedef struct sg_color { float r, g, b, a; } sg_color; + /* sg_backend The active 3D-API backend, use the function sg_query_backend() to get the currently active backend. 
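As a usage sketch for the sg_range/SG_RANGE() pair and the changed update/append functions described above; the dynamic buffer handle 'buf' and the vertex array are made-up example values:

    /* assumed: sg_buffer buf created with SG_USAGE_DYNAMIC or SG_USAGE_STREAM */
    float vertices[] = { 0.0f, 0.5f, -0.5f, -0.5f, 0.5f, -0.5f };

    /* overwrite the buffer content (at most once per frame): */
    sg_update_buffer(buf, &SG_RANGE(vertices));

    /* ...or append a chunk and keep the returned byte offset for the bindings: */
    int offset = sg_append_buffer(buf, &(sg_range){ .ptr = vertices, .size = sizeof(vertices) });

In C++ the address of the temporary produced by SG_RANGE() cannot be taken, which is what the SG_RANGE_REF() macro mentioned above is for.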
- The returned value corresponds with the compile-time define to select - a backend, with the only exception of SOKOL_GLES3: this may - return SG_BACKEND_GLES2 if the backend has to fallback to GLES2 mode - because GLES3 isn't supported. + NOTE that SG_BACKEND_GLES2 will be returned if sokol-gfx was + compiled with SOKOL_GLES3, but the runtime platform doesn't support + GLES3/WebGL2 and sokol-gfx had to fallback to GLES2/WebGL. */ typedef enum sg_backend { SG_BACKEND_GLCORE33, @@ -661,9 +701,10 @@ typedef enum sg_backend { sg_pixel_format sokol_gfx.h basically uses the same pixel formats as WebGPU, since these - are supported on most newer GPUs. GLES2 and WebGL has a much smaller - subset of available pixel formats. Call sg_query_pixelformat() to check - at runtime if a pixel format supports the desired features. + are supported on most newer GPUs. GLES2 and WebGL only supports a much + smaller subset of actually available pixel formats. Call + sg_query_pixelformat() to check at runtime if a pixel format supports the + desired features. A pixelformat name consist of three parts: @@ -787,12 +828,12 @@ typedef enum sg_pixel_format { by sg_query_pixelformat(). */ typedef struct sg_pixelformat_info { - bool sample; /* pixel format can be sampled in shaders */ - bool filter; /* pixel format can be sampled with filtering */ - bool render; /* pixel format can be used as render target */ - bool blend; /* alpha-blending is supported */ - bool msaa; /* pixel format can be used as MSAA render target */ - bool depth; /* pixel format is a depth format */ + bool sample; // pixel format can be sampled in shaders + bool filter; // pixel format can be sampled with filtering + bool render; // pixel format can be used as render target + bool blend; // alpha-blending is supported + bool msaa; // pixel format can be used as MSAA render target + bool depth; // pixel format is a depth format #if defined(SOKOL_ZIG_BINDINGS) uint32_t __pad[3]; #endif @@ -803,13 +844,15 @@ typedef struct sg_pixelformat_info { returned by sg_query_features() */ typedef struct sg_features { - bool instancing; /* hardware instancing supported */ - bool origin_top_left; /* framebuffer and texture origin is in top left corner */ - bool multiple_render_targets; /* offscreen render passes can have multiple render targets attached */ - bool msaa_render_targets; /* offscreen render passes support MSAA antialiasing */ - bool imagetype_3d; /* creation of SG_IMAGETYPE_3D images is supported */ - bool imagetype_array; /* creation of SG_IMAGETYPE_ARRAY images is supported */ - bool image_clamp_to_border; /* border color and clamp-to-border UV-wrap mode is supported */ + bool instancing; // hardware instancing supported + bool origin_top_left; // framebuffer and texture origin is in top left corner + bool multiple_render_targets; // offscreen render passes can have multiple render targets attached + bool msaa_render_targets; // offscreen render passes support MSAA antialiasing + bool imagetype_3d; // creation of SG_IMAGETYPE_3D images is supported + bool imagetype_array; // creation of SG_IMAGETYPE_ARRAY images is supported + bool image_clamp_to_border; // border color and clamp-to-border UV-wrap mode is supported + bool mrt_independent_blend_state; // multiple-render-target rendering can use per-render-target blend state + bool mrt_independent_write_mask; // multiple-render-target rendering can use per-render-target color write masks #if defined(SOKOL_ZIG_BINDINGS) uint32_t __pad[3]; #endif @@ -819,12 +862,12 @@ typedef struct sg_features { 
Runtime information about resource limits, returned by sg_query_limit() */ typedef struct sg_limits { - uint32_t max_image_size_2d; /* max width/height of SG_IMAGETYPE_2D images */ - uint32_t max_image_size_cube; /* max width/height of SG_IMAGETYPE_CUBE images */ - uint32_t max_image_size_3d; /* max width/height/depth of SG_IMAGETYPE_3D images */ - uint32_t max_image_size_array; /* max width/height of SG_IMAGETYPE_ARRAY images */ - uint32_t max_image_array_layers; /* max number of layers in SG_IMAGETYPE_ARRAY images */ - uint32_t max_vertex_attrs; /* <= SG_MAX_VERTEX_ATTRIBUTES (only on some GLES2 impls) */ + int max_image_size_2d; // max width/height of SG_IMAGETYPE_2D images + int max_image_size_cube; // max width/height of SG_IMAGETYPE_CUBE images + int max_image_size_3d; // max width/height/depth of SG_IMAGETYPE_3D images + int max_image_size_array; // max width/height of SG_IMAGETYPE_ARRAY images + int max_image_array_layers; // max number of layers in SG_IMAGETYPE_ARRAY images + int max_vertex_attrs; // <= SG_MAX_VERTEX_ATTRIBUTES (only on some GLES2 impls) } sg_limits; /* @@ -1169,7 +1212,7 @@ typedef enum sg_uniform_type { sg_cull_mode The face-culling mode, this is used in the - sg_pipeline_desc.rasterizer.cull_mode member when creating a + sg_pipeline_desc.cull_mode member when creating a pipeline object. The default cull mode is SG_CULLMODE_NONE @@ -1187,7 +1230,7 @@ typedef enum sg_cull_mode { sg_face_winding The vertex-winding rule that determines a front-facing primitive. This - is used in the member sg_pipeline_desc.rasterizer.face_winding + is used in the member sg_pipeline_desc.face_winding when creating a pipeline object. The default winding is SG_FACEWINDING_CW (clockwise) @@ -1207,10 +1250,11 @@ typedef enum sg_face_winding { This is used when creating pipeline objects in the members: sg_pipeline_desc - .depth_stencil - .depth_compare_func - .stencil_front.compare_func - .stencil_back.compare_func + .depth + .compare + .stencil + .front.compare + .back.compare The default compare func for depth- and stencil-tests is SG_COMPAREFUNC_ALWAYS. @@ -1237,12 +1281,12 @@ typedef enum sg_compare_func { object in the members: sg_pipeline_desc - .depth_stencil - .stencil_front + .stencil + .front .fail_op .depth_fail_op .pass_op - .stencil_back + .back .fail_op .depth_fail_op .pass_op @@ -1270,11 +1314,12 @@ typedef enum sg_stencil_op { This is used in the following members when creating a pipeline object: sg_pipeline_desc - .blend - .src_factor_rgb - .dst_factor_rgb - .src_factor_alpha - .dst_factor_alpha + .colors[i] + .blend + .src_factor_rgb + .dst_factor_rgb + .src_factor_alpha + .dst_factor_alpha The default value is SG_BLENDFACTOR_ONE for source factors, and SG_BLENDFACTOR_ZERO for destination factors. @@ -1308,9 +1353,10 @@ typedef enum sg_blend_factor { creating a pipeline object: sg_pipeline_desc - .blend - .op_rgb - .op_alpha + .colors[i] + .blend + .op_rgb + .op_alpha The default value is SG_BLENDOP_ADD. */ @@ -1326,9 +1372,9 @@ typedef enum sg_blend_op { /* sg_color_mask - Selects the color channels when writing a fragment color to the + Selects the active color channels when writing a fragment color to the framebuffer. This is used in the members - sg_pipeline_desc.blend.color_write_mask when creating a pipeline object. + sg_pipeline_desc.colors[i].write_mask when creating a pipeline object. The default colormask is SG_COLORMASK_RGBA (write all colors channels) @@ -1337,14 +1383,23 @@ typedef enum sg_blend_op { should be disabled.
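The blend factors, blend ops and color write mask discussed above now live per color attachment under .colors[i] in sg_pipeline_desc; a minimal sketch, assuming an existing shader handle 'shd' and example blend factors chosen only for illustration:

    /* assumed: sg_shader shd */
    sg_pipeline pip = sg_make_pipeline(&(sg_pipeline_desc){
        .shader = shd,
        .layout = {
            .attrs[0].format = SG_VERTEXFORMAT_FLOAT3
        },
        .colors[0] = {
            .write_mask = SG_COLORMASK_RGB,             /* don't write destination alpha */
            .blend = {
                .enabled = true,
                .src_factor_rgb = SG_BLENDFACTOR_SRC_ALPHA,
                .dst_factor_rgb = SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA
            }
        },
        .label = "alpha-blend-pipeline"
    });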
*/ typedef enum sg_color_mask { - _SG_COLORMASK_DEFAULT = 0, /* value 0 reserved for default-init */ - SG_COLORMASK_NONE = 0x10, /* special value for 'all channels disabled */ - SG_COLORMASK_R = 0x1, - SG_COLORMASK_G = 0x2, - SG_COLORMASK_B = 0x4, - SG_COLORMASK_A = 0x8, - SG_COLORMASK_RGB = 0x7, - SG_COLORMASK_RGBA = 0xF, + _SG_COLORMASK_DEFAULT = 0, /* value 0 reserved for default-init */ + SG_COLORMASK_NONE = 0x10, /* special value for 'all channels disabled */ + SG_COLORMASK_R = 0x1, + SG_COLORMASK_G = 0x2, + SG_COLORMASK_RG = 0x3, + SG_COLORMASK_B = 0x4, + SG_COLORMASK_RB = 0x5, + SG_COLORMASK_GB = 0x6, + SG_COLORMASK_RGB = 0x7, + SG_COLORMASK_A = 0x8, + SG_COLORMASK_RA = 0x9, + SG_COLORMASK_GA = 0xA, + SG_COLORMASK_RGA = 0xB, + SG_COLORMASK_BA = 0xC, + SG_COLORMASK_RBA = 0xD, + SG_COLORMASK_GBA = 0xE, + SG_COLORMASK_RGBA = 0xF, _SG_COLORMASK_FORCE_U32 = 0x7FFFFFFF } sg_color_mask; @@ -1396,17 +1451,17 @@ typedef enum sg_action { */ typedef struct sg_color_attachment_action { sg_action action; - float val[4]; + sg_color value; } sg_color_attachment_action; typedef struct sg_depth_attachment_action { sg_action action; - float val; + float value; } sg_depth_attachment_action; typedef struct sg_stencil_attachment_action { sg_action action; - uint8_t val; + uint8_t value; } sg_stencil_attachment_action; typedef struct sg_pass_action { @@ -1459,16 +1514,28 @@ typedef struct sg_bindings { The default configuration is: - .size: 0 (this *must* be set to a valid size in bytes) + .size: 0 (*must* be >0 for buffers without data) .type: SG_BUFFERTYPE_VERTEXBUFFER .usage: SG_USAGE_IMMUTABLE - .content 0 + .data.ptr 0 (*must* be valid for immutable buffers) + .data.size 0 (*must* be > 0 for immutable buffers) .label 0 (optional string label for trace hooks) The label will be ignored by sokol_gfx.h, it is only useful when hooking into sg_make_buffer() or sg_init_buffer() via the sg_install_trace_hooks() function. + For immutable buffers which are initialized with initial data, + keep the .size item zero-initialized, and set the size together with the + pointer to the initial data in the .data item. + + For mutable buffers without initial data, keep the .data item + zero-initialized, and set the buffer size in the .size item instead. + + You can also set both size values, but currently both size values must + be identical (this may change in the future when the dynamic resource + management may become more flexible). + ADVANCED TOPIC: Injecting native 3D-API buffers: The following struct members allow to inject your own GL, Metal @@ -1478,7 +1545,7 @@ typedef struct sg_bindings { .mtl_buffers[SG_NUM_INFLIGHT_FRAMES] .d3d11_buffer - You must still provide all other members except the .content member, and + You must still provide all other struct items except the .data item, and these must match the creation parameters of the native buffers you provide. 
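Returning to the basic (non-injected) case: the .size/.data rules described at the top of this sg_buffer_desc documentation can be sketched as follows; the vertex data, the 1 KByte size and the labels are made-up example values:

    /* immutable buffer: the buffer size is taken from .data.size */
    const float vertices[] = { 0.0f, 0.5f, -0.5f, -0.5f, 0.5f, -0.5f };
    sg_buffer vbuf = sg_make_buffer(&(sg_buffer_desc){
        .data = SG_RANGE(vertices),
        .label = "triangle-vertices"
    });

    /* dynamic buffer: no initial data, .size must be > 0 */
    sg_buffer dbuf = sg_make_buffer(&(sg_buffer_desc){
        .size = 1024,
        .usage = SG_USAGE_DYNAMIC,
        .label = "streamed-vertices"
    });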
For SG_USAGE_IMMUTABLE, only provide a single native 3D-API buffer, otherwise you need to provide SG_NUM_INFLIGHT_FRAMES buffers @@ -1494,10 +1561,10 @@ typedef struct sg_bindings { */ typedef struct sg_buffer_desc { uint32_t _start_canary; - int size; + size_t size; sg_buffer_type type; sg_usage usage; - const void* content; + sg_range data; const char* label; /* GL specific */ uint32_t gl_buffers[SG_NUM_INFLIGHT_FRAMES]; @@ -1511,39 +1578,21 @@ typedef struct sg_buffer_desc { } sg_buffer_desc; /* - sg_subimage_content - - Pointer to and size of a subimage-surface data, this is - used to describe the initial content of immutable-usage images, - or for updating a dynamic- or stream-usage images. - - For 3D- or array-textures, one sg_subimage_content item - describes an entire mipmap level consisting of all array- or - 3D-slices of the mipmap level. It is only possible to update - an entire mipmap level, not parts of it. -*/ -typedef struct sg_subimage_content { - const void* ptr; /* pointer to subimage data */ - int size; /* size in bytes of pointed-to subimage data */ -} sg_subimage_content; - -/* - sg_image_content + sg_image_data - Defines the content of an image through a 2D array - of sg_subimage_content structs. The first array dimension - is the cubemap face, and the second array dimension the - mipmap level. + Defines the content of an image through a 2D array of sg_range structs. + The first array dimension is the cubemap face, and the second array + dimension the mipmap level. */ -typedef struct sg_image_content { - sg_subimage_content subimage[SG_CUBEFACE_NUM][SG_MAX_MIPMAPS]; -} sg_image_content; +typedef struct sg_image_data { + sg_range subimage[SG_CUBEFACE_NUM][SG_MAX_MIPMAPS]; +} sg_image_data; /* sg_image_desc - Creation parameters for sg_image objects, used in the - sg_make_image() call. + Creation parameters for sg_image objects, used in the sg_make_image() + call. The default configuration is: @@ -1555,7 +1604,7 @@ typedef struct sg_image_content { .num_mipmaps: 1 .usage: SG_USAGE_IMMUTABLE .pixel_format: SG_PIXELFORMAT_RGBA8 for textures, or sg_desc.context.color_format for render targets - .sample_count: 1 for textures, or sg_desc.context.sample_count for render target + .sample_count: 1 for textures, or sg_desc.context.sample_count for render targets .min_filter: SG_FILTER_NEAREST .mag_filter: SG_FILTER_NEAREST .wrap_u: SG_WRAP_REPEAT @@ -1565,7 +1614,7 @@ typedef struct sg_image_content { .max_anisotropy 1 (must be 1..16) .min_lod 0.0f .max_lod FLT_MAX - .content an sg_image_content struct to define the initial content + .data an sg_image_data struct to define the initial content .label 0 (optional string label for trace hooks) Q: Why is the default sample_count for render targets identical with the @@ -1579,37 +1628,35 @@ typedef struct sg_image_content { NOTE: - SG_IMAGETYPE_ARRAY and SG_IMAGETYPE_3D are not supported on - WebGL/GLES2, use sg_query_features().imagetype_array and - sg_query_features().imagetype_3d at runtime to check - if array- and 3D-textures are supported. + SG_IMAGETYPE_ARRAY and SG_IMAGETYPE_3D are not supported on WebGL/GLES2, + use sg_query_features().imagetype_array and + sg_query_features().imagetype_3d at runtime to check if array- and + 3D-textures are supported. Images with usage SG_USAGE_IMMUTABLE must be fully initialized by - providing a valid .content member which points to - initialization data. + providing a valid .data member which points to initialization data. 
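For the sg_image_data layout described above (one sg_range per cubemap face and mipmap level), a minimal sketch of creating a small immutable 2D texture; the 4x4 checkerboard pixel data is a made-up example and the default SG_PIXELFORMAT_RGBA8 pixel format is relied on:

    /* 4x4 RGBA8 checkerboard as example content */
    uint32_t pixels[4][4] = {
        { 0xFFFFFFFF, 0xFF000000, 0xFFFFFFFF, 0xFF000000 },
        { 0xFF000000, 0xFFFFFFFF, 0xFF000000, 0xFFFFFFFF },
        { 0xFFFFFFFF, 0xFF000000, 0xFFFFFFFF, 0xFF000000 },
        { 0xFF000000, 0xFFFFFFFF, 0xFF000000, 0xFFFFFFFF },
    };
    sg_image img = sg_make_image(&(sg_image_desc){
        .width = 4,
        .height = 4,
        .data.subimage[0][0] = SG_RANGE(pixels),    /* face 0, mip level 0 */
        .label = "checkerboard-texture"
    });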
ADVANCED TOPIC: Injecting native 3D-API textures: - The following struct members allow to inject your own GL, Metal - or D3D11 textures into sokol_gfx: + The following struct members allow to inject your own GL, Metal or D3D11 + textures into sokol_gfx: .gl_textures[SG_NUM_INFLIGHT_FRAMES] .mtl_textures[SG_NUM_INFLIGHT_FRAMES] .d3d11_texture .d3d11_shader_resource_view - For GL, you can also specify the texture target or leave it empty - to use the default texture target for the image type (GL_TEXTURE_2D - for SG_IMAGETYPE_2D etc) + For GL, you can also specify the texture target or leave it empty to use + the default texture target for the image type (GL_TEXTURE_2D for + SG_IMAGETYPE_2D etc) For D3D11, you can provide either a D3D11 texture, or a - shader-resource-view, or both. If only a texture is provided, - a matching shader-resource-view will be created. If only a - shader-resource-view is provided, the texture will be looked - up from the shader-resource-view. + shader-resource-view, or both. If only a texture is provided, a matching + shader-resource-view will be created. If only a shader-resource-view is + provided, the texture will be looked up from the shader-resource-view. - The same rules apply as for injecting native buffers - (see sg_buffer_desc documentation for more details). + The same rules apply as for injecting native buffers (see sg_buffer_desc + documentation for more details). */ typedef struct sg_image_desc { uint32_t _start_canary; @@ -1631,7 +1678,7 @@ typedef struct sg_image_desc { uint32_t max_anisotropy; float min_lod; float max_lod; - sg_image_content content; + sg_image_data data; const char* label; /* GL specific */ uint32_t gl_textures[SG_NUM_INFLIGHT_FRAMES]; @@ -1649,8 +1696,8 @@ typedef struct sg_image_desc { /* sg_shader_desc - The structure sg_shader_desc defines all creation parameters - for shader programs, used as input to the sg_make_shader() function: + The structure sg_shader_desc defines all creation parameters for shader + programs, used as input to the sg_make_shader() function: - reflection information for vertex attributes (vertex shader inputs): - vertex attribute name (required for GLES2, optional for GLES3 and GL) @@ -1681,9 +1728,9 @@ typedef struct sg_image_desc { vertex shader stage and "ps_4_0" for the pixel shader stage. */ typedef struct sg_shader_attr_desc { - const char* name; /* GLSL vertex attribute name (only required for GLES2) */ - const char* sem_name; /* HLSL semantic name */ - int sem_index; /* HLSL semantic index */ + const char* name; // GLSL vertex attribute name (only strictly required for GLES2) + const char* sem_name; // HLSL semantic name + int sem_index; // HLSL semantic index } sg_shader_attr_desc; typedef struct sg_shader_uniform_desc { @@ -1693,20 +1740,19 @@ typedef struct sg_shader_uniform_desc { } sg_shader_uniform_desc; typedef struct sg_shader_uniform_block_desc { - int size; + size_t size; sg_shader_uniform_desc uniforms[SG_MAX_UB_MEMBERS]; } sg_shader_uniform_block_desc; typedef struct sg_shader_image_desc { const char* name; - sg_image_type type; /* FIXME: should this be renamed to 'image_type'? 
*/ + sg_image_type image_type; sg_sampler_type sampler_type; } sg_shader_image_desc; typedef struct sg_shader_stage_desc { const char* source; - const uint8_t* byte_code; - int byte_code_size; + sg_range bytecode; const char* entry; const char* d3d11_target; sg_shader_uniform_block_desc uniform_blocks[SG_MAX_SHADERSTAGE_UBS]; @@ -1725,27 +1771,25 @@ typedef struct sg_shader_desc { /* sg_pipeline_desc - The sg_pipeline_desc struct defines all creation parameters - for an sg_pipeline object, used as argument to the - sg_make_pipeline() function: + The sg_pipeline_desc struct defines all creation parameters for an + sg_pipeline object, used as argument to the sg_make_pipeline() function: - the vertex layout for all input vertex buffers - a shader object - the 3D primitive type (points, lines, triangles, ...) - the index type (none, 16- or 32-bit) - - depth-stencil state - - alpha-blending state - - rasterizer state + - all the fixed-function-pipeline state (depth-, stencil-, blend-state, etc...) If the vertex data has no gaps between vertex components, you can omit the .layout.buffers[].stride and layout.attrs[].offset items (leave them - default-initialized to 0), sokol-gfx will then compute the offsets and strides - from the vertex component formats (.layout.attrs[].format). Please note - that ALL vertex attribute offsets must be 0 in order for the + default-initialized to 0), sokol-gfx will then compute the offsets and + strides from the vertex component formats (.layout.attrs[].format). + Please note that ALL vertex attribute offsets must be 0 in order for the automatic offset computation to kick in. The default configuration is as follows: + .shader: 0 (must be initialized with a valid sg_shader id!) .layout: .buffers[]: vertex buffer layouts .stride: 0 (if no stride is given it will be computed) @@ -1755,54 +1799,60 @@ typedef struct sg_shader_desc { .buffer_index 0 the vertex buffer bind slot .offset 0 (offsets can be omitted if the vertex layout has no gaps) .format SG_VERTEXFORMAT_INVALID (must be initialized!) - .shader: 0 (must be initialized with a valid sg_shader id!) 
- .primitive_type: SG_PRIMITIVETYPE_TRIANGLES - .index_type: SG_INDEXTYPE_NONE - .depth_stencil: - .stencil_front, .stencil_back: - .fail_op: SG_STENCILOP_KEEP - .depth_fail_op: SG_STENCILOP_KEEP - .pass_op: SG_STENCILOP_KEEP - .compare_func SG_COMPAREFUNC_ALWAYS - .depth_compare_func: SG_COMPAREFUNC_ALWAYS - .depth_write_enabled: false - .stencil_enabled: false - .stencil_read_mask: 0 - .stencil_write_mask: 0 - .stencil_ref: 0 - .blend: - .enabled: false - .src_factor_rgb: SG_BLENDFACTOR_ONE - .dst_factor_rgb: SG_BLENDFACTOR_ZERO - .op_rgb: SG_BLENDOP_ADD - .src_factor_alpha: SG_BLENDFACTOR_ONE - .dst_factor_alpha: SG_BLENDFACTOR_ZERO - .op_alpha: SG_BLENDOP_ADD - .color_write_mask: SG_COLORMASK_RGBA - .color_attachment_count 1 - .color_format SG_PIXELFORMAT_RGBA8 - .depth_format SG_PIXELFORMAT_DEPTHSTENCIL - .blend_color: { 0.0f, 0.0f, 0.0f, 0.0f } - .rasterizer: - .alpha_to_coverage_enabled: false - .cull_mode: SG_CULLMODE_NONE - .face_winding: SG_FACEWINDING_CW - .sample_count: sg_desc.context.sample_count - .depth_bias: 0.0f - .depth_bias_slope_scale: 0.0f - .depth_bias_clamp: 0.0f + .depth: + .pixel_format: sg_desc.context.depth_format + .compare: SG_COMPAREFUNC_ALWAYS + .write_enabled: false + .bias: 0.0f + .bias_slope_scale: 0.0f + .bias_clamp: 0.0f + .stencil: + .enabled: false + .front/back: + .compare: SG_COMPAREFUNC_ALWAYS + .depth_fail_op: SG_STENCILOP_KEEP + .pass_op: SG_STENCILOP_KEEP + .compare: SG_COMPAREFUNC_ALWAYS + .read_mask: 0 + .write_mask: 0 + .ref: 0 + .color_count 1 + .colors[0..color_count] + .pixel_format sg_desc.context.color_format + .write_mask: SG_COLORMASK_RGBA + .blend: + .enabled: false + .src_factor_rgb: SG_BLENDFACTOR_ONE + .dst_factor_rgb: SG_BLENDFACTOR_ZERO + .op_rgb: SG_BLENDOP_ADD + .src_factor_alpha: SG_BLENDFACTOR_ONE + .dst_factor_alpha: SG_BLENDFACTOR_ZERO + .op_alpha: SG_BLENDOP_ADD + .primitive_type: SG_PRIMITIVETYPE_TRIANGLES + .index_type: SG_INDEXTYPE_NONE + .cull_mode: SG_CULLMODE_NONE + .face_winding: SG_FACEWINDING_CW + .sample_count: sg_desc.context.sample_count + .blend_color: (sg_color) { 0.0f, 0.0f, 0.0f, 0.0f } + .alpha_to_coverage_enabled: false .label 0 (optional string label for trace hooks) */ typedef struct sg_buffer_layout_desc { int stride; sg_vertex_step step_func; int step_rate; + #if defined(SOKOL_ZIG_BINDINGS) + uint32_t __pad[2]; + #endif } sg_buffer_layout_desc; typedef struct sg_vertex_attr_desc { int buffer_index; int offset; sg_vertex_format format; + #if defined(SOKOL_ZIG_BINDINGS) + uint32_t __pad[2]; + #endif } sg_vertex_attr_desc; typedef struct sg_layout_desc { @@ -1810,23 +1860,30 @@ typedef struct sg_layout_desc { sg_vertex_attr_desc attrs[SG_MAX_VERTEX_ATTRIBUTES]; } sg_layout_desc; -typedef struct sg_stencil_state { +typedef struct sg_stencil_face_state { + sg_compare_func compare; sg_stencil_op fail_op; sg_stencil_op depth_fail_op; sg_stencil_op pass_op; - sg_compare_func compare_func; +} sg_stencil_face_state; + +typedef struct sg_stencil_state { + bool enabled; + sg_stencil_face_state front; + sg_stencil_face_state back; + uint8_t read_mask; + uint8_t write_mask; + uint8_t ref; } sg_stencil_state; -typedef struct sg_depth_stencil_state { - sg_stencil_state stencil_front; - sg_stencil_state stencil_back; - sg_compare_func depth_compare_func; - bool depth_write_enabled; - bool stencil_enabled; - uint8_t stencil_read_mask; - uint8_t stencil_write_mask; - uint8_t stencil_ref; -} sg_depth_stencil_state; +typedef struct sg_depth_state { + sg_pixel_format pixel_format; + sg_compare_func compare; + bool 
write_enabled; + float bias; + float bias_slope_scale; + float bias_clamp; +} sg_depth_state; typedef struct sg_blend_state { bool enabled; @@ -1836,32 +1893,29 @@ typedef struct sg_blend_state { sg_blend_factor src_factor_alpha; sg_blend_factor dst_factor_alpha; sg_blend_op op_alpha; - uint8_t color_write_mask; - int color_attachment_count; - sg_pixel_format color_format; - sg_pixel_format depth_format; - float blend_color[4]; } sg_blend_state; -typedef struct sg_rasterizer_state { - bool alpha_to_coverage_enabled; - sg_cull_mode cull_mode; - sg_face_winding face_winding; - int sample_count; - float depth_bias; - float depth_bias_slope_scale; - float depth_bias_clamp; -} sg_rasterizer_state; +typedef struct sg_color_state { + sg_pixel_format pixel_format; + sg_color_mask write_mask; + sg_blend_state blend; +} sg_color_state; typedef struct sg_pipeline_desc { uint32_t _start_canary; - sg_layout_desc layout; sg_shader shader; + sg_layout_desc layout; + sg_depth_state depth; + sg_stencil_state stencil; + int color_count; + sg_color_state colors[SG_MAX_COLOR_ATTACHMENTS]; sg_primitive_type primitive_type; sg_index_type index_type; - sg_depth_stencil_state depth_stencil; - sg_blend_state blend; - sg_rasterizer_state rasterizer; + sg_cull_mode cull_mode; + sg_face_winding face_winding; + int sample_count; + sg_color blend_color; + bool alpha_to_coverage_enabled; const char* label; uint32_t _end_canary; } sg_pipeline_desc; @@ -1888,16 +1942,16 @@ typedef struct sg_pipeline_desc { In addition, all color-attachment images must have the same pixel format. */ -typedef struct sg_attachment_desc { +typedef struct sg_pass_attachment_desc { sg_image image; int mip_level; int slice; /* cube texture: face; array texture: layer; 3D texture: slice */ -} sg_attachment_desc; +} sg_pass_attachment_desc; typedef struct sg_pass_desc { uint32_t _start_canary; - sg_attachment_desc color_attachments[SG_MAX_COLOR_ATTACHMENTS]; - sg_attachment_desc depth_stencil_attachment; + sg_pass_attachment_desc color_attachments[SG_MAX_COLOR_ATTACHMENTS]; + sg_pass_attachment_desc depth_stencil_attachment; const char* label; uint32_t _end_canary; } sg_pass_desc; @@ -1927,16 +1981,16 @@ typedef struct sg_trace_hooks { void (*destroy_shader)(sg_shader shd, void* user_data); void (*destroy_pipeline)(sg_pipeline pip, void* user_data); void (*destroy_pass)(sg_pass pass, void* user_data); - void (*update_buffer)(sg_buffer buf, const void* data_ptr, int data_size, void* user_data); - void (*update_image)(sg_image img, const sg_image_content* data, void* user_data); - void (*append_buffer)(sg_buffer buf, const void* data_ptr, int data_size, int result, void* user_data); + void (*update_buffer)(sg_buffer buf, const sg_range* data, void* user_data); + void (*update_image)(sg_image img, const sg_image_data* data, void* user_data); + void (*append_buffer)(sg_buffer buf, const sg_range* data, int result, void* user_data); void (*begin_default_pass)(const sg_pass_action* pass_action, int width, int height, void* user_data); void (*begin_pass)(sg_pass pass, const sg_pass_action* pass_action, void* user_data); void (*apply_viewport)(int x, int y, int width, int height, bool origin_top_left, void* user_data); void (*apply_scissor_rect)(int x, int y, int width, int height, bool origin_top_left, void* user_data); void (*apply_pipeline)(sg_pipeline pip, void* user_data); void (*apply_bindings)(const sg_bindings* bindings, void* user_data); - void (*apply_uniforms)(sg_shader_stage stage, int ub_index, const void* data, int num_bytes, void* 
user_data); + void (*apply_uniforms)(sg_shader_stage stage, int ub_index, const sg_range* data, void* user_data); void (*draw)(int base_element, int num_elements, int num_instances, void* user_data); void (*end_pass)(void* user_data); void (*commit)(void* user_data); @@ -2226,19 +2280,22 @@ SOKOL_GFX_API_DECL void sg_destroy_image(sg_image img); SOKOL_GFX_API_DECL void sg_destroy_shader(sg_shader shd); SOKOL_GFX_API_DECL void sg_destroy_pipeline(sg_pipeline pip); SOKOL_GFX_API_DECL void sg_destroy_pass(sg_pass pass); -SOKOL_GFX_API_DECL void sg_update_buffer(sg_buffer buf, const void* data_ptr, int data_size); -SOKOL_GFX_API_DECL void sg_update_image(sg_image img, const sg_image_content* data); -SOKOL_GFX_API_DECL int sg_append_buffer(sg_buffer buf, const void* data_ptr, int data_size); +SOKOL_GFX_API_DECL void sg_update_buffer(sg_buffer buf, const sg_range* data); +SOKOL_GFX_API_DECL void sg_update_image(sg_image img, const sg_image_data* data); +SOKOL_GFX_API_DECL int sg_append_buffer(sg_buffer buf, const sg_range* data); SOKOL_GFX_API_DECL bool sg_query_buffer_overflow(sg_buffer buf); /* rendering functions */ SOKOL_GFX_API_DECL void sg_begin_default_pass(const sg_pass_action* pass_action, int width, int height); +SOKOL_GFX_API_DECL void sg_begin_default_passf(const sg_pass_action* pass_action, float width, float height); SOKOL_GFX_API_DECL void sg_begin_pass(sg_pass pass, const sg_pass_action* pass_action); SOKOL_GFX_API_DECL void sg_apply_viewport(int x, int y, int width, int height, bool origin_top_left); +SOKOL_GFX_API_DECL void sg_apply_viewportf(float x, float y, float width, float height, bool origin_top_left); SOKOL_GFX_API_DECL void sg_apply_scissor_rect(int x, int y, int width, int height, bool origin_top_left); +SOKOL_GFX_API_DECL void sg_apply_scissor_rectf(float x, float y, float width, float height, bool origin_top_left); SOKOL_GFX_API_DECL void sg_apply_pipeline(sg_pipeline pip); SOKOL_GFX_API_DECL void sg_apply_bindings(const sg_bindings* bindings); -SOKOL_GFX_API_DECL void sg_apply_uniforms(sg_shader_stage stage, int ub_index, const void* data, int num_bytes); +SOKOL_GFX_API_DECL void sg_apply_uniforms(sg_shader_stage stage, int ub_index, const sg_range* data); SOKOL_GFX_API_DECL void sg_draw(int base_element, int num_elements, int num_instances); SOKOL_GFX_API_DECL void sg_end_pass(void); SOKOL_GFX_API_DECL void sg_commit(void); @@ -2326,11 +2383,13 @@ inline sg_image sg_make_image(const sg_image_desc& desc) { return sg_make_image( inline sg_shader sg_make_shader(const sg_shader_desc& desc) { return sg_make_shader(&desc); } inline sg_pipeline sg_make_pipeline(const sg_pipeline_desc& desc) { return sg_make_pipeline(&desc); } inline sg_pass sg_make_pass(const sg_pass_desc& desc) { return sg_make_pass(&desc); } -inline void sg_update_image(sg_image img, const sg_image_content& data) { return sg_update_image(img, &data); } +inline void sg_update_image(sg_image img, const sg_image_data& data) { return sg_update_image(img, &data); } inline void sg_begin_default_pass(const sg_pass_action& pass_action, int width, int height) { return sg_begin_default_pass(&pass_action, width, height); } +inline void sg_begin_default_passf(const sg_pass_action& pass_action, float width, float height) { return sg_begin_default_passf(&pass_action, width, height); } inline void sg_begin_pass(sg_pass pass, const sg_pass_action& pass_action) { return sg_begin_pass(pass, &pass_action); } inline void sg_apply_bindings(const sg_bindings& bindings) { return sg_apply_bindings(&bindings); } +inline void 
sg_apply_uniforms(sg_shader_stage stage, int ub_index, const sg_range& data) { return sg_apply_uniforms(stage, ub_index, &data); } inline sg_buffer_desc sg_query_buffer_defaults(const sg_buffer_desc& desc) { return sg_query_buffer_defaults(&desc); } inline sg_image_desc sg_query_image_defaults(const sg_image_desc& desc) { return sg_query_image_defaults(&desc); } @@ -2344,6 +2403,8 @@ inline void sg_init_shader(sg_shader shd_id, const sg_shader_desc& desc) { retur inline void sg_init_pipeline(sg_pipeline pip_id, const sg_pipeline_desc& desc) { return sg_init_pipeline(pip_id, &desc); } inline void sg_init_pass(sg_pass pass_id, const sg_pass_desc& desc) { return sg_init_pass(pass_id, &desc); } +inline void sg_update_buffer(sg_buffer buf_id, const sg_range& data) { return sg_update_buffer(buf_id, &data); } +inline int sg_append_buffer(sg_buffer buf_id, const sg_range& data) { return sg_append_buffer(buf_id, &data); } #endif #endif // SOKOL_GFX_INCLUDED @@ -2439,6 +2500,7 @@ inline void sg_init_pass(sg_pass pass_id, const sg_pass_desc& desc) { return sg_ #pragma warning(push) #pragma warning(disable:4115) /* named type definition in parentheses */ #pragma warning(disable:4505) /* unreferenced local function has been removed */ +#pragma warning(disable:4201) /* nonstandard extension used: nameless struct/union (needed by d3d11.h) */ #endif #if defined(SOKOL_GLCORE33) || defined(SOKOL_GLES2) || defined(SOKOL_GLES3) @@ -2651,7 +2713,7 @@ typedef struct { } _sg_buffer_common_t; _SOKOL_PRIVATE void _sg_buffer_common_init(_sg_buffer_common_t* cmn, const sg_buffer_desc* desc) { - cmn->size = desc->size; + cmn->size = (int)desc->size; cmn->append_pos = 0; cmn->append_overflow = false; cmn->type = desc->type; @@ -2707,11 +2769,11 @@ _SOKOL_PRIVATE void _sg_image_common_init(_sg_image_common_t* cmn, const sg_imag } typedef struct { - int size; + size_t size; } _sg_uniform_block_t; typedef struct { - sg_image_type type; + sg_image_type image_type; sg_sampler_type sampler_type; } _sg_shader_image_t; @@ -2742,10 +2804,10 @@ _SOKOL_PRIVATE void _sg_shader_common_init(_sg_shader_common_t* cmn, const sg_sh SOKOL_ASSERT(stage->num_images == 0); for (int img_index = 0; img_index < SG_MAX_SHADERSTAGE_IMAGES; img_index++) { const sg_shader_image_desc* img_desc = &stage_desc->images[img_index]; - if (img_desc->type == _SG_IMAGETYPE_DEFAULT) { + if (img_desc->image_type == _SG_IMAGETYPE_DEFAULT) { break; } - stage->images[img_index].type = img_desc->type; + stage->images[img_index].image_type = img_desc->image_type; stage->images[img_index].sampler_type = img_desc->sampler_type; stage->num_images++; } @@ -2757,48 +2819,49 @@ typedef struct { sg_index_type index_type; bool vertex_layout_valid[SG_MAX_SHADERSTAGE_BUFFERS]; int color_attachment_count; - sg_pixel_format color_format; + sg_pixel_format color_formats[SG_MAX_COLOR_ATTACHMENTS]; sg_pixel_format depth_format; int sample_count; float depth_bias; float depth_bias_slope_scale; float depth_bias_clamp; - float blend_color[4]; + sg_color blend_color; } _sg_pipeline_common_t; _SOKOL_PRIVATE void _sg_pipeline_common_init(_sg_pipeline_common_t* cmn, const sg_pipeline_desc* desc) { + SOKOL_ASSERT(desc->color_count < SG_MAX_COLOR_ATTACHMENTS); cmn->shader_id = desc->shader; cmn->index_type = desc->index_type; for (int i = 0; i < SG_MAX_SHADERSTAGE_BUFFERS; i++) { cmn->vertex_layout_valid[i] = false; } - cmn->color_attachment_count = desc->blend.color_attachment_count; - cmn->color_format = desc->blend.color_format; - cmn->depth_format = desc->blend.depth_format; - 
cmn->sample_count = desc->rasterizer.sample_count; - cmn->depth_bias = desc->rasterizer.depth_bias; - cmn->depth_bias_slope_scale = desc->rasterizer.depth_bias_slope_scale; - cmn->depth_bias_clamp = desc->rasterizer.depth_bias_clamp; - for (int i = 0; i < 4; i++) { - cmn->blend_color[i] = desc->blend.blend_color[i]; + cmn->color_attachment_count = desc->color_count; + for (int i = 0; i < cmn->color_attachment_count; i++) { + cmn->color_formats[i] = desc->colors[i].pixel_format; } + cmn->depth_format = desc->depth.pixel_format; + cmn->sample_count = desc->sample_count; + cmn->depth_bias = desc->depth.bias; + cmn->depth_bias_slope_scale = desc->depth.bias_slope_scale; + cmn->depth_bias_clamp = desc->depth.bias_clamp; + cmn->blend_color = desc->blend_color; } typedef struct { sg_image image_id; int mip_level; int slice; -} _sg_attachment_common_t; +} _sg_pass_attachment_common_t; typedef struct { int num_color_atts; - _sg_attachment_common_t color_atts[SG_MAX_COLOR_ATTACHMENTS]; - _sg_attachment_common_t ds_att; + _sg_pass_attachment_common_t color_atts[SG_MAX_COLOR_ATTACHMENTS]; + _sg_pass_attachment_common_t ds_att; } _sg_pass_common_t; _SOKOL_PRIVATE void _sg_pass_common_init(_sg_pass_common_t* cmn, const sg_pass_desc* desc) { - const sg_attachment_desc* att_desc; - _sg_attachment_common_t* att; + const sg_pass_attachment_desc* att_desc; + _sg_pass_attachment_common_t* att; for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { att_desc = &desc->color_attachments[i]; if (att_desc->image.id != SG_INVALID_ID) { @@ -2847,8 +2910,9 @@ _SOKOL_PRIVATE void _sg_smpcache_init(_sg_sampler_cache_t* cache, int capacity) SOKOL_ASSERT(cache && (capacity > 0)); memset(cache, 0, sizeof(_sg_sampler_cache_t)); cache->capacity = capacity; - const int size = cache->capacity * sizeof(_sg_sampler_cache_item_t); + const size_t size = (size_t)cache->capacity * sizeof(_sg_sampler_cache_item_t); cache->items = (_sg_sampler_cache_item_t*) SOKOL_MALLOC(size); + SOKOL_ASSERT(cache->items); memset(cache->items, 0, size); } @@ -2913,7 +2977,7 @@ _SOKOL_PRIVATE void _sg_smpcache_add_item(_sg_sampler_cache_t* cache, const sg_i _SOKOL_PRIVATE uintptr_t _sg_smpcache_sampler(_sg_sampler_cache_t* cache, int item_index) { SOKOL_ASSERT(cache && cache->items); - SOKOL_ASSERT((item_index >= 0) && (item_index < cache->num_items)); + SOKOL_ASSERT(item_index < cache->num_items); return cache->items[item_index].sampler_handle; } @@ -2957,7 +3021,7 @@ typedef struct { } dmy; } _sg_dummy_pass_t; typedef _sg_dummy_pass_t _sg_pass_t; -typedef _sg_attachment_common_t _sg_attachment_t; +typedef _sg_pass_attachment_common_t _sg_pass_attachment_t; typedef struct { _sg_slot_t slot; @@ -3041,10 +3105,15 @@ typedef struct { _sg_shader_t* shader; struct { _sg_gl_attr_t attrs[SG_MAX_VERTEX_ATTRIBUTES]; - sg_depth_stencil_state depth_stencil; + sg_depth_state depth; + sg_stencil_state stencil; sg_primitive_type primitive_type; sg_blend_state blend; - sg_rasterizer_state rast; + sg_color_mask color_write_mask[SG_MAX_COLOR_ATTACHMENTS]; + sg_cull_mode cull_mode; + sg_face_winding face_winding; + int sample_count; + bool alpha_to_coverage_enabled; } gl; } _sg_gl_pipeline_t; typedef _sg_gl_pipeline_t _sg_pipeline_t; @@ -3064,7 +3133,7 @@ typedef struct { } gl; } _sg_gl_pass_t; typedef _sg_gl_pass_t _sg_pass_t; -typedef _sg_attachment_common_t _sg_attachment_t; +typedef _sg_pass_attachment_common_t _sg_pass_attachment_t; typedef struct { _sg_slot_t slot; @@ -3086,10 +3155,16 @@ typedef struct { } _sg_gl_texture_bind_slot; typedef struct { - 
sg_depth_stencil_state ds; + sg_depth_state depth; + sg_stencil_state stencil; sg_blend_state blend; - sg_rasterizer_state rast; + sg_color_mask color_write_mask[SG_MAX_COLOR_ATTACHMENTS]; + sg_cull_mode cull_mode; + sg_face_winding face_winding; bool polygon_offset_enabled; + int sample_count; + sg_color blend_color; + bool alpha_to_coverage_enabled; _sg_gl_cache_attr_t attrs[SG_MAX_VERTEX_ATTRIBUTES]; GLuint vertex_buffer; GLuint index_buffer; @@ -3166,7 +3241,7 @@ typedef struct { ID3D11VertexShader* vs; ID3D11PixelShader* fs; void* vs_blob; - int vs_blob_length; + size_t vs_blob_length; } d3d11; } _sg_d3d11_shader_t; typedef _sg_d3d11_shader_t _sg_shader_t; @@ -3207,7 +3282,7 @@ typedef struct { } d3d11; } _sg_d3d11_pass_t; typedef _sg_d3d11_pass_t _sg_pass_t; -typedef _sg_attachment_common_t _sg_attachment_t; +typedef _sg_pass_attachment_common_t _sg_pass_attachment_t; typedef struct { _sg_slot_t slot; @@ -3262,16 +3337,16 @@ typedef struct { typedef struct { uint32_t frame_index; /* frame index at which it is safe to release this resource */ - uint32_t slot_index; + int slot_index; } _sg_mtl_release_item_t; typedef struct { NSMutableArray* pool; - uint32_t num_slots; - uint32_t free_queue_top; - uint32_t* free_queue; - uint32_t release_queue_front; - uint32_t release_queue_back; + int num_slots; + int free_queue_top; + int* free_queue; + int release_queue_front; + int release_queue_back; _sg_mtl_release_item_t* release_queue; } _sg_mtl_idpool_t; @@ -3279,7 +3354,7 @@ typedef struct { _sg_slot_t slot; _sg_buffer_common_t cmn; struct { - uint32_t buf[SG_NUM_INFLIGHT_FRAMES]; /* index into _sg_mtl_pool */ + int buf[SG_NUM_INFLIGHT_FRAMES]; /* index into _sg_mtl_pool */ } mtl; } _sg_mtl_buffer_t; typedef _sg_mtl_buffer_t _sg_buffer_t; @@ -3288,17 +3363,17 @@ typedef struct { _sg_slot_t slot; _sg_image_common_t cmn; struct { - uint32_t tex[SG_NUM_INFLIGHT_FRAMES]; - uint32_t depth_tex; - uint32_t msaa_tex; - uint32_t sampler_state; + int tex[SG_NUM_INFLIGHT_FRAMES]; + int depth_tex; + int msaa_tex; + int sampler_state; } mtl; } _sg_mtl_image_t; typedef _sg_mtl_image_t _sg_image_t; typedef struct { - uint32_t mtl_lib; - uint32_t mtl_func; + int mtl_lib; + int mtl_func; } _sg_mtl_shader_stage_t; typedef struct { @@ -3316,13 +3391,13 @@ typedef struct { _sg_shader_t* shader; struct { MTLPrimitiveType prim_type; - NSUInteger index_size; + int index_size; MTLIndexType index_type; MTLCullMode cull_mode; MTLWinding winding; uint32_t stencil_ref; - uint32_t rps; - uint32_t dss; + int rps; + int dss; } mtl; } _sg_mtl_pipeline_t; typedef _sg_mtl_pipeline_t _sg_pipeline_t; @@ -3340,7 +3415,7 @@ typedef struct { } mtl; } _sg_mtl_pass_t; typedef _sg_mtl_pass_t _sg_pass_t; -typedef _sg_attachment_common_t _sg_attachment_t; +typedef _sg_pass_attachment_common_t _sg_pass_attachment_t; typedef struct { _sg_slot_t slot; @@ -3372,8 +3447,8 @@ typedef struct { void* user_data; uint32_t frame_index; uint32_t cur_frame_rotate_index; - uint32_t ub_size; - uint32_t cur_ub_offset; + int ub_size; + int cur_ub_offset; uint8_t* cur_ub_base_ptr; bool in_pass; bool pass_valid; @@ -3461,7 +3536,7 @@ typedef struct { } wgpu; } _sg_wgpu_pass_t; typedef _sg_wgpu_pass_t _sg_pass_t; -typedef _sg_attachment_common_t _sg_attachment_t; +typedef _sg_pass_attachment_common_t _sg_pass_attachment_t; typedef struct { _sg_slot_t slot; @@ -3557,8 +3632,9 @@ typedef enum { /* buffer creation */ _SG_VALIDATE_BUFFERDESC_CANARY, _SG_VALIDATE_BUFFERDESC_SIZE, - _SG_VALIDATE_BUFFERDESC_CONTENT, - _SG_VALIDATE_BUFFERDESC_NO_CONTENT, + 
_SG_VALIDATE_BUFFERDESC_DATA, + _SG_VALIDATE_BUFFERDESC_DATA_SIZE, + _SG_VALIDATE_BUFFERDESC_NO_DATA, /* image creation */ _SG_VALIDATE_IMAGEDESC_CANARY, @@ -3569,9 +3645,9 @@ typedef enum { _SG_VALIDATE_IMAGEDESC_MSAA_BUT_NO_RT, _SG_VALIDATE_IMAGEDESC_NO_MSAA_RT_SUPPORT, _SG_VALIDATE_IMAGEDESC_RT_IMMUTABLE, - _SG_VALIDATE_IMAGEDESC_RT_NO_CONTENT, - _SG_VALIDATE_IMAGEDESC_CONTENT, - _SG_VALIDATE_IMAGEDESC_NO_CONTENT, + _SG_VALIDATE_IMAGEDESC_RT_NO_DATA, + _SG_VALIDATE_IMAGEDESC_DATA, + _SG_VALIDATE_IMAGEDESC_NO_DATA, /* shader creation */ _SG_VALIDATE_SHADERDESC_CANARY, @@ -3608,7 +3684,6 @@ typedef enum { _SG_VALIDATE_PASSDESC_LAYER, _SG_VALIDATE_PASSDESC_SLICE, _SG_VALIDATE_PASSDESC_IMAGE_NO_RT, - _SG_VALIDATE_PASSDESC_COLOR_PIXELFORMATS, _SG_VALIDATE_PASSDESC_COLOR_INV_PIXELFORMAT, _SG_VALIDATE_PASSDESC_DEPTH_INV_PIXELFORMAT, _SG_VALIDATE_PASSDESC_IMAGE_SIZES, @@ -3878,13 +3953,15 @@ _SOKOL_PRIVATE int _sg_pixelformat_bytesize(sg_pixel_format fmt) { } } -#define _sg_roundup(val, round_to) (((val)+((round_to)-1))&~((round_to)-1)) +_SOKOL_PRIVATE int _sg_roundup(int val, int round_to) { + return (val+(round_to-1)) & ~(round_to-1); +} /* return row pitch for an image see ComputePitch in https://github.com/microsoft/DirectXTex/blob/master/DirectXTex/DirectXTexUtil.cpp */ -_SOKOL_PRIVATE uint32_t _sg_row_pitch(sg_pixel_format fmt, uint32_t width, uint32_t row_align) { - uint32_t pitch; +_SOKOL_PRIVATE int _sg_row_pitch(sg_pixel_format fmt, int width, int row_align) { + int pitch; switch (fmt) { case SG_PIXELFORMAT_BC1_RGBA: case SG_PIXELFORMAT_BC4_R: @@ -3973,7 +4050,7 @@ _SOKOL_PRIVATE int _sg_num_rows(sg_pixel_format fmt, int height) { /* return pitch of a 2D subimage / texture slice see ComputePitch in https://github.com/microsoft/DirectXTex/blob/master/DirectXTex/DirectXTexUtil.cpp */ -_SOKOL_PRIVATE uint32_t _sg_surface_pitch(sg_pixel_format fmt, uint32_t width, uint32_t height, uint32_t row_align) { +_SOKOL_PRIVATE int _sg_surface_pitch(sg_pixel_format fmt, int width, int height, int row_align) { int num_rows = _sg_num_rows(fmt, height); return num_rows * _sg_row_pitch(fmt, width, row_align); } @@ -4047,19 +4124,19 @@ _SOKOL_PRIVATE void _sg_resolve_default_pass_action(const sg_pass_action* from, for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { if (to->colors[i].action == _SG_ACTION_DEFAULT) { to->colors[i].action = SG_ACTION_CLEAR; - to->colors[i].val[0] = SG_DEFAULT_CLEAR_RED; - to->colors[i].val[1] = SG_DEFAULT_CLEAR_GREEN; - to->colors[i].val[2] = SG_DEFAULT_CLEAR_BLUE; - to->colors[i].val[3] = SG_DEFAULT_CLEAR_ALPHA; + to->colors[i].value.r = SG_DEFAULT_CLEAR_RED; + to->colors[i].value.g = SG_DEFAULT_CLEAR_GREEN; + to->colors[i].value.b = SG_DEFAULT_CLEAR_BLUE; + to->colors[i].value.a = SG_DEFAULT_CLEAR_ALPHA; } } if (to->depth.action == _SG_ACTION_DEFAULT) { to->depth.action = SG_ACTION_CLEAR; - to->depth.val = SG_DEFAULT_CLEAR_DEPTH; + to->depth.value = SG_DEFAULT_CLEAR_DEPTH; } if (to->stencil.action == _SG_ACTION_DEFAULT) { to->stencil.action = SG_ACTION_CLEAR; - to->stencil.val = SG_DEFAULT_CLEAR_STENCIL; + to->stencil.value = SG_DEFAULT_CLEAR_STENCIL; } } @@ -4147,7 +4224,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_dummy_create_pipeline(_sg_pipeline_t* pip, if (a_desc->format == SG_VERTEXFORMAT_INVALID) { break; } - SOKOL_ASSERT((a_desc->buffer_index >= 0) && (a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS)); + SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS); pip->cmn.vertex_layout_valid[a_desc->buffer_index] = true; } return SG_RESOURCESTATE_VALID; @@ 
-4164,7 +4241,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_dummy_create_pass(_sg_pass_t* pass, _sg_ima _sg_pass_common_init(&pass->cmn, desc); - const sg_attachment_desc* att_desc; + const sg_pass_attachment_desc* att_desc; for (int i = 0; i < pass->cmn.num_color_atts; i++) { att_desc = &desc->color_attachments[i]; SOKOL_ASSERT(att_desc->image.id != SG_INVALID_ID); @@ -4257,14 +4334,10 @@ _SOKOL_PRIVATE void _sg_dummy_apply_bindings( _SOKOL_UNUSED(fs_imgs); _SOKOL_UNUSED(num_fs_imgs); } -_SOKOL_PRIVATE void _sg_dummy_apply_uniforms(sg_shader_stage stage_index, int ub_index, const void* data, int num_bytes) { - SOKOL_ASSERT(data && (num_bytes > 0)); - SOKOL_ASSERT((stage_index >= 0) && ((int)stage_index < SG_NUM_SHADER_STAGES)); - SOKOL_ASSERT((ub_index >= 0) && (ub_index < SG_MAX_SHADERSTAGE_UBS)); +_SOKOL_PRIVATE void _sg_dummy_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) { _SOKOL_UNUSED(stage_index); _SOKOL_UNUSED(ub_index); _SOKOL_UNUSED(data); - _SOKOL_UNUSED(num_bytes); } _SOKOL_PRIVATE void _sg_dummy_draw(int base_element, int num_elements, int num_instances) { @@ -4273,29 +4346,27 @@ _SOKOL_PRIVATE void _sg_dummy_draw(int base_element, int num_elements, int num_i _SOKOL_UNUSED(num_instances); } -_SOKOL_PRIVATE void _sg_dummy_update_buffer(_sg_buffer_t* buf, const void* data, uint32_t data_size) { - SOKOL_ASSERT(buf && data && (data_size > 0)); +_SOKOL_PRIVATE void _sg_dummy_update_buffer(_sg_buffer_t* buf, const sg_range* data) { + SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); _SOKOL_UNUSED(data); - _SOKOL_UNUSED(data_size); if (++buf->cmn.active_slot >= buf->cmn.num_slots) { buf->cmn.active_slot = 0; } } -_SOKOL_PRIVATE uint32_t _sg_dummy_append_buffer(_sg_buffer_t* buf, const void* data, uint32_t data_size, bool new_frame) { - SOKOL_ASSERT(buf && data && (data_size > 0)); +_SOKOL_PRIVATE int _sg_dummy_append_buffer(_sg_buffer_t* buf, const sg_range* data, bool new_frame) { + SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); _SOKOL_UNUSED(data); - _SOKOL_UNUSED(data_size); if (new_frame) { if (++buf->cmn.active_slot >= buf->cmn.num_slots) { buf->cmn.active_slot = 0; } } /* NOTE: this is a requirement from WebGPU, but we want identical behaviour across all backend */ - return _sg_roundup(data_size, 4); + return _sg_roundup((int)data->size, 4); } -_SOKOL_PRIVATE void _sg_dummy_update_image(_sg_image_t* img, const sg_image_content* data) { +_SOKOL_PRIVATE void _sg_dummy_update_image(_sg_image_t* img, const sg_image_data* data) { SOKOL_ASSERT(img && data); _SOKOL_UNUSED(data); if (++img->cmn.active_slot >= img->cmn.num_slots) { @@ -4781,44 +4852,6 @@ _SOKOL_PRIVATE GLenum _sg_gl_depth_attachment_format(sg_pixel_format fmt) { } } -_SOKOL_PRIVATE void _sg_gl_init_attr(_sg_gl_attr_t* attr) { - attr->vb_index = -1; - attr->divisor = -1; -} - -_SOKOL_PRIVATE void _sg_gl_init_stencil_state(sg_stencil_state* s) { - SOKOL_ASSERT(s); - s->fail_op = SG_STENCILOP_KEEP; - s->depth_fail_op = SG_STENCILOP_KEEP; - s->pass_op = SG_STENCILOP_KEEP; - s->compare_func = SG_COMPAREFUNC_ALWAYS; -} - -_SOKOL_PRIVATE void _sg_gl_init_depth_stencil_state(sg_depth_stencil_state* s) { - SOKOL_ASSERT(s); - _sg_gl_init_stencil_state(&s->stencil_front); - _sg_gl_init_stencil_state(&s->stencil_back); - s->depth_compare_func = SG_COMPAREFUNC_ALWAYS; -} - -_SOKOL_PRIVATE void _sg_gl_init_blend_state(sg_blend_state* s) { - SOKOL_ASSERT(s); - s->src_factor_rgb = SG_BLENDFACTOR_ONE; - s->dst_factor_rgb = SG_BLENDFACTOR_ZERO; - s->op_rgb = SG_BLENDOP_ADD; - 
s->src_factor_alpha = SG_BLENDFACTOR_ONE; - s->dst_factor_alpha = SG_BLENDFACTOR_ZERO; - s->op_alpha = SG_BLENDOP_ADD; - s->color_write_mask = SG_COLORMASK_RGBA; -} - -_SOKOL_PRIVATE void _sg_gl_init_rasterizer_state(sg_rasterizer_state* s) { - SOKOL_ASSERT(s); - s->cull_mode = SG_CULLMODE_NONE; - s->face_winding = SG_FACEWINDING_CW; - s->sample_count = 1; -} - /* see: https://www.khronos.org/registry/OpenGL-Refpages/es3.0/html/glTexImage2D.xhtml */ _SOKOL_PRIVATE void _sg_gl_init_pixelformats(bool has_bgra) { #if !defined(SOKOL_GLES2) @@ -5093,6 +5126,8 @@ _SOKOL_PRIVATE void _sg_gl_init_caps_glcore33(void) { _sg.features.imagetype_3d = true; _sg.features.imagetype_array = true; _sg.features.image_clamp_to_border = true; + _sg.features.mrt_independent_blend_state = false; + _sg.features.mrt_independent_write_mask = true; /* scan extensions */ bool has_s3tc = false; /* BC1..BC3 */ @@ -5103,7 +5138,7 @@ _SOKOL_PRIVATE void _sg_gl_init_caps_glcore33(void) { GLint num_ext = 0; glGetIntegerv(GL_NUM_EXTENSIONS, &num_ext); for (int i = 0; i < num_ext; i++) { - const char* ext = (const char*) glGetStringi(GL_EXTENSIONS, i); + const char* ext = (const char*) glGetStringi(GL_EXTENSIONS, (GLuint)i); if (ext) { if (strstr(ext, "_texture_compression_s3tc")) { has_s3tc = true; @@ -5168,6 +5203,8 @@ _SOKOL_PRIVATE void _sg_gl_init_caps_gles3(void) { _sg.features.imagetype_3d = true; _sg.features.imagetype_array = true; _sg.features.image_clamp_to_border = false; + _sg.features.mrt_independent_blend_state = false; + _sg.features.mrt_independent_write_mask = false; bool has_s3tc = false; /* BC1..BC3 */ bool has_rgtc = false; /* BC4 and BC5 */ @@ -5185,7 +5222,7 @@ _SOKOL_PRIVATE void _sg_gl_init_caps_gles3(void) { GLint num_ext = 0; glGetIntegerv(GL_NUM_EXTENSIONS, &num_ext); for (int i = 0; i < num_ext; i++) { - const char* ext = (const char*) glGetStringi(GL_EXTENSIONS, i); + const char* ext = (const char*) glGetStringi(GL_EXTENSIONS, (GLuint)i); if (ext) { if (strstr(ext, "_texture_compression_s3tc")) { has_s3tc = true; @@ -5226,6 +5263,13 @@ _SOKOL_PRIVATE void _sg_gl_init_caps_gles3(void) { } } + /* on WebGL2, color_buffer_float also includes 16-bit formats + see: https://developer.mozilla.org/en-US/docs/Web/API/EXT_color_buffer_float + */ + #if defined(__EMSCRIPTEN__) + has_colorbuffer_half_float = has_colorbuffer_float; + #endif + /* limits */ _sg_gl_init_limits(); @@ -5296,6 +5340,8 @@ _SOKOL_PRIVATE void _sg_gl_init_caps_gles2(void) { _sg.features.imagetype_3d = false; _sg.features.imagetype_array = false; _sg.features.image_clamp_to_border = false; + _sg.features.mrt_independent_blend_state = false; + _sg.features.mrt_independent_write_mask = false; /* limits */ _sg_gl_init_limits(); @@ -5421,7 +5467,7 @@ _SOKOL_PRIVATE void _sg_gl_cache_active_texture(GLenum texture) { _SOKOL_PRIVATE void _sg_gl_cache_clear_texture_bindings(bool force) { for (int i = 0; (i < SG_MAX_SHADERSTAGE_IMAGES) && (i < _sg.gl.max_combined_texture_image_units); i++) { if (force || (_sg.gl.cache.textures[i].texture != 0)) { - GLenum gl_texture_slot = GL_TEXTURE0 + i; + GLenum gl_texture_slot = (GLenum) (GL_TEXTURE0 + i); glActiveTexture(gl_texture_slot); glBindTexture(GL_TEXTURE_2D, 0); glBindTexture(GL_TEXTURE_CUBE_MAP, 0); @@ -5449,7 +5495,7 @@ _SOKOL_PRIVATE void _sg_gl_cache_bind_texture(int slot_index, GLenum target, GLu } _sg_gl_texture_bind_slot* slot = &_sg.gl.cache.textures[slot_index]; if ((slot->target != target) || (slot->texture != texture)) { - _sg_gl_cache_active_texture(GL_TEXTURE0 + slot_index); + 
_sg_gl_cache_active_texture((GLenum)(GL_TEXTURE0 + slot_index)); /* if the target has changed, clear the previous binding on that target */ if ((target != slot->target) && (slot->target != 0)) { glBindTexture(slot->target, 0); @@ -5485,7 +5531,7 @@ _SOKOL_PRIVATE void _sg_gl_cache_invalidate_texture(GLuint tex) { for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++) { _sg_gl_texture_bind_slot* slot = &_sg.gl.cache.textures[i]; if (tex == slot->texture) { - _sg_gl_cache_active_texture(GL_TEXTURE0 + i); + _sg_gl_cache_active_texture((GLenum)(GL_TEXTURE0 + i)); glBindTexture(slot->target, 0); slot->target = 0; slot->texture = 0; @@ -5519,9 +5565,11 @@ _SOKOL_PRIVATE void _sg_gl_reset_state_cache(void) { _SG_GL_CHECK_ERROR(); _sg_gl_cache_clear_texture_bindings(true); _SG_GL_CHECK_ERROR(); - for (uint32_t i = 0; i < _sg.limits.max_vertex_attrs; i++) { - _sg_gl_init_attr(&_sg.gl.cache.attrs[i].gl_attr); - glDisableVertexAttribArray(i); + for (int i = 0; i < _sg.limits.max_vertex_attrs; i++) { + _sg_gl_attr_t* attr = &_sg.gl.cache.attrs[i].gl_attr; + attr->vb_index = -1; + attr->divisor = -1; + glDisableVertexAttribArray((GLuint)i); _SG_GL_CHECK_ERROR(); } _sg.gl.cache.cur_primitive_type = GL_TRIANGLES; @@ -5530,8 +5578,16 @@ _SOKOL_PRIVATE void _sg_gl_reset_state_cache(void) { glGetIntegerv(GL_CURRENT_PROGRAM, (GLint*)&_sg.gl.cache.prog); _SG_GL_CHECK_ERROR(); - /* depth-stencil state */ - _sg_gl_init_depth_stencil_state(&_sg.gl.cache.ds); + /* depth and stencil state */ + _sg.gl.cache.depth.compare = SG_COMPAREFUNC_ALWAYS; + _sg.gl.cache.stencil.front.compare = SG_COMPAREFUNC_ALWAYS; + _sg.gl.cache.stencil.front.fail_op = SG_STENCILOP_KEEP; + _sg.gl.cache.stencil.front.depth_fail_op = SG_STENCILOP_KEEP; + _sg.gl.cache.stencil.front.pass_op = SG_STENCILOP_KEEP; + _sg.gl.cache.stencil.back.compare = SG_COMPAREFUNC_ALWAYS; + _sg.gl.cache.stencil.back.fail_op = SG_STENCILOP_KEEP; + _sg.gl.cache.stencil.back.depth_fail_op = SG_STENCILOP_KEEP; + _sg.gl.cache.stencil.back.pass_op = SG_STENCILOP_KEEP; glEnable(GL_DEPTH_TEST); glDepthFunc(GL_ALWAYS); glDepthMask(GL_FALSE); @@ -5541,15 +5597,25 @@ _SOKOL_PRIVATE void _sg_gl_reset_state_cache(void) { glStencilMask(0); /* blend state */ - _sg_gl_init_blend_state(&_sg.gl.cache.blend); + _sg.gl.cache.blend.src_factor_rgb = SG_BLENDFACTOR_ONE; + _sg.gl.cache.blend.dst_factor_rgb = SG_BLENDFACTOR_ZERO; + _sg.gl.cache.blend.op_rgb = SG_BLENDOP_ADD; + _sg.gl.cache.blend.src_factor_alpha = SG_BLENDFACTOR_ONE; + _sg.gl.cache.blend.dst_factor_alpha = SG_BLENDFACTOR_ZERO; + _sg.gl.cache.blend.op_alpha = SG_BLENDOP_ADD; glDisable(GL_BLEND); glBlendFuncSeparate(GL_ONE, GL_ZERO, GL_ONE, GL_ZERO); glBlendEquationSeparate(GL_FUNC_ADD, GL_FUNC_ADD); - glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); glBlendColor(0.0f, 0.0f, 0.0f, 0.0f); - /* rasterizer state */ - _sg_gl_init_rasterizer_state(&_sg.gl.cache.rast); + /* standalone state */ + for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { + _sg.gl.cache.color_write_mask[i] = SG_COLORMASK_RGBA; + } + _sg.gl.cache.cull_mode = SG_CULLMODE_NONE; + _sg.gl.cache.face_winding = SG_FACEWINDING_CW; + _sg.gl.cache.sample_count = 1; + glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); glPolygonOffset(0.0f, 0.0f); glDisable(GL_POLYGON_OFFSET_FILL); glDisable(GL_CULL_FACE); @@ -5657,8 +5723,8 @@ _SOKOL_PRIVATE sg_resource_state _sg_gl_create_buffer(_sg_buffer_t* buf, const s _sg_gl_cache_bind_buffer(gl_target, gl_buf); glBufferData(gl_target, buf->cmn.size, 0, gl_usage); if (buf->cmn.usage == SG_USAGE_IMMUTABLE) { - 
SOKOL_ASSERT(desc->content); - glBufferSubData(gl_target, 0, buf->cmn.size, desc->content); + SOKOL_ASSERT(desc->data.ptr); + glBufferSubData(gl_target, 0, buf->cmn.size, desc->data.ptr); } _sg_gl_cache_restore_buffer_binding(gl_target); } @@ -5768,8 +5834,8 @@ _SOKOL_PRIVATE sg_resource_state _sg_gl_create_image(_sg_image_t* img, const sg_ _sg_gl_cache_bind_texture(0, img->gl.target, img->gl.tex[slot]); GLenum gl_min_filter = _sg_gl_filter(img->cmn.min_filter); GLenum gl_mag_filter = _sg_gl_filter(img->cmn.mag_filter); - glTexParameteri(img->gl.target, GL_TEXTURE_MIN_FILTER, gl_min_filter); - glTexParameteri(img->gl.target, GL_TEXTURE_MAG_FILTER, gl_mag_filter); + glTexParameteri(img->gl.target, GL_TEXTURE_MIN_FILTER, (GLint)gl_min_filter); + glTexParameteri(img->gl.target, GL_TEXTURE_MAG_FILTER, (GLint)gl_mag_filter); if (_sg.gl.ext_anisotropic && (img->cmn.max_anisotropy > 1)) { GLint max_aniso = (GLint) img->cmn.max_anisotropy; if (max_aniso > _sg.gl.max_anisotropy) { @@ -5782,11 +5848,11 @@ _SOKOL_PRIVATE sg_resource_state _sg_gl_create_image(_sg_image_t* img, const sg_ glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); } else { - glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_S, _sg_gl_wrap(img->cmn.wrap_u)); - glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_T, _sg_gl_wrap(img->cmn.wrap_v)); + glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_S, (GLint)_sg_gl_wrap(img->cmn.wrap_u)); + glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_T, (GLint)_sg_gl_wrap(img->cmn.wrap_v)); #if !defined(SOKOL_GLES2) if (!_sg.gl.gles2 && (img->cmn.type == SG_IMAGETYPE_3D)) { - glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_R, _sg_gl_wrap(img->cmn.wrap_w)); + glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_R, (GLint)_sg_gl_wrap(img->cmn.wrap_w)); } #endif #if defined(SOKOL_GLCORE33) @@ -5822,8 +5888,8 @@ _SOKOL_PRIVATE sg_resource_state _sg_gl_create_image(_sg_image_t* img, const sg_ if (SG_IMAGETYPE_CUBE == img->cmn.type) { gl_img_target = _sg_gl_cubeface_target(face_index); } - const GLvoid* data_ptr = desc->content.subimage[face_index][mip_index].ptr; - const int data_size = desc->content.subimage[face_index][mip_index].size; + const GLvoid* data_ptr = desc->data.subimage[face_index][mip_index].ptr; + const GLsizei data_size = (GLsizei) desc->data.subimage[face_index][mip_index].size; int mip_width = img->cmn.width >> mip_index; if (mip_width == 0) { mip_width = 1; @@ -5839,7 +5905,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_gl_create_image(_sg_image_t* img, const sg_ } else { const GLenum gl_type = _sg_gl_teximage_type(img->cmn.pixel_format); - glTexImage2D(gl_img_target, mip_index, gl_internal_format, + glTexImage2D(gl_img_target, mip_index, (GLint)gl_internal_format, mip_width, mip_height, 0, gl_format, gl_type, data_ptr); } } @@ -5858,7 +5924,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_gl_create_image(_sg_image_t* img, const sg_ } else { const GLenum gl_type = _sg_gl_teximage_type(img->cmn.pixel_format); - glTexImage3D(gl_img_target, mip_index, gl_internal_format, + glTexImage3D(gl_img_target, mip_index, (GLint)gl_internal_format, mip_width, mip_height, mip_depth, 0, gl_format, gl_type, data_ptr); } } @@ -5906,7 +5972,7 @@ _SOKOL_PRIVATE GLuint _sg_gl_compile_shader(sg_shader_stage stage, const char* s GLint log_len = 0; glGetShaderiv(gl_shd, GL_INFO_LOG_LENGTH, &log_len); if (log_len > 0) { - GLchar* log_buf = (GLchar*) SOKOL_MALLOC(log_len); + GLchar* log_buf = (GLchar*) SOKOL_MALLOC((size_t)log_len); glGetShaderInfoLog(gl_shd, log_len, &log_len, log_buf); SOKOL_LOG(log_buf); 
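/* Usage sketch: with this change a buffer's initial content travels as an
   sg_range (.ptr/.size) in the new sg_buffer_desc.data member instead of the
   old .content pointer. A minimal caller-side example, assuming a hypothetical
   'vertices' array (not part of this patch):

       float vertices[] = { 0.0f, 0.5f, 0.5f, -0.5f, -0.5f, -0.5f };
       sg_buffer vbuf = sg_make_buffer(&(sg_buffer_desc){
           .data = { .ptr = vertices, .size = sizeof(vertices) }
       });

   The same sg_range type is used for the per-mipmap entries in
   sg_image_desc.data.subimage[face][mip], replacing sg_subimage_content. */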
SOKOL_FREE(log_buf); @@ -5949,7 +6015,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_gl_create_shader(_sg_shader_t* shd, const s GLint log_len = 0; glGetProgramiv(gl_prog, GL_INFO_LOG_LENGTH, &log_len); if (log_len > 0) { - GLchar* log_buf = (GLchar*) SOKOL_MALLOC(log_len); + GLchar* log_buf = (GLchar*) SOKOL_MALLOC((size_t)log_len); glGetProgramInfoLog(gl_prog, log_len, &log_len, log_buf); SOKOL_LOG(log_buf); SOKOL_FREE(log_buf); @@ -5988,7 +6054,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_gl_create_shader(_sg_shader_t* shd, const s } ub->num_uniforms++; } - SOKOL_ASSERT(ub_desc->size == cur_uniform_offset); + SOKOL_ASSERT(ub_desc->size == (size_t)cur_uniform_offset); } } @@ -6003,7 +6069,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_gl_create_shader(_sg_shader_t* shd, const s _sg_gl_shader_stage_t* gl_stage = &shd->gl.stage[stage_index]; for (int img_index = 0; img_index < shd->cmn.stage[stage_index].num_images; img_index++) { const sg_shader_image_desc* img_desc = &stage_desc->images[img_index]; - SOKOL_ASSERT(img_desc->type != _SG_IMAGETYPE_DEFAULT); + SOKOL_ASSERT(img_desc->image_type != _SG_IMAGETYPE_DEFAULT); _sg_gl_shader_image_t* gl_img = &gl_stage->images[img_index]; GLint gl_loc = img_index; if (img_desc->name) { @@ -6042,20 +6108,28 @@ _SOKOL_PRIVATE sg_resource_state _sg_gl_create_pipeline(_sg_pipeline_t* pip, _sg pip->shader = shd; _sg_pipeline_common_init(&pip->cmn, desc); pip->gl.primitive_type = desc->primitive_type; - pip->gl.depth_stencil = desc->depth_stencil; - pip->gl.blend = desc->blend; - pip->gl.rast = desc->rasterizer; + pip->gl.depth = desc->depth; + pip->gl.stencil = desc->stencil; + // FIXME: blend color and write mask per draw-buffer-attachment (requires GL4) + pip->gl.blend = desc->colors[0].blend; + for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { + pip->gl.color_write_mask[i] = desc->colors[i].write_mask; + } + pip->gl.cull_mode = desc->cull_mode; + pip->gl.face_winding = desc->face_winding; + pip->gl.sample_count = desc->sample_count; + pip->gl.alpha_to_coverage_enabled = desc->alpha_to_coverage_enabled; /* resolve vertex attributes */ for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { pip->gl.attrs[attr_index].vb_index = -1; } - for (uint32_t attr_index = 0; attr_index < _sg.limits.max_vertex_attrs; attr_index++) { + for (int attr_index = 0; attr_index < _sg.limits.max_vertex_attrs; attr_index++) { const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index]; if (a_desc->format == SG_VERTEXFORMAT_INVALID) { break; } - SOKOL_ASSERT((a_desc->buffer_index >= 0) && (a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS)); + SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS); const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[a_desc->buffer_index]; const sg_vertex_step step_func = l_desc->step_func; const int step_rate = l_desc->step_rate; @@ -6111,7 +6185,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_gl_create_pass(_sg_pass_t* pass, _sg_image_ _sg_pass_common_init(&pass->cmn, desc); /* copy image pointers */ - const sg_attachment_desc* att_desc; + const sg_pass_attachment_desc* att_desc; for (int i = 0; i < pass->cmn.num_color_atts; i++) { att_desc = &desc->color_attachments[i]; SOKOL_ASSERT(att_desc->image.id != SG_INVALID_ID); @@ -6145,7 +6219,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_gl_create_pass(_sg_pass_t* pass, _sg_image_ if (att_img) { const GLuint gl_render_buffer = att_img->gl.msaa_render_buffer; SOKOL_ASSERT(gl_render_buffer); - glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0+i, 
GL_RENDERBUFFER, gl_render_buffer); + glFramebufferRenderbuffer(GL_FRAMEBUFFER, (GLenum)(GL_COLOR_ATTACHMENT0+i), GL_RENDERBUFFER, gl_render_buffer); } } } @@ -6157,7 +6231,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_gl_create_pass(_sg_pass_t* pass, _sg_image_ if (att_img) { const GLuint gl_tex = att_img->gl.tex[0]; SOKOL_ASSERT(gl_tex); - const GLenum gl_att = GL_COLOR_ATTACHMENT0 + i; + const GLenum gl_att = (GLenum)(GL_COLOR_ATTACHMENT0 + i); switch (att_img->cmn.type) { case SG_IMAGETYPE_2D: glFramebufferTexture2D(GL_FRAMEBUFFER, gl_att, GL_TEXTURE_2D, gl_tex, mip_level); @@ -6211,7 +6285,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_gl_create_pass(_sg_pass_t* pass, _sg_image_ if (is_msaa) { for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { _sg_gl_attachment_t* gl_att = &pass->gl.color_atts[i]; - _sg_attachment_t* cmn_att = &pass->cmn.color_atts[i]; + _sg_pass_attachment_t* cmn_att = &pass->cmn.color_atts[i]; if (gl_att->image) { SOKOL_ASSERT(0 == gl_att->gl_msaa_resolve_buffer); glGenFramebuffers(1, &gl_att->gl_msaa_resolve_buffer); @@ -6333,28 +6407,35 @@ _SOKOL_PRIVATE void _sg_gl_begin_pass(_sg_pass_t* pass, const sg_pass_action* ac bool need_pip_cache_flush = false; if (clear_color) { - if (_sg.gl.cache.blend.color_write_mask != SG_COLORMASK_RGBA) { - need_pip_cache_flush = true; - _sg.gl.cache.blend.color_write_mask = SG_COLORMASK_RGBA; + bool need_color_mask_flush = false; + // NOTE: not a bug to iterate over all possible color attachments + for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { + if (SG_COLORMASK_RGBA != _sg.gl.cache.color_write_mask[i]) { + need_pip_cache_flush = true; + need_color_mask_flush = true; + _sg.gl.cache.color_write_mask[i] = SG_COLORMASK_RGBA; + } + } + if (need_color_mask_flush) { glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); } } if (clear_depth) { - if (!_sg.gl.cache.ds.depth_write_enabled) { + if (!_sg.gl.cache.depth.write_enabled) { need_pip_cache_flush = true; - _sg.gl.cache.ds.depth_write_enabled = true; + _sg.gl.cache.depth.write_enabled = true; glDepthMask(GL_TRUE); } - if (_sg.gl.cache.ds.depth_compare_func != SG_COMPAREFUNC_ALWAYS) { + if (_sg.gl.cache.depth.compare != SG_COMPAREFUNC_ALWAYS) { need_pip_cache_flush = true; - _sg.gl.cache.ds.depth_compare_func = SG_COMPAREFUNC_ALWAYS; + _sg.gl.cache.depth.compare = SG_COMPAREFUNC_ALWAYS; glDepthFunc(GL_ALWAYS); } } if (clear_stencil) { - if (_sg.gl.cache.ds.stencil_write_mask != 0xFF) { + if (_sg.gl.cache.stencil.write_mask != 0xFF) { need_pip_cache_flush = true; - _sg.gl.cache.ds.stencil_write_mask = 0xFF; + _sg.gl.cache.stencil.write_mask = 0xFF; glStencilMask(0xFF); } } @@ -6376,20 +6457,20 @@ _SOKOL_PRIVATE void _sg_gl_begin_pass(_sg_pass_t* pass, const sg_pass_action* ac GLbitfield clear_mask = 0; if (clear_color) { clear_mask |= GL_COLOR_BUFFER_BIT; - const float* c = action->colors[0].val; - glClearColor(c[0], c[1], c[2], c[3]); + const sg_color c = action->colors[0].value; + glClearColor(c.r, c.g, c.b, c.a); } if (clear_depth) { clear_mask |= GL_DEPTH_BUFFER_BIT; #ifdef SOKOL_GLCORE33 - glClearDepth(action->depth.val); + glClearDepth(action->depth.value); #else - glClearDepthf(action->depth.val); + glClearDepthf(action->depth.value); #endif } if (clear_stencil) { clear_mask |= GL_STENCIL_BUFFER_BIT; - glClearStencil(action->stencil.val); + glClearStencil(action->stencil.value); } if (0 != clear_mask) { glClear(clear_mask); @@ -6400,18 +6481,18 @@ _SOKOL_PRIVATE void _sg_gl_begin_pass(_sg_pass_t* pass, const sg_pass_action* ac SOKOL_ASSERT(pass); for (int i = 0; i < num_color_atts; 
i++) { if (action->colors[i].action == SG_ACTION_CLEAR) { - glClearBufferfv(GL_COLOR, i, action->colors[i].val); + glClearBufferfv(GL_COLOR, i, &action->colors[i].value.r); } } if (pass->gl.ds_att.image) { if (clear_depth && clear_stencil) { - glClearBufferfi(GL_DEPTH_STENCIL, 0, action->depth.val, action->stencil.val); + glClearBufferfi(GL_DEPTH_STENCIL, 0, action->depth.value, action->stencil.value); } else if (clear_depth) { - glClearBufferfv(GL_DEPTH, 0, &action->depth.val); + glClearBufferfv(GL_DEPTH, 0, &action->depth.value); } else if (clear_stencil) { - GLint val = (GLint) action->stencil.val; + GLint val = (GLint) action->stencil.value; glClearBufferiv(GL_STENCIL, 0, &val); } } @@ -6443,7 +6524,7 @@ _SOKOL_PRIVATE void _sg_gl_end_pass(void) { if (gl_att->image) { SOKOL_ASSERT(gl_att->gl_msaa_resolve_buffer); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, gl_att->gl_msaa_resolve_buffer); - glReadBuffer(GL_COLOR_ATTACHMENT0 + att_index); + glReadBuffer((GLenum)(GL_COLOR_ATTACHMENT0 + att_index)); glBlitFramebuffer(0, 0, w, h, 0, 0, w, h, GL_COLOR_BUFFER_BIT, GL_NEAREST); } else { @@ -6486,156 +6567,198 @@ _SOKOL_PRIVATE void _sg_gl_apply_pipeline(_sg_pipeline_t* pip) { _sg.gl.cache.cur_primitive_type = _sg_gl_primitive_type(pip->gl.primitive_type); _sg.gl.cache.cur_index_type = _sg_gl_index_type(pip->cmn.index_type); - /* update depth-stencil state */ - const sg_depth_stencil_state* new_ds = &pip->gl.depth_stencil; - sg_depth_stencil_state* cache_ds = &_sg.gl.cache.ds; - if (new_ds->depth_compare_func != cache_ds->depth_compare_func) { - cache_ds->depth_compare_func = new_ds->depth_compare_func; - glDepthFunc(_sg_gl_compare_func(new_ds->depth_compare_func)); - } - if (new_ds->depth_write_enabled != cache_ds->depth_write_enabled) { - cache_ds->depth_write_enabled = new_ds->depth_write_enabled; - glDepthMask(new_ds->depth_write_enabled); - } - if (new_ds->stencil_enabled != cache_ds->stencil_enabled) { - cache_ds->stencil_enabled = new_ds->stencil_enabled; - if (new_ds->stencil_enabled) glEnable(GL_STENCIL_TEST); - else glDisable(GL_STENCIL_TEST); - } - if (new_ds->stencil_write_mask != cache_ds->stencil_write_mask) { - cache_ds->stencil_write_mask = new_ds->stencil_write_mask; - glStencilMask(new_ds->stencil_write_mask); - } - for (int i = 0; i < 2; i++) { - const sg_stencil_state* new_ss = (i==0)? &new_ds->stencil_front : &new_ds->stencil_back; - sg_stencil_state* cache_ss = (i==0)? &cache_ds->stencil_front : &cache_ds->stencil_back; - GLenum gl_face = (i==0)? 
GL_FRONT : GL_BACK; - if ((new_ss->compare_func != cache_ss->compare_func) || - (new_ds->stencil_read_mask != cache_ds->stencil_read_mask) || - (new_ds->stencil_ref != cache_ds->stencil_ref)) - { - cache_ss->compare_func = new_ss->compare_func; - glStencilFuncSeparate(gl_face, - _sg_gl_compare_func(new_ss->compare_func), - new_ds->stencil_ref, - new_ds->stencil_read_mask); - } - if ((new_ss->fail_op != cache_ss->fail_op) || - (new_ss->depth_fail_op != cache_ss->depth_fail_op) || - (new_ss->pass_op != cache_ss->pass_op)) + /* update depth state */ + { + const sg_depth_state* state_ds = &pip->gl.depth; + sg_depth_state* cache_ds = &_sg.gl.cache.depth; + if (state_ds->compare != cache_ds->compare) { + cache_ds->compare = state_ds->compare; + glDepthFunc(_sg_gl_compare_func(state_ds->compare)); + } + if (state_ds->write_enabled != cache_ds->write_enabled) { + cache_ds->write_enabled = state_ds->write_enabled; + glDepthMask(state_ds->write_enabled); + } + if (!_sg_fequal(state_ds->bias, cache_ds->bias, 0.000001f) || + !_sg_fequal(state_ds->bias_slope_scale, cache_ds->bias_slope_scale, 0.000001f)) { - cache_ss->fail_op = new_ss->fail_op; - cache_ss->depth_fail_op = new_ss->depth_fail_op; - cache_ss->pass_op = new_ss->pass_op; - glStencilOpSeparate(gl_face, - _sg_gl_stencil_op(new_ss->fail_op), - _sg_gl_stencil_op(new_ss->depth_fail_op), - _sg_gl_stencil_op(new_ss->pass_op)); - } - } - cache_ds->stencil_read_mask = new_ds->stencil_read_mask; - cache_ds->stencil_ref = new_ds->stencil_ref; - - /* update blend state */ - const sg_blend_state* new_b = &pip->gl.blend; - sg_blend_state* cache_b = &_sg.gl.cache.blend; - if (new_b->enabled != cache_b->enabled) { - cache_b->enabled = new_b->enabled; - if (new_b->enabled) glEnable(GL_BLEND); - else glDisable(GL_BLEND); - } - if ((new_b->src_factor_rgb != cache_b->src_factor_rgb) || - (new_b->dst_factor_rgb != cache_b->dst_factor_rgb) || - (new_b->src_factor_alpha != cache_b->src_factor_alpha) || - (new_b->dst_factor_alpha != cache_b->dst_factor_alpha)) + /* according to ANGLE's D3D11 backend: + D3D11 SlopeScaledDepthBias ==> GL polygonOffsetFactor + D3D11 DepthBias ==> GL polygonOffsetUnits + DepthBiasClamp has no meaning on GL + */ + cache_ds->bias = state_ds->bias; + cache_ds->bias_slope_scale = state_ds->bias_slope_scale; + glPolygonOffset(state_ds->bias_slope_scale, state_ds->bias); + bool po_enabled = true; + if (_sg_fequal(state_ds->bias, 0.0f, 0.000001f) && + _sg_fequal(state_ds->bias_slope_scale, 0.0f, 0.000001f)) + { + po_enabled = false; + } + if (po_enabled != _sg.gl.cache.polygon_offset_enabled) { + _sg.gl.cache.polygon_offset_enabled = po_enabled; + if (po_enabled) { + glEnable(GL_POLYGON_OFFSET_FILL); + } + else { + glDisable(GL_POLYGON_OFFSET_FILL); + } + } + } + } + + /* update stencil state */ { - cache_b->src_factor_rgb = new_b->src_factor_rgb; - cache_b->dst_factor_rgb = new_b->dst_factor_rgb; - cache_b->src_factor_alpha = new_b->src_factor_alpha; - cache_b->dst_factor_alpha = new_b->dst_factor_alpha; - glBlendFuncSeparate(_sg_gl_blend_factor(new_b->src_factor_rgb), - _sg_gl_blend_factor(new_b->dst_factor_rgb), - _sg_gl_blend_factor(new_b->src_factor_alpha), - _sg_gl_blend_factor(new_b->dst_factor_alpha)); - } - if ((new_b->op_rgb != cache_b->op_rgb) || (new_b->op_alpha != cache_b->op_alpha)) { - cache_b->op_rgb = new_b->op_rgb; - cache_b->op_alpha = new_b->op_alpha; - glBlendEquationSeparate(_sg_gl_blend_op(new_b->op_rgb), _sg_gl_blend_op(new_b->op_alpha)); - } - if (new_b->color_write_mask != cache_b->color_write_mask) { - 
cache_b->color_write_mask = new_b->color_write_mask; - glColorMask((new_b->color_write_mask & SG_COLORMASK_R) != 0, - (new_b->color_write_mask & SG_COLORMASK_G) != 0, - (new_b->color_write_mask & SG_COLORMASK_B) != 0, - (new_b->color_write_mask & SG_COLORMASK_A) != 0); - } - if (!_sg_fequal(new_b->blend_color[0], cache_b->blend_color[0], 0.0001f) || - !_sg_fequal(new_b->blend_color[1], cache_b->blend_color[1], 0.0001f) || - !_sg_fequal(new_b->blend_color[2], cache_b->blend_color[2], 0.0001f) || - !_sg_fequal(new_b->blend_color[3], cache_b->blend_color[3], 0.0001f)) + const sg_stencil_state* state_ss = &pip->gl.stencil; + sg_stencil_state* cache_ss = &_sg.gl.cache.stencil; + if (state_ss->enabled != cache_ss->enabled) { + cache_ss->enabled = state_ss->enabled; + if (state_ss->enabled) { + glEnable(GL_STENCIL_TEST); + } + else { + glDisable(GL_STENCIL_TEST); + } + } + if (state_ss->write_mask != cache_ss->write_mask) { + cache_ss->write_mask = state_ss->write_mask; + glStencilMask(state_ss->write_mask); + } + for (int i = 0; i < 2; i++) { + const sg_stencil_face_state* state_sfs = (i==0)? &state_ss->front : &state_ss->back; + sg_stencil_face_state* cache_sfs = (i==0)? &cache_ss->front : &cache_ss->back; + GLenum gl_face = (i==0)? GL_FRONT : GL_BACK; + if ((state_sfs->compare != cache_sfs->compare) || + (state_ss->read_mask != cache_ss->read_mask) || + (state_ss->ref != cache_ss->ref)) + { + cache_sfs->compare = state_sfs->compare; + glStencilFuncSeparate(gl_face, + _sg_gl_compare_func(state_sfs->compare), + state_ss->ref, + state_ss->read_mask); + } + if ((state_sfs->fail_op != cache_sfs->fail_op) || + (state_sfs->depth_fail_op != cache_sfs->depth_fail_op) || + (state_sfs->pass_op != cache_sfs->pass_op)) + { + cache_sfs->fail_op = state_sfs->fail_op; + cache_sfs->depth_fail_op = state_sfs->depth_fail_op; + cache_sfs->pass_op = state_sfs->pass_op; + glStencilOpSeparate(gl_face, + _sg_gl_stencil_op(state_sfs->fail_op), + _sg_gl_stencil_op(state_sfs->depth_fail_op), + _sg_gl_stencil_op(state_sfs->pass_op)); + } + } + cache_ss->read_mask = state_ss->read_mask; + cache_ss->ref = state_ss->ref; + } + + /* update blend state + FIXME: separate blend state per color attachment not support, needs GL4 + */ { - const float* bc = new_b->blend_color; - for (int i=0; i<4; i++) { - cache_b->blend_color[i] = bc[i]; + const sg_blend_state* state_bs = &pip->gl.blend; + sg_blend_state* cache_bs = &_sg.gl.cache.blend; + if (state_bs->enabled != cache_bs->enabled) { + cache_bs->enabled = state_bs->enabled; + if (state_bs->enabled) { + glEnable(GL_BLEND); + } + else { + glDisable(GL_BLEND); + } + } + if ((state_bs->src_factor_rgb != cache_bs->src_factor_rgb) || + (state_bs->dst_factor_rgb != cache_bs->dst_factor_rgb) || + (state_bs->src_factor_alpha != cache_bs->src_factor_alpha) || + (state_bs->dst_factor_alpha != cache_bs->dst_factor_alpha)) + { + cache_bs->src_factor_rgb = state_bs->src_factor_rgb; + cache_bs->dst_factor_rgb = state_bs->dst_factor_rgb; + cache_bs->src_factor_alpha = state_bs->src_factor_alpha; + cache_bs->dst_factor_alpha = state_bs->dst_factor_alpha; + glBlendFuncSeparate(_sg_gl_blend_factor(state_bs->src_factor_rgb), + _sg_gl_blend_factor(state_bs->dst_factor_rgb), + _sg_gl_blend_factor(state_bs->src_factor_alpha), + _sg_gl_blend_factor(state_bs->dst_factor_alpha)); + } + if ((state_bs->op_rgb != cache_bs->op_rgb) || (state_bs->op_alpha != cache_bs->op_alpha)) { + cache_bs->op_rgb = state_bs->op_rgb; + cache_bs->op_alpha = state_bs->op_alpha; + 
glBlendEquationSeparate(_sg_gl_blend_op(state_bs->op_rgb), _sg_gl_blend_op(state_bs->op_alpha)); + } + } + + /* standalone state */ + for (GLuint i = 0; i < (GLuint)pip->cmn.color_attachment_count; i++) { + if (pip->gl.color_write_mask[i] != _sg.gl.cache.color_write_mask[i]) { + const sg_color_mask cm = pip->gl.color_write_mask[i]; + _sg.gl.cache.color_write_mask[i] = cm; + #ifdef SOKOL_GLCORE33 + glColorMaski(i, + (cm & SG_COLORMASK_R) != 0, + (cm & SG_COLORMASK_G) != 0, + (cm & SG_COLORMASK_B) != 0, + (cm & SG_COLORMASK_A) != 0); + #else + if (0 == i) { + glColorMask((cm & SG_COLORMASK_R) != 0, + (cm & SG_COLORMASK_G) != 0, + (cm & SG_COLORMASK_B) != 0, + (cm & SG_COLORMASK_A) != 0); + } + #endif } - glBlendColor(bc[0], bc[1], bc[2], bc[3]); } - /* update rasterizer state */ - const sg_rasterizer_state* new_r = &pip->gl.rast; - sg_rasterizer_state* cache_r = &_sg.gl.cache.rast; - if (new_r->cull_mode != cache_r->cull_mode) { - cache_r->cull_mode = new_r->cull_mode; - if (SG_CULLMODE_NONE == new_r->cull_mode) { + if (!_sg_fequal(pip->cmn.blend_color.r, _sg.gl.cache.blend_color.r, 0.0001f) || + !_sg_fequal(pip->cmn.blend_color.g, _sg.gl.cache.blend_color.g, 0.0001f) || + !_sg_fequal(pip->cmn.blend_color.b, _sg.gl.cache.blend_color.b, 0.0001f) || + !_sg_fequal(pip->cmn.blend_color.a, _sg.gl.cache.blend_color.a, 0.0001f)) + { + sg_color c = pip->cmn.blend_color; + _sg.gl.cache.blend_color = c; + glBlendColor(c.r, c.g, c.b, c.a); + } + if (pip->gl.cull_mode != _sg.gl.cache.cull_mode) { + _sg.gl.cache.cull_mode = pip->gl.cull_mode; + if (SG_CULLMODE_NONE == pip->gl.cull_mode) { glDisable(GL_CULL_FACE); } else { glEnable(GL_CULL_FACE); - GLenum gl_mode = (SG_CULLMODE_FRONT == new_r->cull_mode) ? GL_FRONT : GL_BACK; + GLenum gl_mode = (SG_CULLMODE_FRONT == pip->gl.cull_mode) ? GL_FRONT : GL_BACK; glCullFace(gl_mode); } } - if (new_r->face_winding != cache_r->face_winding) { - cache_r->face_winding = new_r->face_winding; - GLenum gl_winding = (SG_FACEWINDING_CW == new_r->face_winding) ? GL_CW : GL_CCW; + if (pip->gl.face_winding != _sg.gl.cache.face_winding) { + _sg.gl.cache.face_winding = pip->gl.face_winding; + GLenum gl_winding = (SG_FACEWINDING_CW == pip->gl.face_winding) ? 
GL_CW : GL_CCW; glFrontFace(gl_winding); } - if (new_r->alpha_to_coverage_enabled != cache_r->alpha_to_coverage_enabled) { - cache_r->alpha_to_coverage_enabled = new_r->alpha_to_coverage_enabled; - if (new_r->alpha_to_coverage_enabled) glEnable(GL_SAMPLE_ALPHA_TO_COVERAGE); - else glDisable(GL_SAMPLE_ALPHA_TO_COVERAGE); + if (pip->gl.alpha_to_coverage_enabled != _sg.gl.cache.alpha_to_coverage_enabled) { + _sg.gl.cache.alpha_to_coverage_enabled = pip->gl.alpha_to_coverage_enabled; + if (pip->gl.alpha_to_coverage_enabled) { + glEnable(GL_SAMPLE_ALPHA_TO_COVERAGE); + } + else { + glDisable(GL_SAMPLE_ALPHA_TO_COVERAGE); + } } #ifdef SOKOL_GLCORE33 - if (new_r->sample_count != cache_r->sample_count) { - cache_r->sample_count = new_r->sample_count; - if (new_r->sample_count > 1) glEnable(GL_MULTISAMPLE); - else glDisable(GL_MULTISAMPLE); - } - #endif - if (!_sg_fequal(new_r->depth_bias, cache_r->depth_bias, 0.000001f) || - !_sg_fequal(new_r->depth_bias_slope_scale, cache_r->depth_bias_slope_scale, 0.000001f)) - { - /* according to ANGLE's D3D11 backend: - D3D11 SlopeScaledDepthBias ==> GL polygonOffsetFactor - D3D11 DepthBias ==> GL polygonOffsetUnits - DepthBiasClamp has no meaning on GL - */ - cache_r->depth_bias = new_r->depth_bias; - cache_r->depth_bias_slope_scale = new_r->depth_bias_slope_scale; - glPolygonOffset(new_r->depth_bias_slope_scale, new_r->depth_bias); - bool po_enabled = true; - if (_sg_fequal(new_r->depth_bias, 0.0f, 0.000001f) && - _sg_fequal(new_r->depth_bias_slope_scale, 0.0f, 0.000001f)) - { - po_enabled = false; + if (pip->gl.sample_count != _sg.gl.cache.sample_count) { + _sg.gl.cache.sample_count = pip->gl.sample_count; + if (pip->gl.sample_count > 1) { + glEnable(GL_MULTISAMPLE); } - if (po_enabled != _sg.gl.cache.polygon_offset_enabled) { - _sg.gl.cache.polygon_offset_enabled = po_enabled; - if (po_enabled) glEnable(GL_POLYGON_OFFSET_FILL); - else glDisable(GL_POLYGON_OFFSET_FILL); + else { + glDisable(GL_MULTISAMPLE); } } + #endif /* bind shader program */ if (pip->shader->gl.prog != _sg.gl.cache.prog) { @@ -6643,6 +6766,7 @@ _SOKOL_PRIVATE void _sg_gl_apply_pipeline(_sg_pipeline_t* pip) { glUseProgram(pip->shader->gl.prog); } } + _SG_GL_CHECK_ERROR(); } _SOKOL_PRIVATE void _sg_gl_apply_bindings( @@ -6684,7 +6808,7 @@ _SOKOL_PRIVATE void _sg_gl_apply_bindings( _sg.gl.cache.cur_ib_offset = ib_offset; /* vertex attributes */ - for (uint32_t attr_index = 0; attr_index < _sg.limits.max_vertex_attrs; attr_index++) { + for (GLuint attr_index = 0; attr_index < (GLuint)_sg.limits.max_vertex_attrs; attr_index++) { _sg_gl_attr_t* attr = &pip->gl.attrs[attr_index]; _sg_gl_cache_attr_t* cache_attr = &_sg.gl.cache.attrs[attr_index]; bool cache_attr_dirty = false; @@ -6711,7 +6835,7 @@ _SOKOL_PRIVATE void _sg_gl_apply_bindings( (const GLvoid*)(GLintptr)vb_offset); #ifdef SOKOL_INSTANCING_ENABLED if (_sg.features.instancing) { - glVertexAttribDivisor(attr_index, attr->divisor); + glVertexAttribDivisor(attr_index, (GLuint)attr->divisor); } #endif cache_attr_dirty = true; @@ -6737,15 +6861,12 @@ _SOKOL_PRIVATE void _sg_gl_apply_bindings( _SG_GL_CHECK_ERROR(); } -_SOKOL_PRIVATE void _sg_gl_apply_uniforms(sg_shader_stage stage_index, int ub_index, const void* data, int num_bytes) { - _SOKOL_UNUSED(num_bytes); - SOKOL_ASSERT(data && (num_bytes > 0)); - SOKOL_ASSERT((stage_index >= 0) && ((int)stage_index < SG_NUM_SHADER_STAGES)); +_SOKOL_PRIVATE void _sg_gl_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) { SOKOL_ASSERT(_sg.gl.cache.cur_pipeline); 
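/* Usage sketch: uniform data is now passed as an sg_range instead of a raw
   pointer plus byte count, and the range's .size must match the uniform block
   size declared in the shader desc (see the size assert below). A minimal
   caller-side example, assuming a hypothetical 'vs_params_t' struct (not part
   of this patch):

       vs_params_t vs_params;   // filled with per-frame shader parameters
       sg_apply_uniforms(SG_SHADERSTAGE_VS, 0,
           &(sg_range){ .ptr = &vs_params, .size = sizeof(vs_params) });
*/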
SOKOL_ASSERT(_sg.gl.cache.cur_pipeline->slot.id == _sg.gl.cache.cur_pipeline_id.id); SOKOL_ASSERT(_sg.gl.cache.cur_pipeline->shader->slot.id == _sg.gl.cache.cur_pipeline->cmn.shader_id.id); SOKOL_ASSERT(_sg.gl.cache.cur_pipeline->shader->cmn.stage[stage_index].num_uniform_blocks > ub_index); - SOKOL_ASSERT(_sg.gl.cache.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size == num_bytes); + SOKOL_ASSERT(_sg.gl.cache.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size == data->size); const _sg_gl_shader_stage_t* gl_stage = &_sg.gl.cache.cur_pipeline->shader->gl.stage[stage_index]; const _sg_gl_uniform_block_t* gl_ub = &gl_stage->uniform_blocks[ub_index]; for (int u_index = 0; u_index < gl_ub->num_uniforms; u_index++) { @@ -6754,7 +6875,7 @@ _SOKOL_PRIVATE void _sg_gl_apply_uniforms(sg_shader_stage stage_index, int ub_in if (u->gl_loc == -1) { continue; } - GLfloat* ptr = (GLfloat*) (((uint8_t*)data) + u->offset); + GLfloat* ptr = (GLfloat*) (((uint8_t*)data->ptr) + u->offset); switch (u->type) { case SG_UNIFORMTYPE_INVALID: break; @@ -6817,8 +6938,8 @@ _SOKOL_PRIVATE void _sg_gl_commit(void) { _sg_gl_cache_clear_texture_bindings(false); } -_SOKOL_PRIVATE void _sg_gl_update_buffer(_sg_buffer_t* buf, const void* data_ptr, uint32_t data_size) { - SOKOL_ASSERT(buf && data_ptr && (data_size > 0)); +_SOKOL_PRIVATE void _sg_gl_update_buffer(_sg_buffer_t* buf, const sg_range* data) { + SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); /* only one update per buffer per frame allowed */ if (++buf->cmn.active_slot >= buf->cmn.num_slots) { buf->cmn.active_slot = 0; @@ -6830,13 +6951,13 @@ _SOKOL_PRIVATE void _sg_gl_update_buffer(_sg_buffer_t* buf, const void* data_ptr _SG_GL_CHECK_ERROR(); _sg_gl_cache_store_buffer_binding(gl_tgt); _sg_gl_cache_bind_buffer(gl_tgt, gl_buf); - glBufferSubData(gl_tgt, 0, data_size, data_ptr); + glBufferSubData(gl_tgt, 0, (GLsizeiptr)data->size, data->ptr); _sg_gl_cache_restore_buffer_binding(gl_tgt); _SG_GL_CHECK_ERROR(); } -_SOKOL_PRIVATE uint32_t _sg_gl_append_buffer(_sg_buffer_t* buf, const void* data_ptr, uint32_t data_size, bool new_frame) { - SOKOL_ASSERT(buf && data_ptr && (data_size > 0)); +_SOKOL_PRIVATE int _sg_gl_append_buffer(_sg_buffer_t* buf, const sg_range* data, bool new_frame) { + SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); if (new_frame) { if (++buf->cmn.active_slot >= buf->cmn.num_slots) { buf->cmn.active_slot = 0; @@ -6849,14 +6970,14 @@ _SOKOL_PRIVATE uint32_t _sg_gl_append_buffer(_sg_buffer_t* buf, const void* data _SG_GL_CHECK_ERROR(); _sg_gl_cache_store_buffer_binding(gl_tgt); _sg_gl_cache_bind_buffer(gl_tgt, gl_buf); - glBufferSubData(gl_tgt, buf->cmn.append_pos, data_size, data_ptr); + glBufferSubData(gl_tgt, buf->cmn.append_pos, (GLsizeiptr)data->size, data->ptr); _sg_gl_cache_restore_buffer_binding(gl_tgt); _SG_GL_CHECK_ERROR(); /* NOTE: this is a requirement from WebGPU, but we want identical behaviour across all backend */ - return _sg_roundup(data_size, 4); + return _sg_roundup((int)data->size, 4); } -_SOKOL_PRIVATE void _sg_gl_update_image(_sg_image_t* img, const sg_image_content* data) { +_SOKOL_PRIVATE void _sg_gl_update_image(_sg_image_t* img, const sg_image_data* data) { SOKOL_ASSERT(img && data); /* only one update per image per frame allowed */ if (++img->cmn.active_slot >= img->cmn.num_slots) { @@ -7566,6 +7687,8 @@ _SOKOL_PRIVATE void _sg_d3d11_init_caps(void) { _sg.features.imagetype_3d = true; _sg.features.imagetype_array = true; _sg.features.image_clamp_to_border 
= true; + _sg.features.mrt_independent_blend_state = true; + _sg.features.mrt_independent_write_mask = true; _sg.limits.max_image_size_2d = 16 * 1024; _sg.limits.max_image_size_cube = 16 * 1024; @@ -7674,7 +7797,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_buffer(_sg_buffer_t* buf, cons else { D3D11_BUFFER_DESC d3d11_desc; memset(&d3d11_desc, 0, sizeof(d3d11_desc)); - d3d11_desc.ByteWidth = buf->cmn.size; + d3d11_desc.ByteWidth = (UINT)buf->cmn.size; d3d11_desc.Usage = _sg_d3d11_usage(buf->cmn.usage); d3d11_desc.BindFlags = buf->cmn.type == SG_BUFFERTYPE_VERTEXBUFFER ? D3D11_BIND_VERTEX_BUFFER : D3D11_BIND_INDEX_BUFFER; d3d11_desc.CPUAccessFlags = _sg_d3d11_cpu_access_flags(buf->cmn.usage); @@ -7682,8 +7805,8 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_buffer(_sg_buffer_t* buf, cons D3D11_SUBRESOURCE_DATA init_data; memset(&init_data, 0, sizeof(init_data)); if (buf->cmn.usage == SG_USAGE_IMMUTABLE) { - SOKOL_ASSERT(desc->content); - init_data.pSysMem = desc->content; + SOKOL_ASSERT(desc->data.ptr); + init_data.pSysMem = desc->data.ptr; init_data_ptr = &init_data; } HRESULT hr = _sg_d3d11_CreateBuffer(_sg.d3d11.dev, &d3d11_desc, init_data_ptr, &buf->d3d11.buf); @@ -7700,7 +7823,7 @@ _SOKOL_PRIVATE void _sg_d3d11_destroy_buffer(_sg_buffer_t* buf) { } } -_SOKOL_PRIVATE void _sg_d3d11_fill_subres_data(const _sg_image_t* img, const sg_image_content* content) { +_SOKOL_PRIVATE void _sg_d3d11_fill_subres_data(const _sg_image_t* img, const sg_image_data* data) { const int num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1; const int num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? img->cmn.num_slices:1; int subres_index = 0; @@ -7711,15 +7834,15 @@ _SOKOL_PRIVATE void _sg_d3d11_fill_subres_data(const _sg_image_t* img, const sg_ D3D11_SUBRESOURCE_DATA* subres_data = &_sg.d3d11.subres_data[subres_index]; const int mip_width = ((img->cmn.width>>mip_index)>0) ? img->cmn.width>>mip_index : 1; const int mip_height = ((img->cmn.height>>mip_index)>0) ? img->cmn.height>>mip_index : 1; - const sg_subimage_content* subimg_content = &(content->subimage[face_index][mip_index]); - const int slice_size = subimg_content->size / num_slices; - const int slice_offset = slice_size * slice_index; - const uint8_t* ptr = (const uint8_t*) subimg_content->ptr; + const sg_range* subimg_data = &(data->subimage[face_index][mip_index]); + const size_t slice_size = subimg_data->size / (size_t)num_slices; + const size_t slice_offset = slice_size * (size_t)slice_index; + const uint8_t* ptr = (const uint8_t*) subimg_data->ptr; subres_data->pSysMem = ptr + slice_offset; - subres_data->SysMemPitch = _sg_row_pitch(img->cmn.pixel_format, mip_width, 1); + subres_data->SysMemPitch = (UINT)_sg_row_pitch(img->cmn.pixel_format, mip_width, 1); if (img->cmn.type == SG_IMAGETYPE_3D) { /* FIXME? const int mip_depth = ((img->depth>>mip_index)>0) ? 
img->depth>>mip_index : 1; */ - subres_data->SysMemSlicePitch = _sg_surface_pitch(img->cmn.pixel_format, mip_width, mip_height, 1); + subres_data->SysMemSlicePitch = (UINT)_sg_surface_pitch(img->cmn.pixel_format, mip_width, mip_height, 1); } else { subres_data->SysMemSlicePitch = 0; @@ -7751,15 +7874,15 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_image(_sg_image_t* img, const } D3D11_TEXTURE2D_DESC d3d11_desc; memset(&d3d11_desc, 0, sizeof(d3d11_desc)); - d3d11_desc.Width = img->cmn.width; - d3d11_desc.Height = img->cmn.height; + d3d11_desc.Width = (UINT)img->cmn.width; + d3d11_desc.Height = (UINT)img->cmn.height; d3d11_desc.MipLevels = 1; d3d11_desc.ArraySize = 1; d3d11_desc.Format = img->d3d11.format; d3d11_desc.Usage = D3D11_USAGE_DEFAULT; d3d11_desc.BindFlags = D3D11_BIND_DEPTH_STENCIL; - d3d11_desc.SampleDesc.Count = img->cmn.sample_count; - d3d11_desc.SampleDesc.Quality = msaa ? D3D11_STANDARD_MULTISAMPLE_PATTERN : 0; + d3d11_desc.SampleDesc.Count = (UINT)img->cmn.sample_count; + d3d11_desc.SampleDesc.Quality = (UINT) (msaa ? D3D11_STANDARD_MULTISAMPLE_PATTERN : 0); hr = _sg_d3d11_CreateTexture2D(_sg.d3d11.dev, &d3d11_desc, NULL, &img->d3d11.texds); SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11.texds); } @@ -7769,7 +7892,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_image(_sg_image_t* img, const /* prepare initial content pointers */ D3D11_SUBRESOURCE_DATA* init_data = 0; if (!injected && (img->cmn.usage == SG_USAGE_IMMUTABLE) && !img->cmn.render_target) { - _sg_d3d11_fill_subres_data(img, &desc->content); + _sg_d3d11_fill_subres_data(img, &desc->data); init_data = _sg.d3d11.subres_data; } if (img->cmn.type != SG_IMAGETYPE_3D) { @@ -7800,11 +7923,11 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_image(_sg_image_t* img, const if (0 == img->d3d11.tex2d) { D3D11_TEXTURE2D_DESC d3d11_tex_desc; memset(&d3d11_tex_desc, 0, sizeof(d3d11_tex_desc)); - d3d11_tex_desc.Width = img->cmn.width; - d3d11_tex_desc.Height = img->cmn.height; - d3d11_tex_desc.MipLevels = img->cmn.num_mipmaps; + d3d11_tex_desc.Width = (UINT)img->cmn.width; + d3d11_tex_desc.Height = (UINT)img->cmn.height; + d3d11_tex_desc.MipLevels = (UINT)img->cmn.num_mipmaps; switch (img->cmn.type) { - case SG_IMAGETYPE_ARRAY: d3d11_tex_desc.ArraySize = img->cmn.num_slices; break; + case SG_IMAGETYPE_ARRAY: d3d11_tex_desc.ArraySize = (UINT)img->cmn.num_slices; break; case SG_IMAGETYPE_CUBE: d3d11_tex_desc.ArraySize = 6; break; default: d3d11_tex_desc.ArraySize = 1; break; } @@ -7842,16 +7965,16 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_image(_sg_image_t* img, const switch (img->cmn.type) { case SG_IMAGETYPE_2D: d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D; - d3d11_srv_desc.Texture2D.MipLevels = img->cmn.num_mipmaps; + d3d11_srv_desc.Texture2D.MipLevels = (UINT)img->cmn.num_mipmaps; break; case SG_IMAGETYPE_CUBE: d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURECUBE; - d3d11_srv_desc.TextureCube.MipLevels = img->cmn.num_mipmaps; + d3d11_srv_desc.TextureCube.MipLevels = (UINT)img->cmn.num_mipmaps; break; case SG_IMAGETYPE_ARRAY: d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2DARRAY; - d3d11_srv_desc.Texture2DArray.MipLevels = img->cmn.num_mipmaps; - d3d11_srv_desc.Texture2DArray.ArraySize = img->cmn.num_slices; + d3d11_srv_desc.Texture2DArray.MipLevels = (UINT)img->cmn.num_mipmaps; + d3d11_srv_desc.Texture2DArray.ArraySize = (UINT)img->cmn.num_slices; break; default: SOKOL_UNREACHABLE; break; @@ -7881,10 +8004,10 @@ _SOKOL_PRIVATE sg_resource_state 
_sg_d3d11_create_image(_sg_image_t* img, const if (0 == img->d3d11.tex3d) { D3D11_TEXTURE3D_DESC d3d11_tex_desc; memset(&d3d11_tex_desc, 0, sizeof(d3d11_tex_desc)); - d3d11_tex_desc.Width = img->cmn.width; - d3d11_tex_desc.Height = img->cmn.height; - d3d11_tex_desc.Depth = img->cmn.num_slices; - d3d11_tex_desc.MipLevels = img->cmn.num_mipmaps; + d3d11_tex_desc.Width = (UINT)img->cmn.width; + d3d11_tex_desc.Height = (UINT)img->cmn.height; + d3d11_tex_desc.Depth = (UINT)img->cmn.num_slices; + d3d11_tex_desc.MipLevels = (UINT)img->cmn.num_mipmaps; d3d11_tex_desc.BindFlags = D3D11_BIND_SHADER_RESOURCE; d3d11_tex_desc.Format = img->d3d11.format; if (img->cmn.render_target) { @@ -7912,7 +8035,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_image(_sg_image_t* img, const memset(&d3d11_srv_desc, 0, sizeof(d3d11_srv_desc)); d3d11_srv_desc.Format = img->d3d11.format; d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE3D; - d3d11_srv_desc.Texture3D.MipLevels = img->cmn.num_mipmaps; + d3d11_srv_desc.Texture3D.MipLevels = (UINT)img->cmn.num_mipmaps; hr = _sg_d3d11_CreateShaderResourceView(_sg.d3d11.dev, (ID3D11Resource*)img->d3d11.tex3d, &d3d11_srv_desc, &img->d3d11.srv); SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11.srv); } @@ -7922,15 +8045,15 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_image(_sg_image_t* img, const if (msaa) { D3D11_TEXTURE2D_DESC d3d11_tex_desc; memset(&d3d11_tex_desc, 0, sizeof(d3d11_tex_desc)); - d3d11_tex_desc.Width = img->cmn.width; - d3d11_tex_desc.Height = img->cmn.height; + d3d11_tex_desc.Width = (UINT)img->cmn.width; + d3d11_tex_desc.Height = (UINT)img->cmn.height; d3d11_tex_desc.MipLevels = 1; d3d11_tex_desc.ArraySize = 1; d3d11_tex_desc.Format = img->d3d11.format; d3d11_tex_desc.Usage = D3D11_USAGE_DEFAULT; d3d11_tex_desc.BindFlags = D3D11_BIND_RENDER_TARGET; d3d11_tex_desc.CPUAccessFlags = 0; - d3d11_tex_desc.SampleDesc.Count = img->cmn.sample_count; + d3d11_tex_desc.SampleDesc.Count = (UINT)img->cmn.sample_count; d3d11_tex_desc.SampleDesc.Quality = (UINT)D3D11_STANDARD_MULTISAMPLE_PATTERN; hr = _sg_d3d11_CreateTexture2D(_sg.d3d11.dev, &d3d11_tex_desc, NULL, &img->d3d11.texmsaa); SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11.texmsaa); @@ -8050,8 +8173,6 @@ _SOKOL_PRIVATE ID3DBlob* _sg_d3d11_compile_shader(const sg_shader_stage_desc* st return output; } -#define _sg_d3d11_roundup(val, round_to) (((val)+((round_to)-1))&~((round_to)-1)) - _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) { SOKOL_ASSERT(shd && desc); SOKOL_ASSERT(!shd->d3d11.vs && !shd->d3d11.fs && !shd->d3d11.vs_blob); @@ -8077,7 +8198,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_shader(_sg_shader_t* shd, cons SOKOL_ASSERT(0 == d3d11_stage->cbufs[ub_index]); D3D11_BUFFER_DESC cb_desc; memset(&cb_desc, 0, sizeof(cb_desc)); - cb_desc.ByteWidth = _sg_d3d11_roundup(ub->size, 16); + cb_desc.ByteWidth = (UINT)_sg_roundup((int)ub->size, 16); cb_desc.Usage = D3D11_USAGE_DEFAULT; cb_desc.BindFlags = D3D11_BIND_CONSTANT_BUFFER; hr = _sg_d3d11_CreateBuffer(_sg.d3d11.dev, &cb_desc, NULL, &d3d11_stage->cbufs[ub_index]); @@ -8088,12 +8209,12 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_shader(_sg_shader_t* shd, cons const void* vs_ptr = 0, *fs_ptr = 0; SIZE_T vs_length = 0, fs_length = 0; ID3DBlob* vs_blob = 0, *fs_blob = 0; - if (desc->vs.byte_code && desc->fs.byte_code) { + if (desc->vs.bytecode.ptr && desc->fs.bytecode.ptr) { /* create from shader byte code */ - vs_ptr = desc->vs.byte_code; - fs_ptr = desc->fs.byte_code; - vs_length = 
desc->vs.byte_code_size; - fs_length = desc->fs.byte_code_size; + vs_ptr = desc->vs.bytecode.ptr; + fs_ptr = desc->fs.bytecode.ptr; + vs_length = desc->vs.bytecode.size; + fs_length = desc->fs.bytecode.size; } else { /* compile from shader source code */ @@ -8116,8 +8237,8 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_shader(_sg_shader_t* shd, cons /* need to store the vertex shader byte code, this is needed later in sg_create_pipeline */ if (vs_succeeded && fs_succeeded) { - shd->d3d11.vs_blob_length = (int)vs_length; - shd->d3d11.vs_blob = SOKOL_MALLOC((int)vs_length); + shd->d3d11.vs_blob_length = vs_length; + shd->d3d11.vs_blob = SOKOL_MALLOC((size_t)vs_length); SOKOL_ASSERT(shd->d3d11.vs_blob); memcpy(shd->d3d11.vs_blob, vs_ptr, vs_length); result = SG_RESOURCESTATE_VALID; @@ -8165,7 +8286,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_pipeline(_sg_pipeline_t* pip, _sg_pipeline_common_init(&pip->cmn, desc); pip->d3d11.index_format = _sg_d3d11_index_format(pip->cmn.index_type); pip->d3d11.topology = _sg_d3d11_primitive_topology(desc->primitive_type); - pip->d3d11.stencil_ref = desc->depth_stencil.stencil_ref; + pip->d3d11.stencil_ref = desc->stencil.ref; /* create input layout object */ HRESULT hr; @@ -8178,19 +8299,19 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_pipeline(_sg_pipeline_t* pip, if (a_desc->format == SG_VERTEXFORMAT_INVALID) { break; } - SOKOL_ASSERT((a_desc->buffer_index >= 0) && (a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS)); + SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS); const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[a_desc->buffer_index]; const sg_vertex_step step_func = l_desc->step_func; const int step_rate = l_desc->step_rate; D3D11_INPUT_ELEMENT_DESC* d3d11_comp = &d3d11_comps[attr_index]; d3d11_comp->SemanticName = _sg_strptr(&shd->d3d11.attrs[attr_index].sem_name); - d3d11_comp->SemanticIndex = shd->d3d11.attrs[attr_index].sem_index; + d3d11_comp->SemanticIndex = (UINT)shd->d3d11.attrs[attr_index].sem_index; d3d11_comp->Format = _sg_d3d11_vertex_format(a_desc->format); - d3d11_comp->InputSlot = a_desc->buffer_index; - d3d11_comp->AlignedByteOffset = a_desc->offset; + d3d11_comp->InputSlot = (UINT)a_desc->buffer_index; + d3d11_comp->AlignedByteOffset = (UINT)a_desc->offset; d3d11_comp->InputSlotClass = _sg_d3d11_input_classification(step_func); if (SG_VERTEXSTEP_PER_INSTANCE == step_func) { - d3d11_comp->InstanceDataStepRate = step_rate; + d3d11_comp->InstanceDataStepRate = (UINT)step_rate; } pip->cmn.vertex_layout_valid[a_desc->buffer_index] = true; } @@ -8198,7 +8319,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_pipeline(_sg_pipeline_t* pip, if (pip->cmn.vertex_layout_valid[layout_index]) { const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[layout_index]; SOKOL_ASSERT(l_desc->stride > 0); - pip->d3d11.vb_strides[layout_index] = l_desc->stride; + pip->d3d11.vb_strides[layout_index] = (UINT)l_desc->stride; } else { pip->d3d11.vb_strides[layout_index] = 0; @@ -8206,7 +8327,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_pipeline(_sg_pipeline_t* pip, } hr = _sg_d3d11_CreateInputLayout(_sg.d3d11.dev, d3d11_comps, /* pInputElementDesc */ - attr_index, /* NumElements */ + (UINT)attr_index, /* NumElements */ shd->d3d11.vs_blob, /* pShaderByteCodeWithInputSignature */ shd->d3d11.vs_blob_length, /* BytecodeLength */ &pip->d3d11.il); @@ -8216,14 +8337,14 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_pipeline(_sg_pipeline_t* pip, D3D11_RASTERIZER_DESC rs_desc; memset(&rs_desc, 0, 
sizeof(rs_desc)); rs_desc.FillMode = D3D11_FILL_SOLID; - rs_desc.CullMode = _sg_d3d11_cull_mode(desc->rasterizer.cull_mode); - rs_desc.FrontCounterClockwise = desc->rasterizer.face_winding == SG_FACEWINDING_CCW; + rs_desc.CullMode = _sg_d3d11_cull_mode(desc->cull_mode); + rs_desc.FrontCounterClockwise = desc->face_winding == SG_FACEWINDING_CCW; rs_desc.DepthBias = (INT) pip->cmn.depth_bias; rs_desc.DepthBiasClamp = pip->cmn.depth_bias_clamp; rs_desc.SlopeScaledDepthBias = pip->cmn.depth_bias_slope_scale; rs_desc.DepthClipEnable = TRUE; rs_desc.ScissorEnable = TRUE; - rs_desc.MultisampleEnable = desc->rasterizer.sample_count > 1; + rs_desc.MultisampleEnable = desc->sample_count > 1; rs_desc.AntialiasedLineEnable = FALSE; hr = _sg_d3d11_CreateRasterizerState(_sg.d3d11.dev, &rs_desc, &pip->d3d11.rs); SOKOL_ASSERT(SUCCEEDED(hr) && pip->d3d11.rs); @@ -8232,37 +8353,52 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_pipeline(_sg_pipeline_t* pip, D3D11_DEPTH_STENCIL_DESC dss_desc; memset(&dss_desc, 0, sizeof(dss_desc)); dss_desc.DepthEnable = TRUE; - dss_desc.DepthWriteMask = desc->depth_stencil.depth_write_enabled ? D3D11_DEPTH_WRITE_MASK_ALL : D3D11_DEPTH_WRITE_MASK_ZERO; - dss_desc.DepthFunc = _sg_d3d11_compare_func(desc->depth_stencil.depth_compare_func); - dss_desc.StencilEnable = desc->depth_stencil.stencil_enabled; - dss_desc.StencilReadMask = desc->depth_stencil.stencil_read_mask; - dss_desc.StencilWriteMask = desc->depth_stencil.stencil_write_mask; - const sg_stencil_state* sf = &desc->depth_stencil.stencil_front; + dss_desc.DepthWriteMask = desc->depth.write_enabled ? D3D11_DEPTH_WRITE_MASK_ALL : D3D11_DEPTH_WRITE_MASK_ZERO; + dss_desc.DepthFunc = _sg_d3d11_compare_func(desc->depth.compare); + dss_desc.StencilEnable = desc->stencil.enabled; + dss_desc.StencilReadMask = desc->stencil.read_mask; + dss_desc.StencilWriteMask = desc->stencil.write_mask; + const sg_stencil_face_state* sf = &desc->stencil.front; dss_desc.FrontFace.StencilFailOp = _sg_d3d11_stencil_op(sf->fail_op); dss_desc.FrontFace.StencilDepthFailOp = _sg_d3d11_stencil_op(sf->depth_fail_op); dss_desc.FrontFace.StencilPassOp = _sg_d3d11_stencil_op(sf->pass_op); - dss_desc.FrontFace.StencilFunc = _sg_d3d11_compare_func(sf->compare_func); - const sg_stencil_state* sb = &desc->depth_stencil.stencil_back; + dss_desc.FrontFace.StencilFunc = _sg_d3d11_compare_func(sf->compare); + const sg_stencil_face_state* sb = &desc->stencil.back; dss_desc.BackFace.StencilFailOp = _sg_d3d11_stencil_op(sb->fail_op); dss_desc.BackFace.StencilDepthFailOp = _sg_d3d11_stencil_op(sb->depth_fail_op); dss_desc.BackFace.StencilPassOp = _sg_d3d11_stencil_op(sb->pass_op); - dss_desc.BackFace.StencilFunc = _sg_d3d11_compare_func(sb->compare_func); + dss_desc.BackFace.StencilFunc = _sg_d3d11_compare_func(sb->compare); hr = _sg_d3d11_CreateDepthStencilState(_sg.d3d11.dev, &dss_desc, &pip->d3d11.dss); SOKOL_ASSERT(SUCCEEDED(hr) && pip->d3d11.dss); /* create blend state */ D3D11_BLEND_DESC bs_desc; memset(&bs_desc, 0, sizeof(bs_desc)); - bs_desc.AlphaToCoverageEnable = desc->rasterizer.alpha_to_coverage_enabled; - bs_desc.IndependentBlendEnable = FALSE; - bs_desc.RenderTarget[0].BlendEnable = desc->blend.enabled; - bs_desc.RenderTarget[0].SrcBlend = _sg_d3d11_blend_factor(desc->blend.src_factor_rgb); - bs_desc.RenderTarget[0].DestBlend = _sg_d3d11_blend_factor(desc->blend.dst_factor_rgb); - bs_desc.RenderTarget[0].BlendOp = _sg_d3d11_blend_op(desc->blend.op_rgb); - bs_desc.RenderTarget[0].SrcBlendAlpha = 
_sg_d3d11_blend_factor(desc->blend.src_factor_alpha); - bs_desc.RenderTarget[0].DestBlendAlpha = _sg_d3d11_blend_factor(desc->blend.dst_factor_alpha); - bs_desc.RenderTarget[0].BlendOpAlpha = _sg_d3d11_blend_op(desc->blend.op_alpha); - bs_desc.RenderTarget[0].RenderTargetWriteMask = _sg_d3d11_color_write_mask((sg_color_mask)desc->blend.color_write_mask); + bs_desc.AlphaToCoverageEnable = desc->alpha_to_coverage_enabled; + bs_desc.IndependentBlendEnable = TRUE; + { + int i = 0; + for (i = 0; i < desc->color_count; i++) { + const sg_blend_state* src = &desc->colors[i].blend; + D3D11_RENDER_TARGET_BLEND_DESC* dst = &bs_desc.RenderTarget[i]; + dst->BlendEnable = src->enabled; + dst->SrcBlend = _sg_d3d11_blend_factor(src->src_factor_rgb); + dst->DestBlend = _sg_d3d11_blend_factor(src->dst_factor_rgb); + dst->BlendOp = _sg_d3d11_blend_op(src->op_rgb); + dst->SrcBlendAlpha = _sg_d3d11_blend_factor(src->src_factor_alpha); + dst->DestBlendAlpha = _sg_d3d11_blend_factor(src->dst_factor_alpha); + dst->BlendOpAlpha = _sg_d3d11_blend_op(src->op_alpha); + dst->RenderTargetWriteMask = _sg_d3d11_color_write_mask(desc->colors[i].write_mask); + } + for (; i < 8; i++) { + D3D11_RENDER_TARGET_BLEND_DESC* dst = &bs_desc.RenderTarget[i]; + dst->BlendEnable = FALSE; + dst->SrcBlend = dst->SrcBlendAlpha = D3D11_BLEND_ONE; + dst->DestBlend = dst->DestBlendAlpha = D3D11_BLEND_ZERO; + dst->BlendOp = dst->BlendOpAlpha = D3D11_BLEND_OP_ADD; + dst->RenderTargetWriteMask = D3D11_COLOR_WRITE_ENABLE_ALL; + } + } hr = _sg_d3d11_CreateBlendState(_sg.d3d11.dev, &bs_desc, &pip->d3d11.bs); SOKOL_ASSERT(SUCCEEDED(hr) && pip->d3d11.bs); @@ -8293,7 +8429,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_pass(_sg_pass_t* pass, _sg_ima _sg_pass_common_init(&pass->cmn, desc); for (int i = 0; i < pass->cmn.num_color_atts; i++) { - const sg_attachment_desc* att_desc = &desc->color_attachments[i]; + const sg_pass_attachment_desc* att_desc = &desc->color_attachments[i]; _SOKOL_UNUSED(att_desc); SOKOL_ASSERT(att_desc->image.id != SG_INVALID_ID); _sg_image_t* att_img = att_images[i]; @@ -8303,7 +8439,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_pass(_sg_pass_t* pass, _sg_ima pass->d3d11.color_atts[i].image = att_img; /* create D3D11 render-target-view */ - const _sg_attachment_t* cmn_att = &pass->cmn.color_atts[i]; + const _sg_pass_attachment_t* cmn_att = &pass->cmn.color_atts[i]; SOKOL_ASSERT(0 == pass->d3d11.color_atts[i].rtv); ID3D11Resource* d3d11_res = 0; const bool is_msaa = att_img->cmn.sample_count > 1; @@ -8318,22 +8454,22 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_pass(_sg_pass_t* pass, _sg_ima else { d3d11_res = (ID3D11Resource*) att_img->d3d11.tex2d; d3d11_rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D; - d3d11_rtv_desc.Texture2D.MipSlice = cmn_att->mip_level; + d3d11_rtv_desc.Texture2D.MipSlice = (UINT)cmn_att->mip_level; } } else if ((att_img->cmn.type == SG_IMAGETYPE_CUBE) || (att_img->cmn.type == SG_IMAGETYPE_ARRAY)) { d3d11_res = (ID3D11Resource*) att_img->d3d11.tex2d; d3d11_rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2DARRAY; - d3d11_rtv_desc.Texture2DArray.MipSlice = cmn_att->mip_level; - d3d11_rtv_desc.Texture2DArray.FirstArraySlice = cmn_att->slice; + d3d11_rtv_desc.Texture2DArray.MipSlice = (UINT)cmn_att->mip_level; + d3d11_rtv_desc.Texture2DArray.FirstArraySlice = (UINT)cmn_att->slice; d3d11_rtv_desc.Texture2DArray.ArraySize = 1; } else { SOKOL_ASSERT(att_img->cmn.type == SG_IMAGETYPE_3D); d3d11_res = (ID3D11Resource*) att_img->d3d11.tex3d; d3d11_rtv_desc.ViewDimension = 
D3D11_RTV_DIMENSION_TEXTURE3D; - d3d11_rtv_desc.Texture3D.MipSlice = cmn_att->mip_level; - d3d11_rtv_desc.Texture3D.FirstWSlice = cmn_att->slice; + d3d11_rtv_desc.Texture3D.MipSlice = (UINT)cmn_att->mip_level; + d3d11_rtv_desc.Texture3D.FirstWSlice = (UINT)cmn_att->slice; d3d11_rtv_desc.Texture3D.WSize = 1; } SOKOL_ASSERT(d3d11_res); @@ -8347,7 +8483,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_pass(_sg_pass_t* pass, _sg_ima SOKOL_ASSERT(0 == pass->d3d11.ds_att.dsv); if (desc->depth_stencil_attachment.image.id != SG_INVALID_ID) { const int ds_img_index = SG_MAX_COLOR_ATTACHMENTS; - const sg_attachment_desc* att_desc = &desc->depth_stencil_attachment; + const sg_pass_attachment_desc* att_desc = &desc->depth_stencil_attachment; _SOKOL_UNUSED(att_desc); _sg_image_t* att_img = att_images[ds_img_index]; SOKOL_ASSERT(att_img && (att_img->slot.id == att_desc->image.id)); @@ -8461,7 +8597,7 @@ _SOKOL_PRIVATE void _sg_d3d11_begin_pass(_sg_pass_t* pass, const sg_pass_action* /* perform clear action */ for (int i = 0; i < _sg.d3d11.num_rtvs; i++) { if (action->colors[i].action == SG_ACTION_CLEAR) { - _sg_d3d11_ClearRenderTargetView(_sg.d3d11.ctx, _sg.d3d11.cur_rtvs[i], action->colors[i].val); + _sg_d3d11_ClearRenderTargetView(_sg.d3d11.ctx, _sg.d3d11.cur_rtvs[i], &action->colors[i].value.r); } } UINT ds_flags = 0; @@ -8472,7 +8608,7 @@ _SOKOL_PRIVATE void _sg_d3d11_begin_pass(_sg_pass_t* pass, const sg_pass_action* ds_flags |= D3D11_CLEAR_STENCIL; } if ((0 != ds_flags) && _sg.d3d11.cur_dsv) { - _sg_d3d11_ClearDepthStencilView(_sg.d3d11.ctx, _sg.d3d11.cur_dsv, ds_flags, action->depth.val, action->stencil.val); + _sg_d3d11_ClearDepthStencilView(_sg.d3d11.ctx, _sg.d3d11.cur_dsv, ds_flags, action->depth.value, action->stencil.value); } } @@ -8489,14 +8625,14 @@ _SOKOL_PRIVATE void _sg_d3d11_end_pass(void) { if (_sg.d3d11.cur_pass) { SOKOL_ASSERT(_sg.d3d11.cur_pass->slot.id == _sg.d3d11.cur_pass_id.id); for (int i = 0; i < _sg.d3d11.num_rtvs; i++) { - _sg_attachment_t* cmn_att = &_sg.d3d11.cur_pass->cmn.color_atts[i]; + _sg_pass_attachment_t* cmn_att = &_sg.d3d11.cur_pass->cmn.color_atts[i]; _sg_image_t* att_img = _sg.d3d11.cur_pass->d3d11.color_atts[i].image; SOKOL_ASSERT(att_img && (att_img->slot.id == cmn_att->image_id.id)); if (att_img->cmn.sample_count > 1) { /* FIXME: support MSAA resolve into 3D texture */ SOKOL_ASSERT(att_img->d3d11.tex2d && att_img->d3d11.texmsaa && !att_img->d3d11.tex3d); SOKOL_ASSERT(DXGI_FORMAT_UNKNOWN != att_img->d3d11.format); - UINT dst_subres = _sg_d3d11_calcsubresource(cmn_att->mip_level, cmn_att->slice, att_img->cmn.num_mipmaps); + UINT dst_subres = _sg_d3d11_calcsubresource((UINT)cmn_att->mip_level, (UINT)cmn_att->slice, (UINT)att_img->cmn.num_mipmaps); _sg_d3d11_ResolveSubresource(_sg.d3d11.ctx, (ID3D11Resource*) att_img->d3d11.tex2d, /* pDstResource */ dst_subres, /* DstSubresource */ @@ -8554,7 +8690,7 @@ _SOKOL_PRIVATE void _sg_d3d11_apply_pipeline(_sg_pipeline_t* pip) { _sg_d3d11_RSSetState(_sg.d3d11.ctx, pip->d3d11.rs); _sg_d3d11_OMSetDepthStencilState(_sg.d3d11.ctx, pip->d3d11.dss, pip->d3d11.stencil_ref); - _sg_d3d11_OMSetBlendState(_sg.d3d11.ctx, pip->d3d11.bs, pip->cmn.blend_color, 0xFFFFFFFF); + _sg_d3d11_OMSetBlendState(_sg.d3d11.ctx, pip->d3d11.bs, &pip->cmn.blend_color.r, 0xFFFFFFFF); _sg_d3d11_IASetPrimitiveTopology(_sg.d3d11.ctx, pip->d3d11.topology); _sg_d3d11_IASetInputLayout(_sg.d3d11.ctx, pip->d3d11.il); _sg_d3d11_VSSetShader(_sg.d3d11.ctx, pip->shader->d3d11.vs, NULL, 0); @@ -8586,7 +8722,7 @@ _SOKOL_PRIVATE void _sg_d3d11_apply_bindings( 
    for (i = 0; i < num_vbs; i++) {
        SOKOL_ASSERT(vbs[i]->d3d11.buf);
        d3d11_vbs[i] = vbs[i]->d3d11.buf;
-        d3d11_vb_offsets[i] = vb_offsets[i];
+        d3d11_vb_offsets[i] = (UINT)vb_offsets[i];
    }
    for (; i < SG_MAX_SHADERSTAGE_BUFFERS; i++) {
        d3d11_vbs[i] = 0;
@@ -8614,44 +8750,40 @@
    }
    _sg_d3d11_IASetVertexBuffers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_BUFFERS, d3d11_vbs, pip->d3d11.vb_strides, d3d11_vb_offsets);
-    _sg_d3d11_IASetIndexBuffer(_sg.d3d11.ctx, d3d11_ib, pip->d3d11.index_format, ib_offset);
+    _sg_d3d11_IASetIndexBuffer(_sg.d3d11.ctx, d3d11_ib, pip->d3d11.index_format, (UINT)ib_offset);
    _sg_d3d11_VSSetShaderResources(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_vs_srvs);
    _sg_d3d11_VSSetSamplers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_vs_smps);
    _sg_d3d11_PSSetShaderResources(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_fs_srvs);
    _sg_d3d11_PSSetSamplers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_fs_smps);
}

-_SOKOL_PRIVATE void _sg_d3d11_apply_uniforms(sg_shader_stage stage_index, int ub_index, const void* data, int num_bytes) {
-    _SOKOL_UNUSED(num_bytes);
+_SOKOL_PRIVATE void _sg_d3d11_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) {
    SOKOL_ASSERT(_sg.d3d11.ctx && _sg.d3d11.in_pass);
-    SOKOL_ASSERT(data && (num_bytes > 0));
-    SOKOL_ASSERT((stage_index >= 0) && ((int)stage_index < SG_NUM_SHADER_STAGES));
-    SOKOL_ASSERT((ub_index >= 0) && (ub_index < SG_MAX_SHADERSTAGE_UBS));
    SOKOL_ASSERT(_sg.d3d11.cur_pipeline && _sg.d3d11.cur_pipeline->slot.id == _sg.d3d11.cur_pipeline_id.id);
    SOKOL_ASSERT(_sg.d3d11.cur_pipeline->shader && _sg.d3d11.cur_pipeline->shader->slot.id == _sg.d3d11.cur_pipeline->cmn.shader_id.id);
    SOKOL_ASSERT(ub_index < _sg.d3d11.cur_pipeline->shader->cmn.stage[stage_index].num_uniform_blocks);
-    SOKOL_ASSERT(num_bytes == _sg.d3d11.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size);
+    SOKOL_ASSERT(data->size == _sg.d3d11.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size);
    ID3D11Buffer* cb = _sg.d3d11.cur_pipeline->shader->d3d11.stage[stage_index].cbufs[ub_index];
    SOKOL_ASSERT(cb);
-    _sg_d3d11_UpdateSubresource(_sg.d3d11.ctx, (ID3D11Resource*)cb, 0, NULL, data, 0, 0);
+    _sg_d3d11_UpdateSubresource(_sg.d3d11.ctx, (ID3D11Resource*)cb, 0, NULL, data->ptr, 0, 0);
}

_SOKOL_PRIVATE void _sg_d3d11_draw(int base_element, int num_elements, int num_instances) {
    SOKOL_ASSERT(_sg.d3d11.in_pass);
    if (_sg.d3d11.use_indexed_draw) {
        if (1 == num_instances) {
-            _sg_d3d11_DrawIndexed(_sg.d3d11.ctx, num_elements, base_element, 0);
+            _sg_d3d11_DrawIndexed(_sg.d3d11.ctx, (UINT)num_elements, (UINT)base_element, 0);
        }
        else {
-            _sg_d3d11_DrawIndexedInstanced(_sg.d3d11.ctx, num_elements, num_instances, base_element, 0, 0);
+            _sg_d3d11_DrawIndexedInstanced(_sg.d3d11.ctx, (UINT)num_elements, (UINT)num_instances, (UINT)base_element, 0, 0);
        }
    }
    else {
        if (1 == num_instances) {
-            _sg_d3d11_Draw(_sg.d3d11.ctx, num_elements, base_element);
+            _sg_d3d11_Draw(_sg.d3d11.ctx, (UINT)num_elements, (UINT)base_element);
        }
        else {
-            _sg_d3d11_DrawInstanced(_sg.d3d11.ctx, num_elements, num_instances, base_element, 0);
+            _sg_d3d11_DrawInstanced(_sg.d3d11.ctx, (UINT)num_elements, (UINT)num_instances, (UINT)base_element, 0);
        }
    }
}
@@ -8660,20 +8792,20 @@ _SOKOL_PRIVATE void _sg_d3d11_commit(void) {
    SOKOL_ASSERT(!_sg.d3d11.in_pass);
}

-_SOKOL_PRIVATE void _sg_d3d11_update_buffer(_sg_buffer_t* buf, const void* data_ptr, uint32_t data_size) {
-    SOKOL_ASSERT(buf && data_ptr && (data_size > 0));
+_SOKOL_PRIVATE void _sg_d3d11_update_buffer(_sg_buffer_t* buf, const sg_range* data) {
+    SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0));
    SOKOL_ASSERT(_sg.d3d11.ctx);
    SOKOL_ASSERT(buf->d3d11.buf);
    D3D11_MAPPED_SUBRESOURCE d3d11_msr;
    HRESULT hr = _sg_d3d11_Map(_sg.d3d11.ctx, (ID3D11Resource*)buf->d3d11.buf, 0, D3D11_MAP_WRITE_DISCARD, 0, &d3d11_msr);
    _SOKOL_UNUSED(hr);
    SOKOL_ASSERT(SUCCEEDED(hr));
-    memcpy(d3d11_msr.pData, data_ptr, data_size);
+    memcpy(d3d11_msr.pData, data->ptr, data->size);
    _sg_d3d11_Unmap(_sg.d3d11.ctx, (ID3D11Resource*)buf->d3d11.buf, 0);
}

-_SOKOL_PRIVATE uint32_t _sg_d3d11_append_buffer(_sg_buffer_t* buf, const void* data_ptr, uint32_t data_size, bool new_frame) {
-    SOKOL_ASSERT(buf && data_ptr && (data_size > 0));
+_SOKOL_PRIVATE int _sg_d3d11_append_buffer(_sg_buffer_t* buf, const sg_range* data, bool new_frame) {
+    SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0));
    SOKOL_ASSERT(_sg.d3d11.ctx);
    SOKOL_ASSERT(buf->d3d11.buf);
    D3D11_MAP map_type = new_frame ? D3D11_MAP_WRITE_DISCARD : D3D11_MAP_WRITE_NO_OVERWRITE;
@@ -8682,13 +8814,13 @@ _SOKOL_PRIVATE uint32_t _sg_d3d11_append_buffer(_sg_buffer_t* buf, const void* d
    _SOKOL_UNUSED(hr);
    SOKOL_ASSERT(SUCCEEDED(hr));
    uint8_t* dst_ptr = (uint8_t*)d3d11_msr.pData + buf->cmn.append_pos;
-    memcpy(dst_ptr, data_ptr, data_size);
+    memcpy(dst_ptr, data->ptr, data->size);
    _sg_d3d11_Unmap(_sg.d3d11.ctx, (ID3D11Resource*)buf->d3d11.buf, 0);
    /* NOTE: this is a requirement from WebGPU, but we want identical behaviour across all backend */
-    return _sg_roundup(data_size, 4);
+    return _sg_roundup((int)data->size, 4);
}

-_SOKOL_PRIVATE void _sg_d3d11_update_image(_sg_image_t* img, const sg_image_content* data) {
+_SOKOL_PRIVATE void _sg_d3d11_update_image(_sg_image_t* img, const sg_image_data* data) {
    SOKOL_ASSERT(img && data);
    SOKOL_ASSERT(_sg.d3d11.ctx);
    SOKOL_ASSERT(img->d3d11.tex2d || img->d3d11.tex3d);
@@ -8702,7 +8834,7 @@ _SOKOL_PRIVATE void _sg_d3d11_update_image(_sg_image_t* img, const sg_image_cont
    SOKOL_ASSERT(d3d11_res);
    const int num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1;
    const int num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? img->cmn.num_slices:1;
-    int subres_index = 0;
+    UINT subres_index = 0;
    HRESULT hr;
    _SOKOL_UNUSED(hr);
    D3D11_MAPPED_SUBRESOURCE d3d11_msr;
@@ -8713,10 +8845,10 @@ _SOKOL_PRIVATE void _sg_d3d11_update_image(_sg_image_t* img, const sg_image_cont
                const int mip_width = ((img->cmn.width>>mip_index)>0) ? img->cmn.width>>mip_index : 1;
                const int mip_height = ((img->cmn.height>>mip_index)>0) ? img->cmn.height>>mip_index : 1;
                const int src_pitch = _sg_row_pitch(img->cmn.pixel_format, mip_width, 1);
-                const sg_subimage_content* subimg_content = &(data->subimage[face_index][mip_index]);
-                const int slice_size = subimg_content->size / num_slices;
-                const int slice_offset = slice_size * slice_index;
-                const uint8_t* slice_ptr = ((const uint8_t*)subimg_content->ptr) + slice_offset;
+                const sg_range* subimg_data = &(data->subimage[face_index][mip_index]);
+                const size_t slice_size = subimg_data->size / (size_t)num_slices;
+                const size_t slice_offset = slice_size * (size_t)slice_index;
+                const uint8_t* slice_ptr = ((const uint8_t*)subimg_data->ptr) + slice_offset;
                hr = _sg_d3d11_Map(_sg.d3d11.ctx, d3d11_res, subres_index, D3D11_MAP_WRITE_DISCARD, 0, &d3d11_msr);
                SOKOL_ASSERT(SUCCEEDED(hr));
                /* FIXME: need to handle difference in depth-pitch for 3D textures as well!
*/ @@ -8728,7 +8860,7 @@ _SOKOL_PRIVATE void _sg_d3d11_update_image(_sg_image_t* img, const sg_image_cont const uint8_t* src_ptr = slice_ptr; uint8_t* dst_ptr = (uint8_t*) d3d11_msr.pData; for (int row_index = 0; row_index < mip_height; row_index++) { - memcpy(dst_ptr, src_ptr, src_pitch); + memcpy(dst_ptr, src_ptr, (size_t)src_pitch); src_ptr += src_pitch; dst_ptr += d3d11_msr.RowPitch; } @@ -8988,7 +9120,7 @@ _SOKOL_PRIVATE MTLIndexType _sg_mtl_index_type(sg_index_type t) { } } -_SOKOL_PRIVATE NSUInteger _sg_mtl_index_size(sg_index_type t) { +_SOKOL_PRIVATE int _sg_mtl_index_size(sg_index_type t) { switch (t) { case SG_INDEXTYPE_NONE: return 0; case SG_INDEXTYPE_UINT16: return 2; @@ -9087,19 +9219,19 @@ _SOKOL_PRIVATE void _sg_mtl_init_pool(const sg_desc* desc) { 2 * desc->pipeline_pool_size + desc->pass_pool_size ); - _sg.mtl.idpool.pool = [NSMutableArray arrayWithCapacity:_sg.mtl.idpool.num_slots]; + _sg.mtl.idpool.pool = [NSMutableArray arrayWithCapacity:(NSUInteger)_sg.mtl.idpool.num_slots]; _SG_OBJC_RETAIN(_sg.mtl.idpool.pool); NSNull* null = [NSNull null]; - for (uint32_t i = 0; i < _sg.mtl.idpool.num_slots; i++) { + for (int i = 0; i < _sg.mtl.idpool.num_slots; i++) { [_sg.mtl.idpool.pool addObject:null]; } - SOKOL_ASSERT([_sg.mtl.idpool.pool count] == _sg.mtl.idpool.num_slots); + SOKOL_ASSERT([_sg.mtl.idpool.pool count] == (NSUInteger)_sg.mtl.idpool.num_slots); /* a queue of currently free slot indices */ _sg.mtl.idpool.free_queue_top = 0; - _sg.mtl.idpool.free_queue = (uint32_t*)SOKOL_MALLOC(_sg.mtl.idpool.num_slots * sizeof(uint32_t)); + _sg.mtl.idpool.free_queue = (int*)SOKOL_MALLOC((size_t)_sg.mtl.idpool.num_slots * sizeof(int)); /* pool slot 0 is reserved! */ for (int i = _sg.mtl.idpool.num_slots-1; i >= 1; i--) { - _sg.mtl.idpool.free_queue[_sg.mtl.idpool.free_queue_top++] = (uint32_t)i; + _sg.mtl.idpool.free_queue[_sg.mtl.idpool.free_queue_top++] = i; } /* a circular queue which holds release items (frame index when a resource is to be released, and the resource's @@ -9107,8 +9239,8 @@ _SOKOL_PRIVATE void _sg_mtl_init_pool(const sg_desc* desc) { */ _sg.mtl.idpool.release_queue_front = 0; _sg.mtl.idpool.release_queue_back = 0; - _sg.mtl.idpool.release_queue = (_sg_mtl_release_item_t*)SOKOL_MALLOC(_sg.mtl.idpool.num_slots * sizeof(_sg_mtl_release_item_t)); - for (uint32_t i = 0; i < _sg.mtl.idpool.num_slots; i++) { + _sg.mtl.idpool.release_queue = (_sg_mtl_release_item_t*)SOKOL_MALLOC((size_t)_sg.mtl.idpool.num_slots * sizeof(_sg_mtl_release_item_t)); + for (int i = 0; i < _sg.mtl.idpool.num_slots; i++) { _sg.mtl.idpool.release_queue[i].frame_index = 0; _sg.mtl.idpool.release_queue[i].slot_index = _SG_MTL_INVALID_SLOT_INDEX; } @@ -9121,28 +9253,28 @@ _SOKOL_PRIVATE void _sg_mtl_destroy_pool(void) { } /* get a new free resource pool slot */ -_SOKOL_PRIVATE uint32_t _sg_mtl_alloc_pool_slot(void) { +_SOKOL_PRIVATE int _sg_mtl_alloc_pool_slot(void) { SOKOL_ASSERT(_sg.mtl.idpool.free_queue_top > 0); - const uint32_t slot_index = _sg.mtl.idpool.free_queue[--_sg.mtl.idpool.free_queue_top]; + const int slot_index = _sg.mtl.idpool.free_queue[--_sg.mtl.idpool.free_queue_top]; SOKOL_ASSERT((slot_index > 0) && (slot_index < _sg.mtl.idpool.num_slots)); return slot_index; } /* put a free resource pool slot back into the free-queue */ -_SOKOL_PRIVATE void _sg_mtl_free_pool_slot(uint32_t slot_index) { +_SOKOL_PRIVATE void _sg_mtl_free_pool_slot(int slot_index) { SOKOL_ASSERT(_sg.mtl.idpool.free_queue_top < _sg.mtl.idpool.num_slots); SOKOL_ASSERT((slot_index > 0) && (slot_index < 
_sg.mtl.idpool.num_slots)); _sg.mtl.idpool.free_queue[_sg.mtl.idpool.free_queue_top++] = slot_index; } /* add an MTLResource to the pool, return pool index or 0 if input was 'nil' */ -_SOKOL_PRIVATE uint32_t _sg_mtl_add_resource(id res) { +_SOKOL_PRIVATE int _sg_mtl_add_resource(id res) { if (nil == res) { return _SG_MTL_INVALID_SLOT_INDEX; } - const uint32_t slot_index = _sg_mtl_alloc_pool_slot(); - SOKOL_ASSERT([NSNull null] == _sg.mtl.idpool.pool[slot_index]); - _sg.mtl.idpool.pool[slot_index] = res; + const int slot_index = _sg_mtl_alloc_pool_slot(); + SOKOL_ASSERT([NSNull null] == _sg.mtl.idpool.pool[(NSUInteger)slot_index]); + _sg.mtl.idpool.pool[(NSUInteger)slot_index] = res; return slot_index; } @@ -9151,12 +9283,12 @@ _SOKOL_PRIVATE uint32_t _sg_mtl_add_resource(id res) { the special pool index 0 will be ignored (this means that a nil value was provided to _sg_mtl_add_resource() */ -_SOKOL_PRIVATE void _sg_mtl_release_resource(uint32_t frame_index, uint32_t slot_index) { +_SOKOL_PRIVATE void _sg_mtl_release_resource(uint32_t frame_index, int slot_index) { if (slot_index == _SG_MTL_INVALID_SLOT_INDEX) { return; } SOKOL_ASSERT((slot_index > 0) && (slot_index < _sg.mtl.idpool.num_slots)); - SOKOL_ASSERT([NSNull null] != _sg.mtl.idpool.pool[slot_index]); + SOKOL_ASSERT([NSNull null] != _sg.mtl.idpool.pool[(NSUInteger)slot_index]); int release_index = _sg.mtl.idpool.release_queue_front++; if (_sg.mtl.idpool.release_queue_front >= _sg.mtl.idpool.num_slots) { /* wrap-around */ @@ -9178,10 +9310,10 @@ _SOKOL_PRIVATE void _sg_mtl_garbage_collect(uint32_t frame_index) { break; } /* safe to release this resource */ - const uint32_t slot_index = _sg.mtl.idpool.release_queue[_sg.mtl.idpool.release_queue_back].slot_index; + const int slot_index = _sg.mtl.idpool.release_queue[_sg.mtl.idpool.release_queue_back].slot_index; SOKOL_ASSERT((slot_index > 0) && (slot_index < _sg.mtl.idpool.num_slots)); - SOKOL_ASSERT(_sg.mtl.idpool.pool[slot_index] != [NSNull null]); - _SG_OBJC_RELEASE_WITH_NULL(_sg.mtl.idpool.pool[slot_index]); + SOKOL_ASSERT(_sg.mtl.idpool.pool[(NSUInteger)slot_index] != [NSNull null]); + _SG_OBJC_RELEASE_WITH_NULL(_sg.mtl.idpool.pool[(NSUInteger)slot_index]); /* put the now free pool index back on the free queue */ _sg_mtl_free_pool_slot(slot_index); /* reset the release queue slot and advance the back index */ @@ -9195,8 +9327,8 @@ _SOKOL_PRIVATE void _sg_mtl_garbage_collect(uint32_t frame_index) { } } -_SOKOL_PRIVATE id _sg_mtl_id(uint32_t slot_index) { - return _sg.mtl.idpool.pool[slot_index]; +_SOKOL_PRIVATE id _sg_mtl_id(int slot_index) { + return _sg.mtl.idpool.pool[(NSUInteger)slot_index]; } _SOKOL_PRIVATE void _sg_mtl_init_sampler_cache(const sg_desc* desc) { @@ -9209,7 +9341,7 @@ _SOKOL_PRIVATE void _sg_mtl_destroy_sampler_cache(uint32_t frame_index) { SOKOL_ASSERT(_sg.mtl.sampler_cache.items); SOKOL_ASSERT(_sg.mtl.sampler_cache.num_items <= _sg.mtl.sampler_cache.capacity); for (int i = 0; i < _sg.mtl.sampler_cache.num_items; i++) { - _sg_mtl_release_resource(frame_index, (uint32_t)_sg_smpcache_sampler(&_sg.mtl.sampler_cache, i)); + _sg_mtl_release_resource(frame_index, (int)_sg_smpcache_sampler(&_sg.mtl.sampler_cache, i)); } _sg_smpcache_discard(&_sg.mtl.sampler_cache); } @@ -9218,12 +9350,12 @@ _SOKOL_PRIVATE void _sg_mtl_destroy_sampler_cache(uint32_t frame_index) { create and add an MTLSamplerStateObject and return its resource pool index, reuse identical sampler state if one exists */ -_SOKOL_PRIVATE uint32_t _sg_mtl_create_sampler(id mtl_device, const sg_image_desc* 
img_desc) { +_SOKOL_PRIVATE int _sg_mtl_create_sampler(id mtl_device, const sg_image_desc* img_desc) { SOKOL_ASSERT(img_desc); int index = _sg_smpcache_find_item(&_sg.mtl.sampler_cache, img_desc); if (index >= 0) { /* reuse existing sampler */ - return (uint32_t) _sg_smpcache_sampler(&_sg.mtl.sampler_cache, index); + return (int)_sg_smpcache_sampler(&_sg.mtl.sampler_cache, index); } else { /* create a new Metal sampler state object and add to sampler cache */ @@ -9245,8 +9377,8 @@ _SOKOL_PRIVATE uint32_t _sg_mtl_create_sampler(id mtl_device, const s mtl_desc.normalizedCoordinates = YES; id mtl_sampler = [mtl_device newSamplerStateWithDescriptor:mtl_desc]; _SG_OBJC_RELEASE(mtl_desc); - uint32_t sampler_handle = _sg_mtl_add_resource(mtl_sampler); - _sg_smpcache_add_item(&_sg.mtl.sampler_cache, img_desc, sampler_handle); + int sampler_handle = _sg_mtl_add_resource(mtl_sampler); + _sg_smpcache_add_item(&_sg.mtl.sampler_cache, img_desc, (uintptr_t)sampler_handle); return sampler_handle; } } @@ -9277,6 +9409,8 @@ _SOKOL_PRIVATE void _sg_mtl_init_caps(void) { #else _sg.features.image_clamp_to_border = false; #endif + _sg.features.mrt_independent_blend_state = true; + _sg.features.mrt_independent_write_mask = true; #if defined(_SG_TARGET_MACOS) _sg.limits.max_image_size_2d = 16 * 1024; @@ -9421,7 +9555,7 @@ _SOKOL_PRIVATE void _sg_mtl_setup_backend(const sg_desc* desc) { #endif for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) { _sg.mtl.uniform_buffers[i] = [_sg.mtl.device - newBufferWithLength:_sg.mtl.ub_size + newBufferWithLength:(NSUInteger)_sg.mtl.ub_size options:res_opts ]; } @@ -9460,11 +9594,11 @@ _SOKOL_PRIVATE void _sg_mtl_bind_uniform_buffers(void) { [_sg.mtl.cmd_encoder setVertexBuffer:_sg.mtl.uniform_buffers[_sg.mtl.cur_frame_rotate_index] offset:0 - atIndex:slot]; + atIndex:(NSUInteger)slot]; [_sg.mtl.cmd_encoder setFragmentBuffer:_sg.mtl.uniform_buffers[_sg.mtl.cur_frame_rotate_index] offset:0 - atIndex:slot]; + atIndex:(NSUInteger)slot]; } } @@ -9509,11 +9643,11 @@ _SOKOL_PRIVATE sg_resource_state _sg_mtl_create_buffer(_sg_buffer_t* buf, const } else { if (buf->cmn.usage == SG_USAGE_IMMUTABLE) { - SOKOL_ASSERT(desc->content); - mtl_buf = [_sg.mtl.device newBufferWithBytes:desc->content length:buf->cmn.size options:mtl_options]; + SOKOL_ASSERT(desc->data.ptr); + mtl_buf = [_sg.mtl.device newBufferWithBytes:desc->data.ptr length:(NSUInteger)buf->cmn.size options:mtl_options]; } else { - mtl_buf = [_sg.mtl.device newBufferWithLength:buf->cmn.size options:mtl_options]; + mtl_buf = [_sg.mtl.device newBufferWithLength:(NSUInteger)buf->cmn.size options:mtl_options]; } } buf->mtl.buf[slot] = _sg_mtl_add_resource(mtl_buf); @@ -9529,14 +9663,14 @@ _SOKOL_PRIVATE void _sg_mtl_destroy_buffer(_sg_buffer_t* buf) { } } -_SOKOL_PRIVATE void _sg_mtl_copy_image_content(const _sg_image_t* img, __unsafe_unretained id mtl_tex, const sg_image_content* content) { +_SOKOL_PRIVATE void _sg_mtl_copy_image_data(const _sg_image_t* img, __unsafe_unretained id mtl_tex, const sg_image_data* data) { const int num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1; const int num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? 
img->cmn.num_slices : 1; for (int face_index = 0; face_index < num_faces; face_index++) { for (int mip_index = 0; mip_index < img->cmn.num_mipmaps; mip_index++) { - SOKOL_ASSERT(content->subimage[face_index][mip_index].ptr); - SOKOL_ASSERT(content->subimage[face_index][mip_index].size > 0); - const uint8_t* data_ptr = (const uint8_t*)content->subimage[face_index][mip_index].ptr; + SOKOL_ASSERT(data->subimage[face_index][mip_index].ptr); + SOKOL_ASSERT(data->subimage[face_index][mip_index].size > 0); + const uint8_t* data_ptr = (const uint8_t*)data->subimage[face_index][mip_index].ptr; const int mip_width = _sg_max(img->cmn.width >> mip_index, 1); const int mip_height = _sg_max(img->cmn.height >> mip_index, 1); /* special case PVRTC formats: bytePerRow must be 0 */ @@ -9548,23 +9682,23 @@ _SOKOL_PRIVATE void _sg_mtl_copy_image_content(const _sg_image_t* img, __unsafe_ MTLRegion region; if (img->cmn.type == SG_IMAGETYPE_3D) { const int mip_depth = _sg_max(img->cmn.num_slices >> mip_index, 1); - region = MTLRegionMake3D(0, 0, 0, mip_width, mip_height, mip_depth); + region = MTLRegionMake3D(0, 0, 0, (NSUInteger)mip_width, (NSUInteger)mip_height, (NSUInteger)mip_depth); /* FIXME: apparently the minimal bytes_per_image size for 3D texture is 4 KByte... somehow need to handle this */ } else { - region = MTLRegionMake2D(0, 0, mip_width, mip_height); + region = MTLRegionMake2D(0, 0, (NSUInteger)mip_width, (NSUInteger)mip_height); } for (int slice_index = 0; slice_index < num_slices; slice_index++) { const int mtl_slice_index = (img->cmn.type == SG_IMAGETYPE_CUBE) ? face_index : slice_index; const int slice_offset = slice_index * bytes_per_slice; - SOKOL_ASSERT((slice_offset + bytes_per_slice) <= (int)content->subimage[face_index][mip_index].size); + SOKOL_ASSERT((slice_offset + bytes_per_slice) <= (int)data->subimage[face_index][mip_index].size); [mtl_tex replaceRegion:region - mipmapLevel:mip_index - slice:mtl_slice_index + mipmapLevel:(NSUInteger)mip_index + slice:(NSUInteger)mtl_slice_index withBytes:data_ptr + slice_offset - bytesPerRow:bytes_per_row - bytesPerImage:bytes_per_slice]; + bytesPerRow:(NSUInteger)bytes_per_row + bytesPerImage:(NSUInteger)bytes_per_slice]; } } } @@ -9593,17 +9727,17 @@ _SOKOL_PRIVATE bool _sg_mtl_init_texdesc_common(MTLTextureDescriptor* mtl_desc, SOKOL_LOG("Unsupported texture pixel format!\n"); return false; } - mtl_desc.width = img->cmn.width; - mtl_desc.height = img->cmn.height; + mtl_desc.width = (NSUInteger)img->cmn.width; + mtl_desc.height = (NSUInteger)img->cmn.height; if (SG_IMAGETYPE_3D == img->cmn.type) { - mtl_desc.depth = img->cmn.num_slices; + mtl_desc.depth = (NSUInteger)img->cmn.num_slices; } else { mtl_desc.depth = 1; } - mtl_desc.mipmapLevelCount = img->cmn.num_mipmaps; + mtl_desc.mipmapLevelCount = (NSUInteger)img->cmn.num_mipmaps; if (SG_IMAGETYPE_ARRAY == img->cmn.type) { - mtl_desc.arrayLength = img->cmn.num_slices; + mtl_desc.arrayLength = (NSUInteger)img->cmn.num_slices; } else { mtl_desc.arrayLength = 1; @@ -9651,7 +9785,7 @@ _SOKOL_PRIVATE void _sg_mtl_init_texdesc_rt_msaa(MTLTextureDescriptor* mtl_desc, mtl_desc.depth = 1; mtl_desc.arrayLength = 1; mtl_desc.mipmapLevelCount = 1; - mtl_desc.sampleCount = img->cmn.sample_count; + mtl_desc.sampleCount = (NSUInteger)img->cmn.sample_count; } _SOKOL_PRIVATE sg_resource_state _sg_mtl_create_image(_sg_image_t* img, const sg_image_desc* desc) { @@ -9713,7 +9847,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_mtl_create_image(_sg_image_t* img, const sg else { tex = [_sg.mtl.device 
newTextureWithDescriptor:mtl_desc]; if ((img->cmn.usage == SG_USAGE_IMMUTABLE) && !img->cmn.render_target) { - _sg_mtl_copy_image_content(img, tex, &desc->content); + _sg_mtl_copy_image_data(img, tex, &desc->data); } } img->mtl.tex[slot] = _sg_mtl_add_resource(tex); @@ -9757,7 +9891,7 @@ _SOKOL_PRIVATE id _sg_mtl_compile_library(const char* src) { return lib; } -_SOKOL_PRIVATE id _sg_mtl_library_from_bytecode(const uint8_t* ptr, int num_bytes) { +_SOKOL_PRIVATE id _sg_mtl_library_from_bytecode(const void* ptr, size_t num_bytes) { NSError* err = NULL; dispatch_data_t lib_data = dispatch_data_create(ptr, num_bytes, NULL, DISPATCH_DATA_DESTRUCTOR_DEFAULT); id lib = [_sg.mtl.device newLibraryWithData:lib_data error:&err]; @@ -9780,10 +9914,10 @@ _SOKOL_PRIVATE sg_resource_state _sg_mtl_create_shader(_sg_shader_t* shd, const id fs_func; const char* vs_entry = desc->vs.entry; const char* fs_entry = desc->fs.entry; - if (desc->vs.byte_code && desc->fs.byte_code) { + if (desc->vs.bytecode.ptr && desc->fs.bytecode.ptr) { /* separate byte code provided */ - vs_lib = _sg_mtl_library_from_bytecode(desc->vs.byte_code, desc->vs.byte_code_size); - fs_lib = _sg_mtl_library_from_bytecode(desc->fs.byte_code, desc->fs.byte_code_size); + vs_lib = _sg_mtl_library_from_bytecode(desc->vs.bytecode.ptr, desc->vs.bytecode.size); + fs_lib = _sg_mtl_library_from_bytecode(desc->fs.bytecode.ptr, desc->fs.bytecode.size); if (nil == vs_lib || nil == fs_lib) { return SG_RESOURCESTATE_FAILED; } @@ -9841,31 +9975,31 @@ _SOKOL_PRIVATE sg_resource_state _sg_mtl_create_pipeline(_sg_pipeline_t* pip, _s if (SG_INDEXTYPE_NONE != pip->cmn.index_type) { pip->mtl.index_type = _sg_mtl_index_type(pip->cmn.index_type); } - pip->mtl.cull_mode = _sg_mtl_cull_mode(desc->rasterizer.cull_mode); - pip->mtl.winding = _sg_mtl_winding(desc->rasterizer.face_winding); - pip->mtl.stencil_ref = desc->depth_stencil.stencil_ref; + pip->mtl.cull_mode = _sg_mtl_cull_mode(desc->cull_mode); + pip->mtl.winding = _sg_mtl_winding(desc->face_winding); + pip->mtl.stencil_ref = desc->stencil.ref; /* create vertex-descriptor */ MTLVertexDescriptor* vtx_desc = [MTLVertexDescriptor vertexDescriptor]; - for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { + for (NSUInteger attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index]; if (a_desc->format == SG_VERTEXFORMAT_INVALID) { break; } - SOKOL_ASSERT((a_desc->buffer_index >= 0) && (a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS)); + SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS); vtx_desc.attributes[attr_index].format = _sg_mtl_vertex_format(a_desc->format); - vtx_desc.attributes[attr_index].offset = a_desc->offset; - vtx_desc.attributes[attr_index].bufferIndex = a_desc->buffer_index + SG_MAX_SHADERSTAGE_UBS; + vtx_desc.attributes[attr_index].offset = (NSUInteger)a_desc->offset; + vtx_desc.attributes[attr_index].bufferIndex = (NSUInteger)(a_desc->buffer_index + SG_MAX_SHADERSTAGE_UBS); pip->cmn.vertex_layout_valid[a_desc->buffer_index] = true; } - for (int layout_index = 0; layout_index < SG_MAX_SHADERSTAGE_BUFFERS; layout_index++) { + for (NSUInteger layout_index = 0; layout_index < SG_MAX_SHADERSTAGE_BUFFERS; layout_index++) { if (pip->cmn.vertex_layout_valid[layout_index]) { const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[layout_index]; - const int mtl_vb_slot = layout_index + SG_MAX_SHADERSTAGE_UBS; + const NSUInteger mtl_vb_slot = layout_index + SG_MAX_SHADERSTAGE_UBS; 
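/* For reference, a caller-side sketch of the flattened pipeline state that the
   D3D11 and Metal pipeline-creation hunks in this patch now consume: top-level
   cull_mode/face_winding/sample_count/alpha_to_coverage_enabled, nested depth
   and stencil blocks, and a per-attachment colors[] array selected by
   color_count. The field names are taken from these hunks; 'shd' and the
   concrete enum values (vertex format, compare func, blend factors, cull mode,
   write mask) are illustrative assumptions only, not part of this patch: */
sg_pipeline_desc pip_desc = {
    .shader = shd,                                          /* assumed valid sg_shader */
    .layout = {
        .attrs[0] = { .format = SG_VERTEXFORMAT_FLOAT3 },   /* assumed vertex format */
    },
    .depth = {
        .pixel_format = SG_PIXELFORMAT_DEPTH_STENCIL,
        .compare = SG_COMPAREFUNC_LESS_EQUAL,               /* assumed compare func */
        .write_enabled = true,
    },
    .stencil = { .enabled = false },
    .color_count = 2,
    .colors = {
        /* per-attachment blend/write-mask state, as consumed by the new
           IndependentBlendEnable path in the D3D11 blend-state hunk */
        [0] = { .blend = {
            .enabled = true,
            .src_factor_rgb = SG_BLENDFACTOR_SRC_ALPHA,             /* assumed */
            .dst_factor_rgb = SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA,   /* assumed */
        }},
        [1] = { .write_mask = SG_COLORMASK_RGB },                   /* assumed */
    },
    .cull_mode = SG_CULLMODE_BACK,                                  /* assumed */
    .face_winding = SG_FACEWINDING_CCW,
    .sample_count = 4,
    .alpha_to_coverage_enabled = false,
};
sg_pipeline pip = sg_make_pipeline(&pip_desc);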
SOKOL_ASSERT(l_desc->stride > 0); - vtx_desc.layouts[mtl_vb_slot].stride = l_desc->stride; + vtx_desc.layouts[mtl_vb_slot].stride = (NSUInteger)l_desc->stride; vtx_desc.layouts[mtl_vb_slot].stepFunction = _sg_mtl_step_function(l_desc->step_func); - vtx_desc.layouts[mtl_vb_slot].stepRate = l_desc->step_rate; + vtx_desc.layouts[mtl_vb_slot].stepRate = (NSUInteger)l_desc->step_rate; } } @@ -9876,13 +10010,13 @@ _SOKOL_PRIVATE sg_resource_state _sg_mtl_create_pipeline(_sg_pipeline_t* pip, _s rp_desc.vertexFunction = _sg_mtl_id(shd->mtl.stage[SG_SHADERSTAGE_VS].mtl_func); SOKOL_ASSERT(shd->mtl.stage[SG_SHADERSTAGE_FS].mtl_func != _SG_MTL_INVALID_SLOT_INDEX); rp_desc.fragmentFunction = _sg_mtl_id(shd->mtl.stage[SG_SHADERSTAGE_FS].mtl_func); - rp_desc.sampleCount = desc->rasterizer.sample_count; - rp_desc.alphaToCoverageEnabled = desc->rasterizer.alpha_to_coverage_enabled; + rp_desc.sampleCount = (NSUInteger)desc->sample_count; + rp_desc.alphaToCoverageEnabled = desc->alpha_to_coverage_enabled; rp_desc.alphaToOneEnabled = NO; rp_desc.rasterizationEnabled = YES; - rp_desc.depthAttachmentPixelFormat = _sg_mtl_pixel_format(desc->blend.depth_format); - if (desc->blend.depth_format == SG_PIXELFORMAT_DEPTH_STENCIL) { - rp_desc.stencilAttachmentPixelFormat = _sg_mtl_pixel_format(desc->blend.depth_format); + rp_desc.depthAttachmentPixelFormat = _sg_mtl_pixel_format(desc->depth.pixel_format); + if (desc->depth.pixel_format == SG_PIXELFORMAT_DEPTH_STENCIL) { + rp_desc.stencilAttachmentPixelFormat = _sg_mtl_pixel_format(desc->depth.pixel_format); } /* FIXME: this only works on macOS 10.13! for (int i = 0; i < (SG_MAX_SHADERSTAGE_UBS+SG_MAX_SHADERSTAGE_BUFFERS); i++) { @@ -9892,17 +10026,18 @@ _SOKOL_PRIVATE sg_resource_state _sg_mtl_create_pipeline(_sg_pipeline_t* pip, _s rp_desc.fragmentBuffers[i].mutability = MTLMutabilityImmutable; } */ - const int att_count = desc->blend.color_attachment_count; - for (int i = 0; i < att_count; i++) { - rp_desc.colorAttachments[i].pixelFormat = _sg_mtl_pixel_format(desc->blend.color_format); - rp_desc.colorAttachments[i].writeMask = _sg_mtl_color_write_mask((sg_color_mask)desc->blend.color_write_mask); - rp_desc.colorAttachments[i].blendingEnabled = desc->blend.enabled; - rp_desc.colorAttachments[i].alphaBlendOperation = _sg_mtl_blend_op(desc->blend.op_alpha); - rp_desc.colorAttachments[i].rgbBlendOperation = _sg_mtl_blend_op(desc->blend.op_rgb); - rp_desc.colorAttachments[i].destinationAlphaBlendFactor = _sg_mtl_blend_factor(desc->blend.dst_factor_alpha); - rp_desc.colorAttachments[i].destinationRGBBlendFactor = _sg_mtl_blend_factor(desc->blend.dst_factor_rgb); - rp_desc.colorAttachments[i].sourceAlphaBlendFactor = _sg_mtl_blend_factor(desc->blend.src_factor_alpha); - rp_desc.colorAttachments[i].sourceRGBBlendFactor = _sg_mtl_blend_factor(desc->blend.src_factor_rgb); + for (NSUInteger i = 0; i < (NSUInteger)desc->color_count; i++) { + SOKOL_ASSERT(i < SG_MAX_COLOR_ATTACHMENTS); + const sg_color_state* cs = &desc->colors[i]; + rp_desc.colorAttachments[i].pixelFormat = _sg_mtl_pixel_format(cs->pixel_format); + rp_desc.colorAttachments[i].writeMask = _sg_mtl_color_write_mask(cs->write_mask); + rp_desc.colorAttachments[i].blendingEnabled = cs->blend.enabled; + rp_desc.colorAttachments[i].alphaBlendOperation = _sg_mtl_blend_op(cs->blend.op_alpha); + rp_desc.colorAttachments[i].rgbBlendOperation = _sg_mtl_blend_op(cs->blend.op_rgb); + rp_desc.colorAttachments[i].destinationAlphaBlendFactor = _sg_mtl_blend_factor(cs->blend.dst_factor_alpha); + 
rp_desc.colorAttachments[i].destinationRGBBlendFactor = _sg_mtl_blend_factor(cs->blend.dst_factor_rgb); + rp_desc.colorAttachments[i].sourceAlphaBlendFactor = _sg_mtl_blend_factor(cs->blend.src_factor_alpha); + rp_desc.colorAttachments[i].sourceRGBBlendFactor = _sg_mtl_blend_factor(cs->blend.src_factor_rgb); } NSError* err = NULL; id mtl_rps = [_sg.mtl.device newRenderPipelineStateWithDescriptor:rp_desc error:&err]; @@ -9915,25 +10050,25 @@ _SOKOL_PRIVATE sg_resource_state _sg_mtl_create_pipeline(_sg_pipeline_t* pip, _s /* depth-stencil-state */ MTLDepthStencilDescriptor* ds_desc = [[MTLDepthStencilDescriptor alloc] init]; - ds_desc.depthCompareFunction = _sg_mtl_compare_func(desc->depth_stencil.depth_compare_func); - ds_desc.depthWriteEnabled = desc->depth_stencil.depth_write_enabled; - if (desc->depth_stencil.stencil_enabled) { - const sg_stencil_state* sb = &desc->depth_stencil.stencil_back; + ds_desc.depthCompareFunction = _sg_mtl_compare_func(desc->depth.compare); + ds_desc.depthWriteEnabled = desc->depth.write_enabled; + if (desc->stencil.enabled) { + const sg_stencil_face_state* sb = &desc->stencil.back; ds_desc.backFaceStencil = [[MTLStencilDescriptor alloc] init]; ds_desc.backFaceStencil.stencilFailureOperation = _sg_mtl_stencil_op(sb->fail_op); ds_desc.backFaceStencil.depthFailureOperation = _sg_mtl_stencil_op(sb->depth_fail_op); ds_desc.backFaceStencil.depthStencilPassOperation = _sg_mtl_stencil_op(sb->pass_op); - ds_desc.backFaceStencil.stencilCompareFunction = _sg_mtl_compare_func(sb->compare_func); - ds_desc.backFaceStencil.readMask = desc->depth_stencil.stencil_read_mask; - ds_desc.backFaceStencil.writeMask = desc->depth_stencil.stencil_write_mask; - const sg_stencil_state* sf = &desc->depth_stencil.stencil_front; + ds_desc.backFaceStencil.stencilCompareFunction = _sg_mtl_compare_func(sb->compare); + ds_desc.backFaceStencil.readMask = desc->stencil.read_mask; + ds_desc.backFaceStencil.writeMask = desc->stencil.write_mask; + const sg_stencil_face_state* sf = &desc->stencil.front; ds_desc.frontFaceStencil = [[MTLStencilDescriptor alloc] init]; ds_desc.frontFaceStencil.stencilFailureOperation = _sg_mtl_stencil_op(sf->fail_op); ds_desc.frontFaceStencil.depthFailureOperation = _sg_mtl_stencil_op(sf->depth_fail_op); ds_desc.frontFaceStencil.depthStencilPassOperation = _sg_mtl_stencil_op(sf->pass_op); - ds_desc.frontFaceStencil.stencilCompareFunction = _sg_mtl_compare_func(sf->compare_func); - ds_desc.frontFaceStencil.readMask = desc->depth_stencil.stencil_read_mask; - ds_desc.frontFaceStencil.writeMask = desc->depth_stencil.stencil_write_mask; + ds_desc.frontFaceStencil.stencilCompareFunction = _sg_mtl_compare_func(sf->compare); + ds_desc.frontFaceStencil.readMask = desc->stencil.read_mask; + ds_desc.frontFaceStencil.writeMask = desc->stencil.write_mask; } id mtl_dss = [_sg.mtl.device newDepthStencilStateWithDescriptor:ds_desc]; _SG_OBJC_RELEASE(ds_desc); @@ -9956,7 +10091,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_mtl_create_pass(_sg_pass_t* pass, _sg_image _sg_pass_common_init(&pass->cmn, desc); /* copy image pointers */ - const sg_attachment_desc* att_desc; + const sg_pass_attachment_desc* att_desc; for (int i = 0; i < pass->cmn.num_color_atts; i++) { att_desc = &desc->color_attachments[i]; if (att_desc->image.id != SG_INVALID_ID) { @@ -10046,8 +10181,8 @@ _SOKOL_PRIVATE void _sg_mtl_begin_pass(_sg_pass_t* pass, const sg_pass_action* a if (pass) { /* setup pass descriptor for offscreen rendering */ SOKOL_ASSERT(pass->slot.state == SG_RESOURCESTATE_VALID); - for (int i = 0; i < 
pass->cmn.num_color_atts; i++) { - const _sg_attachment_t* cmn_att = &pass->cmn.color_atts[i]; + for (NSUInteger i = 0; i < (NSUInteger)pass->cmn.num_color_atts; i++) { + const _sg_pass_attachment_t* cmn_att = &pass->cmn.color_atts[i]; const _sg_mtl_attachment_t* mtl_att = &pass->mtl.color_atts[i]; const _sg_image_t* att_img = mtl_att->image; SOKOL_ASSERT(att_img->slot.state == SG_RESOURCESTATE_VALID); @@ -10055,21 +10190,21 @@ _SOKOL_PRIVATE void _sg_mtl_begin_pass(_sg_pass_t* pass, const sg_pass_action* a const bool is_msaa = (att_img->cmn.sample_count > 1); pass_desc.colorAttachments[i].loadAction = _sg_mtl_load_action(action->colors[i].action); pass_desc.colorAttachments[i].storeAction = is_msaa ? MTLStoreActionMultisampleResolve : MTLStoreActionStore; - const float* c = &(action->colors[i].val[0]); - pass_desc.colorAttachments[i].clearColor = MTLClearColorMake(c[0], c[1], c[2], c[3]); + sg_color c = action->colors[i].value; + pass_desc.colorAttachments[i].clearColor = MTLClearColorMake(c.r, c.g, c.b, c.a); if (is_msaa) { SOKOL_ASSERT(att_img->mtl.msaa_tex != _SG_MTL_INVALID_SLOT_INDEX); SOKOL_ASSERT(att_img->mtl.tex[mtl_att->image->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX); pass_desc.colorAttachments[i].texture = _sg_mtl_id(att_img->mtl.msaa_tex); pass_desc.colorAttachments[i].resolveTexture = _sg_mtl_id(att_img->mtl.tex[att_img->cmn.active_slot]); - pass_desc.colorAttachments[i].resolveLevel = cmn_att->mip_level; + pass_desc.colorAttachments[i].resolveLevel = (NSUInteger)cmn_att->mip_level; switch (att_img->cmn.type) { case SG_IMAGETYPE_CUBE: case SG_IMAGETYPE_ARRAY: - pass_desc.colorAttachments[i].resolveSlice = cmn_att->slice; + pass_desc.colorAttachments[i].resolveSlice = (NSUInteger)cmn_att->slice; break; case SG_IMAGETYPE_3D: - pass_desc.colorAttachments[i].resolveDepthPlane = cmn_att->slice; + pass_desc.colorAttachments[i].resolveDepthPlane = (NSUInteger)cmn_att->slice; break; default: break; } @@ -10077,14 +10212,14 @@ _SOKOL_PRIVATE void _sg_mtl_begin_pass(_sg_pass_t* pass, const sg_pass_action* a else { SOKOL_ASSERT(att_img->mtl.tex[att_img->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX); pass_desc.colorAttachments[i].texture = _sg_mtl_id(att_img->mtl.tex[att_img->cmn.active_slot]); - pass_desc.colorAttachments[i].level = cmn_att->mip_level; + pass_desc.colorAttachments[i].level = (NSUInteger)cmn_att->mip_level; switch (att_img->cmn.type) { case SG_IMAGETYPE_CUBE: case SG_IMAGETYPE_ARRAY: - pass_desc.colorAttachments[i].slice = cmn_att->slice; + pass_desc.colorAttachments[i].slice = (NSUInteger)cmn_att->slice; break; case SG_IMAGETYPE_3D: - pass_desc.colorAttachments[i].depthPlane = cmn_att->slice; + pass_desc.colorAttachments[i].depthPlane = (NSUInteger)cmn_att->slice; break; default: break; } @@ -10097,23 +10232,23 @@ _SOKOL_PRIVATE void _sg_mtl_begin_pass(_sg_pass_t* pass, const sg_pass_action* a SOKOL_ASSERT(ds_att_img->mtl.depth_tex != _SG_MTL_INVALID_SLOT_INDEX); pass_desc.depthAttachment.texture = _sg_mtl_id(ds_att_img->mtl.depth_tex); pass_desc.depthAttachment.loadAction = _sg_mtl_load_action(action->depth.action); - pass_desc.depthAttachment.clearDepth = action->depth.val; + pass_desc.depthAttachment.clearDepth = action->depth.value; if (_sg_is_depth_stencil_format(ds_att_img->cmn.pixel_format)) { pass_desc.stencilAttachment.texture = _sg_mtl_id(ds_att_img->mtl.depth_tex); pass_desc.stencilAttachment.loadAction = _sg_mtl_load_action(action->stencil.action); - pass_desc.stencilAttachment.clearStencil = action->stencil.val; + 
pass_desc.stencilAttachment.clearStencil = action->stencil.value; } } } else { /* setup pass descriptor for default rendering */ pass_desc.colorAttachments[0].loadAction = _sg_mtl_load_action(action->colors[0].action); - const float* c = &(action->colors[0].val[0]); - pass_desc.colorAttachments[0].clearColor = MTLClearColorMake(c[0], c[1], c[2], c[3]); + sg_color c = action->colors[0].value; + pass_desc.colorAttachments[0].clearColor = MTLClearColorMake(c.r, c.g, c.b, c.a); pass_desc.depthAttachment.loadAction = _sg_mtl_load_action(action->depth.action); - pass_desc.depthAttachment.clearDepth = action->depth.val; + pass_desc.depthAttachment.clearDepth = action->depth.value; pass_desc.stencilAttachment.loadAction = _sg_mtl_load_action(action->stencil.action); - pass_desc.stencilAttachment.clearStencil = action->stencil.val; + pass_desc.stencilAttachment.clearStencil = action->stencil.value; } /* create a render command encoder, this might return nil if window is minimized */ @@ -10146,7 +10281,7 @@ _SOKOL_PRIVATE void _sg_mtl_commit(void) { SOKOL_ASSERT(nil != _sg.mtl.cmd_buffer); #if defined(_SG_TARGET_MACOS) - [_sg.mtl.uniform_buffers[_sg.mtl.cur_frame_rotate_index] didModifyRange:NSMakeRange(0, _sg.mtl.cur_ub_offset)]; + [_sg.mtl.uniform_buffers[_sg.mtl.cur_frame_rotate_index] didModifyRange:NSMakeRange(0, (NSUInteger)_sg.mtl.cur_ub_offset)]; #endif /* present, commit and signal semaphore when done */ @@ -10213,10 +10348,10 @@ _SOKOL_PRIVATE void _sg_mtl_apply_scissor_rect(int x, int y, int w, int h, bool h = _sg_max(h, 1); MTLScissorRect r; - r.x = x; - r.y = origin_top_left ? y : (_sg.mtl.cur_height - (y + h)); - r.width = w; - r.height = h; + r.x = (NSUInteger)x; + r.y = (NSUInteger) (origin_top_left ? y : (_sg.mtl.cur_height - (y + h))); + r.width = (NSUInteger)w; + r.height = (NSUInteger)h; [_sg.mtl.cmd_encoder setScissorRect:r]; } @@ -10232,8 +10367,8 @@ _SOKOL_PRIVATE void _sg_mtl_apply_pipeline(_sg_pipeline_t* pip) { if ((_sg.mtl.state_cache.cur_pipeline != pip) || (_sg.mtl.state_cache.cur_pipeline_id.id != pip->slot.id)) { _sg.mtl.state_cache.cur_pipeline = pip; _sg.mtl.state_cache.cur_pipeline_id.id = pip->slot.id; - const float* c = pip->cmn.blend_color; - [_sg.mtl.cmd_encoder setBlendColorRed:c[0] green:c[1] blue:c[2] alpha:c[3]]; + sg_color c = pip->cmn.blend_color; + [_sg.mtl.cmd_encoder setBlendColorRed:c.r green:c.g blue:c.b alpha:c.a]; [_sg.mtl.cmd_encoder setCullMode:pip->mtl.cull_mode]; [_sg.mtl.cmd_encoder setFrontFacingWinding:pip->mtl.winding]; [_sg.mtl.cmd_encoder setStencilReferenceValue:pip->mtl.stencil_ref]; @@ -10272,8 +10407,8 @@ _SOKOL_PRIVATE void _sg_mtl_apply_bindings( } /* apply vertex buffers */ - int slot; - for (slot = 0; slot < num_vbs; slot++) { + NSUInteger slot; + for (slot = 0; slot < (NSUInteger)num_vbs; slot++) { const _sg_buffer_t* vb = vbs[slot]; if ((_sg.mtl.state_cache.cur_vertexbuffers[slot] != vb) || (_sg.mtl.state_cache.cur_vertexbuffer_offsets[slot] != vb_offsets[slot]) || @@ -10285,13 +10420,13 @@ _SOKOL_PRIVATE void _sg_mtl_apply_bindings( const NSUInteger mtl_slot = SG_MAX_SHADERSTAGE_UBS + slot; SOKOL_ASSERT(vb->mtl.buf[vb->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX); [_sg.mtl.cmd_encoder setVertexBuffer:_sg_mtl_id(vb->mtl.buf[vb->cmn.active_slot]) - offset:vb_offsets[slot] + offset:(NSUInteger)vb_offsets[slot] atIndex:mtl_slot]; } } /* apply vertex shader images */ - for (slot = 0; slot < num_vs_imgs; slot++) { + for (slot = 0; slot < (NSUInteger)num_vs_imgs; slot++) { const _sg_image_t* img = vs_imgs[slot]; if 
((_sg.mtl.state_cache.cur_vs_images[slot] != img) || (_sg.mtl.state_cache.cur_vs_image_ids[slot].id != img->slot.id)) { _sg.mtl.state_cache.cur_vs_images[slot] = img; @@ -10304,7 +10439,7 @@ _SOKOL_PRIVATE void _sg_mtl_apply_bindings( } /* apply fragment shader images */ - for (slot = 0; slot < num_fs_imgs; slot++) { + for (slot = 0; slot < (NSUInteger)num_fs_imgs; slot++) { const _sg_image_t* img = fs_imgs[slot]; if ((_sg.mtl.state_cache.cur_fs_images[slot] != img) || (_sg.mtl.state_cache.cur_fs_image_ids[slot].id != img->slot.id)) { _sg.mtl.state_cache.cur_fs_images[slot] = img; @@ -10317,33 +10452,30 @@ _SOKOL_PRIVATE void _sg_mtl_apply_bindings( } } -_SOKOL_PRIVATE void _sg_mtl_apply_uniforms(sg_shader_stage stage_index, int ub_index, const void* data, int num_bytes) { +_SOKOL_PRIVATE void _sg_mtl_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) { SOKOL_ASSERT(_sg.mtl.in_pass); if (!_sg.mtl.pass_valid) { return; } SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder); - SOKOL_ASSERT(data && (num_bytes > 0)); - SOKOL_ASSERT((stage_index >= 0) && ((int)stage_index < SG_NUM_SHADER_STAGES)); - SOKOL_ASSERT((ub_index >= 0) && (ub_index < SG_MAX_SHADERSTAGE_UBS)); - SOKOL_ASSERT((_sg.mtl.cur_ub_offset + num_bytes) <= _sg.mtl.ub_size); + SOKOL_ASSERT(((size_t)_sg.mtl.cur_ub_offset + data->size) <= (size_t)_sg.mtl.ub_size); SOKOL_ASSERT((_sg.mtl.cur_ub_offset & (_SG_MTL_UB_ALIGN-1)) == 0); SOKOL_ASSERT(_sg.mtl.state_cache.cur_pipeline && _sg.mtl.state_cache.cur_pipeline->shader); SOKOL_ASSERT(_sg.mtl.state_cache.cur_pipeline->slot.id == _sg.mtl.state_cache.cur_pipeline_id.id); SOKOL_ASSERT(_sg.mtl.state_cache.cur_pipeline->shader->slot.id == _sg.mtl.state_cache.cur_pipeline->cmn.shader_id.id); SOKOL_ASSERT(ub_index < _sg.mtl.state_cache.cur_pipeline->shader->cmn.stage[stage_index].num_uniform_blocks); - SOKOL_ASSERT(num_bytes <= _sg.mtl.state_cache.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size); + SOKOL_ASSERT(data->size <= _sg.mtl.state_cache.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size); /* copy to global uniform buffer, record offset into cmd encoder, and advance offset */ uint8_t* dst = &_sg.mtl.cur_ub_base_ptr[_sg.mtl.cur_ub_offset]; - memcpy(dst, data, num_bytes); + memcpy(dst, data->ptr, data->size); if (stage_index == SG_SHADERSTAGE_VS) { - [_sg.mtl.cmd_encoder setVertexBufferOffset:_sg.mtl.cur_ub_offset atIndex:ub_index]; + [_sg.mtl.cmd_encoder setVertexBufferOffset:(NSUInteger)_sg.mtl.cur_ub_offset atIndex:(NSUInteger)ub_index]; } else { - [_sg.mtl.cmd_encoder setFragmentBufferOffset:_sg.mtl.cur_ub_offset atIndex:ub_index]; + [_sg.mtl.cmd_encoder setFragmentBufferOffset:(NSUInteger)_sg.mtl.cur_ub_offset atIndex:(NSUInteger)ub_index]; } - _sg.mtl.cur_ub_offset = _sg_roundup(_sg.mtl.cur_ub_offset + num_bytes, _SG_MTL_UB_ALIGN); + _sg.mtl.cur_ub_offset = _sg_roundup(_sg.mtl.cur_ub_offset + (int)data->size, _SG_MTL_UB_ALIGN); } _SOKOL_PRIVATE void _sg_mtl_draw(int base_element, int num_elements, int num_instances) { @@ -10358,39 +10490,38 @@ _SOKOL_PRIVATE void _sg_mtl_draw(int base_element, int num_elements, int num_ins SOKOL_ASSERT(_sg.mtl.state_cache.cur_indexbuffer && (_sg.mtl.state_cache.cur_indexbuffer->slot.id == _sg.mtl.state_cache.cur_indexbuffer_id.id)); const _sg_buffer_t* ib = _sg.mtl.state_cache.cur_indexbuffer; SOKOL_ASSERT(ib->mtl.buf[ib->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX); - const NSUInteger index_buffer_offset = _sg.mtl.state_cache.cur_indexbuffer_offset + - base_element * 
_sg.mtl.state_cache.cur_pipeline->mtl.index_size; + const NSUInteger index_buffer_offset = (NSUInteger) (_sg.mtl.state_cache.cur_indexbuffer_offset + base_element * _sg.mtl.state_cache.cur_pipeline->mtl.index_size); [_sg.mtl.cmd_encoder drawIndexedPrimitives:_sg.mtl.state_cache.cur_pipeline->mtl.prim_type - indexCount:num_elements + indexCount:(NSUInteger)num_elements indexType:_sg.mtl.state_cache.cur_pipeline->mtl.index_type indexBuffer:_sg_mtl_id(ib->mtl.buf[ib->cmn.active_slot]) indexBufferOffset:index_buffer_offset - instanceCount:num_instances]; + instanceCount:(NSUInteger)num_instances]; } else { /* non-indexed rendering */ [_sg.mtl.cmd_encoder drawPrimitives:_sg.mtl.state_cache.cur_pipeline->mtl.prim_type - vertexStart:base_element - vertexCount:num_elements - instanceCount:num_instances]; + vertexStart:(NSUInteger)base_element + vertexCount:(NSUInteger)num_elements + instanceCount:(NSUInteger)num_instances]; } } -_SOKOL_PRIVATE void _sg_mtl_update_buffer(_sg_buffer_t* buf, const void* data, uint32_t data_size) { - SOKOL_ASSERT(buf && data && (data_size > 0)); +_SOKOL_PRIVATE void _sg_mtl_update_buffer(_sg_buffer_t* buf, const sg_range* data) { + SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); if (++buf->cmn.active_slot >= buf->cmn.num_slots) { buf->cmn.active_slot = 0; } __unsafe_unretained id mtl_buf = _sg_mtl_id(buf->mtl.buf[buf->cmn.active_slot]); void* dst_ptr = [mtl_buf contents]; - memcpy(dst_ptr, data, data_size); + memcpy(dst_ptr, data->ptr, data->size); #if defined(_SG_TARGET_MACOS) - [mtl_buf didModifyRange:NSMakeRange(0, data_size)]; + [mtl_buf didModifyRange:NSMakeRange(0, data->size)]; #endif } -_SOKOL_PRIVATE uint32_t _sg_mtl_append_buffer(_sg_buffer_t* buf, const void* data, uint32_t data_size, bool new_frame) { - SOKOL_ASSERT(buf && data && (data_size > 0)); +_SOKOL_PRIVATE int _sg_mtl_append_buffer(_sg_buffer_t* buf, const sg_range* data, bool new_frame) { + SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); if (new_frame) { if (++buf->cmn.active_slot >= buf->cmn.num_slots) { buf->cmn.active_slot = 0; @@ -10399,21 +10530,21 @@ _SOKOL_PRIVATE uint32_t _sg_mtl_append_buffer(_sg_buffer_t* buf, const void* dat __unsafe_unretained id mtl_buf = _sg_mtl_id(buf->mtl.buf[buf->cmn.active_slot]); uint8_t* dst_ptr = (uint8_t*) [mtl_buf contents]; dst_ptr += buf->cmn.append_pos; - memcpy(dst_ptr, data, data_size); + memcpy(dst_ptr, data->ptr, data->size); #if defined(_SG_TARGET_MACOS) - [mtl_buf didModifyRange:NSMakeRange(buf->cmn.append_pos, data_size)]; + [mtl_buf didModifyRange:NSMakeRange((NSUInteger)buf->cmn.append_pos, (NSUInteger)data->size)]; #endif - /* NOTE: this is a requirement from WebGPU, but we want identical behaviour across all backend */ - return _sg_roundup(data_size, 4); + /* NOTE: this is a requirement from WebGPU, but we want identical behaviour across all backends */ + return _sg_roundup((int)data->size, 4); } -_SOKOL_PRIVATE void _sg_mtl_update_image(_sg_image_t* img, const sg_image_content* data) { +_SOKOL_PRIVATE void _sg_mtl_update_image(_sg_image_t* img, const sg_image_data* data) { SOKOL_ASSERT(img && data); if (++img->cmn.active_slot >= img->cmn.num_slots) { img->cmn.active_slot = 0; } __unsafe_unretained id mtl_tex = _sg_mtl_id(img->mtl.tex[img->cmn.active_slot]); - _sg_mtl_copy_image_content(img, mtl_tex, data); + _sg_mtl_copy_image_data(img, mtl_tex, data); } /*== WEBGPU BACKEND IMPLEMENTATION ===========================================*/ @@ -10747,6 +10878,8 @@ _SOKOL_PRIVATE void _sg_wgpu_init_caps(void) { 
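/* The D3D11 and Metal hunks above switch buffer and image updates from raw
   pointer+size parameters to sg_range / sg_image_data; the WebGPU hunks below
   make the same change. A caller-side sketch of the new convention, assuming
   the public sg_update_buffer()/sg_update_image() take the same sg_range /
   sg_image_data arguments as the internal backend functions shown in this
   patch; 'vbuf', 'img' and the placeholder data are illustrative only: */
float verts[] = { 0.0f, 0.5f, -0.5f, -0.5f, 0.5f, -0.5f };
sg_range vtx_data = { .ptr = verts, .size = sizeof(verts) };
sg_update_buffer(vbuf, &vtx_data);              /* vbuf: existing dynamic sg_buffer */

unsigned char pixels[4*4*4] = { 0 };            /* placeholder 4x4 RGBA8 pixel data */
sg_image_data img_data = {
    .subimage[0][0] = { .ptr = pixels, .size = sizeof(pixels) }
};
sg_update_image(img, &img_data);                /* img: existing dynamic sg_image */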
_sg.features.imagetype_3d = true; _sg.features.imagetype_array = true; _sg.features.image_clamp_to_border = false; + _sg.features.mrt_independent_blend_state = true; + _sg.features.mrt_independent_write_mask = true; /* FIXME: max images size??? */ _sg.limits.max_image_size_2d = 8 * 1024; @@ -10965,7 +11098,7 @@ _SOKOL_PRIVATE void _sg_wgpu_ubpool_flush(void) { } /* helper function to compute number of bytes needed in staging buffer to copy image data */ -_SOKOL_PRIVATE uint32_t _sg_wgpu_image_content_buffer_size(const _sg_image_t* img) { +_SOKOL_PRIVATE uint32_t _sg_wgpu_image_data_buffer_size(const _sg_image_t* img) { uint32_t num_bytes = 0; const uint32_t num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1; const uint32_t num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? img->cmn.num_slices : 1; @@ -10982,11 +11115,11 @@ _SOKOL_PRIVATE uint32_t _sg_wgpu_image_content_buffer_size(const _sg_image_t* im /* helper function to copy image data into a texture via a staging buffer, returns number of bytes copied */ -_SOKOL_PRIVATE uint32_t _sg_wgpu_copy_image_content(WGPUBuffer stg_buf, uint8_t* stg_base_ptr, uint32_t stg_base_offset, _sg_image_t* img, const sg_image_content* content) { +_SOKOL_PRIVATE uint32_t _sg_wgpu_copy_image_data(WGPUBuffer stg_buf, uint8_t* stg_base_ptr, uint32_t stg_base_offset, _sg_image_t* img, const sg_image_data* data) { SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc); SOKOL_ASSERT(stg_buf && stg_base_ptr); SOKOL_ASSERT(img); - SOKOL_ASSERT(content); + SOKOL_ASSERT(data); uint32_t stg_offset = stg_base_offset; const uint32_t num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1; const uint32_t num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? img->cmn.num_slices : 1; @@ -11002,9 +11135,9 @@ _SOKOL_PRIVATE uint32_t _sg_wgpu_copy_image_content(WGPUBuffer stg_buf, uint8_t* for (uint32_t face_index = 0; face_index < num_faces; face_index++) { for (uint32_t mip_index = 0; mip_index < (uint32_t)img->cmn.num_mipmaps; mip_index++) { - SOKOL_ASSERT(content->subimage[face_index][mip_index].ptr); - SOKOL_ASSERT(content->subimage[face_index][mip_index].size > 0); - const uint8_t* src_base_ptr = (const uint8_t*)content->subimage[face_index][mip_index].ptr; + SOKOL_ASSERT(data->subimage[face_index][mip_index].ptr); + SOKOL_ASSERT(data->subimage[face_index][mip_index].size > 0); + const uint8_t* src_base_ptr = (const uint8_t*)data->subimage[face_index][mip_index].ptr; SOKOL_ASSERT(src_base_ptr); uint8_t* dst_base_ptr = stg_base_ptr + stg_offset; @@ -11016,16 +11149,16 @@ _SOKOL_PRIVATE uint32_t _sg_wgpu_copy_image_content(WGPUBuffer stg_buf, uint8_t* const uint32_t dst_bytes_per_row = _sg_row_pitch(fmt, mip_width, _SG_WGPU_ROWPITCH_ALIGN); const uint32_t src_bytes_per_slice = _sg_surface_pitch(fmt, mip_width, mip_height, 1); const uint32_t dst_bytes_per_slice = _sg_surface_pitch(fmt, mip_width, mip_height, _SG_WGPU_ROWPITCH_ALIGN); - SOKOL_ASSERT((uint32_t)content->subimage[face_index][mip_index].size == (src_bytes_per_slice * num_slices)); + SOKOL_ASSERT((uint32_t)data->subimage[face_index][mip_index].size == (src_bytes_per_slice * num_slices)); SOKOL_ASSERT(src_bytes_per_row <= dst_bytes_per_row); SOKOL_ASSERT(src_bytes_per_slice == (src_bytes_per_row * num_rows)); SOKOL_ASSERT(dst_bytes_per_slice == (dst_bytes_per_row * num_rows)); _SOKOL_UNUSED(src_bytes_per_slice); - /* copy content into mapped staging buffer */ + /* copy data into mapped staging buffer */ if (src_bytes_per_row == dst_bytes_per_row) { /* can do a single memcpy */ - uint32_t num_bytes = 
content->subimage[face_index][mip_index].size; + uint32_t num_bytes = data->subimage[face_index][mip_index].size; memcpy(dst_base_ptr, src_base_ptr, num_bytes); } else { @@ -11177,10 +11310,10 @@ _SOKOL_PRIVATE uint32_t _sg_wgpu_staging_copy_to_buffer(WGPUBuffer dst_buf, uint return copy_num_bytes; } -_SOKOL_PRIVATE bool _sg_wgpu_staging_copy_to_texture(_sg_image_t* img, const sg_image_content* content) { +_SOKOL_PRIVATE bool _sg_wgpu_staging_copy_to_texture(_sg_image_t* img, const sg_image_data* data) { /* similar to _sg_wgpu_staging_copy_to_buffer(), but with image data instead */ SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc); - uint32_t num_bytes = _sg_wgpu_image_content_buffer_size(img); + uint32_t num_bytes = _sg_wgpu_image_data_buffer_size(img); if ((_sg.wgpu.staging.offset + num_bytes) >= _sg.wgpu.staging.num_bytes) { SOKOL_LOG("WGPU: Per frame staging buffer full (in _sg_wgpu_staging_copy_to_texture)!\n"); return false; @@ -11190,7 +11323,7 @@ _SOKOL_PRIVATE bool _sg_wgpu_staging_copy_to_texture(_sg_image_t* img, const sg_ uint32_t stg_offset = _sg.wgpu.staging.offset; uint8_t* stg_ptr = _sg.wgpu.staging.ptr[cur]; WGPUBuffer stg_buf = _sg.wgpu.staging.buf[cur]; - uint32_t bytes_copied = _sg_wgpu_copy_image_content(stg_buf, stg_ptr, stg_offset, img, content); + uint32_t bytes_copied = _sg_wgpu_copy_image_data(stg_buf, stg_ptr, stg_offset, img, data); _SOKOL_UNUSED(bytes_copied); SOKOL_ASSERT(bytes_copied == num_bytes); _sg.wgpu.staging.offset = _sg_roundup(stg_offset + num_bytes, _SG_WGPU_STAGING_ALIGN); @@ -11353,11 +11486,11 @@ _SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_buffer(_sg_buffer_t* buf, const wgpu_buf_desc.usage = _sg_wgpu_buffer_usage(buf->cmn.type, buf->cmn.usage); wgpu_buf_desc.size = buf->cmn.size; if (SG_USAGE_IMMUTABLE == buf->cmn.usage) { - SOKOL_ASSERT(desc->content); + SOKOL_ASSERT(desc->data.ptr); WGPUCreateBufferMappedResult res = wgpuDeviceCreateBufferMapped(_sg.wgpu.dev, &wgpu_buf_desc); buf->wgpu.buf = res.buffer; - SOKOL_ASSERT(res.data && ((int)res.dataLength == buf->cmn.size)); - memcpy(res.data, desc->content, buf->cmn.size); + SOKOL_ASSERT(res.data && (res.dataLength == buf->cmn.size)); + memcpy(res.data, desc->data.ptr, buf->cmn.size); wgpuBufferUnmap(res.buffer); } else { @@ -11441,11 +11574,11 @@ _SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_image(_sg_image_t* img, const s if (desc->usage == SG_USAGE_IMMUTABLE && !desc->render_target) { WGPUBufferDescriptor wgpu_buf_desc; memset(&wgpu_buf_desc, 0, sizeof(wgpu_buf_desc)); - wgpu_buf_desc.size = _sg_wgpu_image_content_buffer_size(img); + wgpu_buf_desc.size = _sg_wgpu_image_data_buffer_size(img); wgpu_buf_desc.usage = WGPUBufferUsage_CopySrc|WGPUBufferUsage_CopyDst; WGPUCreateBufferMappedResult map = wgpuDeviceCreateBufferMapped(_sg.wgpu.dev, &wgpu_buf_desc); SOKOL_ASSERT(map.buffer && map.data); - uint32_t num_bytes = _sg_wgpu_copy_image_content(map.buffer, (uint8_t*)map.data, 0, img, &desc->content); + uint32_t num_bytes = _sg_wgpu_copy_image_data(map.buffer, (uint8_t*)map.data, 0, img, &desc->data); _SOKOL_UNUSED(num_bytes); SOKOL_ASSERT(num_bytes == wgpu_buf_desc.size); wgpuBufferUnmap(map.buffer); @@ -11528,13 +11661,13 @@ _SOKOL_PRIVATE void _sg_wgpu_destroy_image(_sg_image_t* img) { */ _SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) { SOKOL_ASSERT(shd && desc); - SOKOL_ASSERT(desc->vs.byte_code && desc->fs.byte_code); + SOKOL_ASSERT(desc->vs.bytecode.ptr && desc->fs.bytecode.ptr); _sg_shader_common_init(&shd->cmn, desc); bool success = 
true; for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { const sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS) ? &desc->vs : &desc->fs; - SOKOL_ASSERT((stage_desc->byte_code_size & 3) == 0); + SOKOL_ASSERT((stage_desc->bytecode.size & 3) == 0); _sg_shader_stage_t* cmn_stage = &shd->cmn.stage[stage_index]; _sg_wgpu_shader_stage_t* wgpu_stage = &shd->wgpu.stage[stage_index]; @@ -11542,8 +11675,8 @@ _SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_shader(_sg_shader_t* shd, const _sg_strcpy(&wgpu_stage->entry, stage_desc->entry); WGPUShaderModuleDescriptor wgpu_shdmod_desc; memset(&wgpu_shdmod_desc, 0, sizeof(wgpu_shdmod_desc)); - wgpu_shdmod_desc.codeSize = stage_desc->byte_code_size >> 2; - wgpu_shdmod_desc.code = (const uint32_t*) stage_desc->byte_code; + wgpu_shdmod_desc.codeSize = stage_desc->bytecode.size >> 2; + wgpu_shdmod_desc.code = (const uint32_t*) stage_desc->bytecode.ptr; wgpu_stage->module = wgpuDeviceCreateShaderModule(_sg.wgpu.dev, &wgpu_shdmod_desc); if (0 == wgpu_stage->module) { success = false; @@ -11565,7 +11698,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_shader(_sg_shader_t* shd, const tex_desc->binding = img_index; tex_desc->visibility = vis; tex_desc->type = WGPUBindingType_SampledTexture; - tex_desc->textureDimension = _sg_wgpu_tex_viewdim(cmn_stage->images[img_index].type); + tex_desc->textureDimension = _sg_wgpu_tex_viewdim(cmn_stage->images[img_index].image_type); tex_desc->textureComponentType = _sg_wgpu_tex_comptype(cmn_stage->images[img_index].sampler_type); smp_desc->binding = img_index + _SG_WGPU_MAX_SHADERSTAGE_IMAGES; @@ -11604,7 +11737,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_pipeline(_sg_pipeline_t* pip, _ SOKOL_ASSERT(shd->wgpu.stage[SG_SHADERSTAGE_FS].bind_group_layout); pip->shader = shd; _sg_pipeline_common_init(&pip->cmn, desc); - pip->wgpu.stencil_ref = (uint32_t) desc->depth_stencil.stencil_ref; + pip->wgpu.stencil_ref = (uint32_t) desc->stencil.ref; WGPUBindGroupLayout pip_bgl[3] = { _sg.wgpu.ub.bindgroup_layout, @@ -11657,27 +11790,27 @@ _SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_pipeline(_sg_pipeline_t* pip, _ WGPURasterizationStateDescriptor rs_desc; memset(&rs_desc, 0, sizeof(rs_desc)); - rs_desc.frontFace = _sg_wgpu_frontface(desc->rasterizer.face_winding); - rs_desc.cullMode = _sg_wgpu_cullmode(desc->rasterizer.cull_mode); - rs_desc.depthBias = (int32_t) desc->rasterizer.depth_bias; - rs_desc.depthBiasClamp = desc->rasterizer.depth_bias_clamp; - rs_desc.depthBiasSlopeScale = desc->rasterizer.depth_bias_slope_scale; + rs_desc.frontFace = _sg_wgpu_frontface(desc->face_winding); + rs_desc.cullMode = _sg_wgpu_cullmode(desc->cull_mode); + rs_desc.depthBias = (int32_t) desc->depth.bias; + rs_desc.depthBiasClamp = desc->depth.bias_clamp; + rs_desc.depthBiasSlopeScale = desc->depth.bias_slope_scale; WGPUDepthStencilStateDescriptor ds_desc; memset(&ds_desc, 0, sizeof(ds_desc)); - ds_desc.format = _sg_wgpu_textureformat(desc->blend.depth_format); - ds_desc.depthWriteEnabled = desc->depth_stencil.depth_write_enabled; - ds_desc.depthCompare = _sg_wgpu_comparefunc(desc->depth_stencil.depth_compare_func); - ds_desc.stencilReadMask = desc->depth_stencil.stencil_read_mask; - ds_desc.stencilWriteMask = desc->depth_stencil.stencil_write_mask; - ds_desc.stencilFront.compare = _sg_wgpu_comparefunc(desc->depth_stencil.stencil_front.compare_func); - ds_desc.stencilFront.failOp = _sg_wgpu_stencilop(desc->depth_stencil.stencil_front.fail_op); - ds_desc.stencilFront.depthFailOp = 
_sg_wgpu_stencilop(desc->depth_stencil.stencil_front.depth_fail_op); - ds_desc.stencilFront.passOp = _sg_wgpu_stencilop(desc->depth_stencil.stencil_front.pass_op); - ds_desc.stencilBack.compare = _sg_wgpu_comparefunc(desc->depth_stencil.stencil_back.compare_func); - ds_desc.stencilBack.failOp = _sg_wgpu_stencilop(desc->depth_stencil.stencil_back.fail_op); - ds_desc.stencilBack.depthFailOp = _sg_wgpu_stencilop(desc->depth_stencil.stencil_back.depth_fail_op); - ds_desc.stencilBack.passOp = _sg_wgpu_stencilop(desc->depth_stencil.stencil_back.pass_op); + ds_desc.format = _sg_wgpu_textureformat(desc->depth.pixel_format); + ds_desc.depthWriteEnabled = desc->depth.write_enabled; + ds_desc.depthCompare = _sg_wgpu_comparefunc(desc->depth.compare); + ds_desc.stencilReadMask = desc->stencil.read_mask; + ds_desc.stencilWriteMask = desc->stencil.write_mask; + ds_desc.stencilFront.compare = _sg_wgpu_comparefunc(desc->stencil.front.compare); + ds_desc.stencilFront.failOp = _sg_wgpu_stencilop(desc->stencil.front.fail_op); + ds_desc.stencilFront.depthFailOp = _sg_wgpu_stencilop(desc->stencil.front.depth_fail_op); + ds_desc.stencilFront.passOp = _sg_wgpu_stencilop(desc->stencil.front.pass_op); + ds_desc.stencilBack.compare = _sg_wgpu_comparefunc(desc->stencil.back.compare); + ds_desc.stencilBack.failOp = _sg_wgpu_stencilop(desc->stencil.back.fail_op); + ds_desc.stencilBack.depthFailOp = _sg_wgpu_stencilop(desc->stencil.back.depth_fail_op); + ds_desc.stencilBack.passOp = _sg_wgpu_stencilop(desc->stencil.back.pass_op); WGPUProgrammableStageDescriptor fs_desc; memset(&fs_desc, 0, sizeof(fs_desc)); @@ -11686,17 +11819,16 @@ _SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_pipeline(_sg_pipeline_t* pip, _ WGPUColorStateDescriptor cs_desc[SG_MAX_COLOR_ATTACHMENTS]; memset(cs_desc, 0, sizeof(cs_desc)); - cs_desc[0].format = _sg_wgpu_textureformat(desc->blend.color_format); - cs_desc[0].colorBlend.operation = _sg_wgpu_blendop(desc->blend.op_rgb); - cs_desc[0].colorBlend.srcFactor = _sg_wgpu_blendfactor(desc->blend.src_factor_rgb); - cs_desc[0].colorBlend.dstFactor = _sg_wgpu_blendfactor(desc->blend.dst_factor_rgb); - cs_desc[0].alphaBlend.operation = _sg_wgpu_blendop(desc->blend.op_alpha); - cs_desc[0].alphaBlend.srcFactor = _sg_wgpu_blendfactor(desc->blend.src_factor_alpha); - cs_desc[0].alphaBlend.dstFactor = _sg_wgpu_blendfactor(desc->blend.dst_factor_alpha); - cs_desc[0].writeMask = _sg_wgpu_colorwritemask(desc->blend.color_write_mask); - SOKOL_ASSERT(desc->blend.color_attachment_count <= SG_MAX_COLOR_ATTACHMENTS); - for (int i = 1; i < SG_MAX_COLOR_ATTACHMENTS; i++) { - cs_desc[i] = cs_desc[0]; + for (uint32_t i = 0; i < desc->color_count; i++) { + SOKOL_ASSERT(i < SG_MAX_COLOR_ATTACHMENTS); + cs_desc[i].format = _sg_wgpu_textureformat(desc->colors[i].pixel_format); + cs_desc[i].colorBlend.operation = _sg_wgpu_blendop(desc->colors[i].blend.op_rgb); + cs_desc[i].colorBlend.srcFactor = _sg_wgpu_blendfactor(desc->colors[i].blend.src_factor_rgb); + cs_desc[i].colorBlend.dstFactor = _sg_wgpu_blendfactor(desc->colors[i].blend.dst_factor_rgb); + cs_desc[i].alphaBlend.operation = _sg_wgpu_blendop(desc->colors[i].blend.op_alpha); + cs_desc[i].alphaBlend.srcFactor = _sg_wgpu_blendfactor(desc->colors[i].blend.src_factor_alpha); + cs_desc[i].alphaBlend.dstFactor = _sg_wgpu_blendfactor(desc->colors[i].blend.dst_factor_alpha); + cs_desc[i].writeMask = _sg_wgpu_colorwritemask(desc->colors[i].write_mask); } WGPURenderPipelineDescriptor pip_desc; @@ -11708,11 +11840,11 @@ _SOKOL_PRIVATE sg_resource_state 
_sg_wgpu_create_pipeline(_sg_pipeline_t* pip, _ pip_desc.vertexState = &vx_state_desc; pip_desc.primitiveTopology = _sg_wgpu_topology(desc->primitive_type); pip_desc.rasterizationState = &rs_desc; - pip_desc.sampleCount = desc->rasterizer.sample_count; - if (SG_PIXELFORMAT_NONE != desc->blend.depth_format) { + pip_desc.sampleCount = desc->sample_count; + if (SG_PIXELFORMAT_NONE != desc->depth.pixel_format) { pip_desc.depthStencilState = &ds_desc; } - pip_desc.colorStateCount = desc->blend.color_attachment_count; + pip_desc.colorStateCount = desc->color_count; pip_desc.colorStates = cs_desc; pip_desc.sampleMask = 0xFFFFFFFF; /* FIXME: ??? */ pip->wgpu.pip = wgpuDeviceCreateRenderPipeline(_sg.wgpu.dev, &pip_desc); @@ -11736,8 +11868,8 @@ _SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_pass(_sg_pass_t* pass, _sg_imag _sg_pass_common_init(&pass->cmn, desc); /* copy image pointers and create render-texture views */ - const sg_attachment_desc* att_desc; - for (int i = 0; i < pass->cmn.num_color_atts; i++) { + const sg_pass_attachment_desc* att_desc; + for (uint32_t i = 0; i < pass->cmn.num_color_atts; i++) { att_desc = &desc->color_attachments[i]; if (att_desc->image.id != SG_INVALID_ID) { SOKOL_ASSERT(att_desc->image.id != SG_INVALID_ID); @@ -11791,7 +11923,7 @@ _SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_pass(_sg_pass_t* pass, _sg_imag _SOKOL_PRIVATE void _sg_wgpu_destroy_pass(_sg_pass_t* pass) { SOKOL_ASSERT(pass); - for (int i = 0; i < pass->cmn.num_color_atts; i++) { + for (uint32_t i = 0; i < pass->cmn.num_color_atts; i++) { if (pass->wgpu.color_atts[i].render_tex_view) { wgpuTextureViewRelease(pass->wgpu.color_atts[i].render_tex_view); pass->wgpu.color_atts[i].render_tex_view = 0; @@ -11840,14 +11972,14 @@ _SOKOL_PRIVATE void _sg_wgpu_begin_pass(_sg_pass_t* pass, const sg_pass_action* WGPURenderPassColorAttachmentDescriptor wgpu_color_att_desc[SG_MAX_COLOR_ATTACHMENTS]; memset(&wgpu_color_att_desc, 0, sizeof(wgpu_color_att_desc)); SOKOL_ASSERT(pass->slot.state == SG_RESOURCESTATE_VALID); - for (int i = 0; i < pass->cmn.num_color_atts; i++) { + for (uint32_t i = 0; i < pass->cmn.num_color_atts; i++) { const _sg_wgpu_attachment_t* wgpu_att = &pass->wgpu.color_atts[i]; wgpu_color_att_desc[i].loadOp = _sg_wgpu_load_op(action->colors[i].action); wgpu_color_att_desc[i].storeOp = WGPUStoreOp_Store; - wgpu_color_att_desc[i].clearColor.r = action->colors[i].val[0]; - wgpu_color_att_desc[i].clearColor.g = action->colors[i].val[1]; - wgpu_color_att_desc[i].clearColor.b = action->colors[i].val[2]; - wgpu_color_att_desc[i].clearColor.a = action->colors[i].val[3]; + wgpu_color_att_desc[i].clearColor.r = action->colors[i].value.r; + wgpu_color_att_desc[i].clearColor.g = action->colors[i].value.g; + wgpu_color_att_desc[i].clearColor.b = action->colors[i].value.b; + wgpu_color_att_desc[i].clearColor.a = action->colors[i].value.a; wgpu_color_att_desc[i].attachment = wgpu_att->render_tex_view; if (wgpu_att->image->cmn.sample_count > 1) { wgpu_color_att_desc[i].resolveTarget = wgpu_att->resolve_tex_view; @@ -11859,9 +11991,9 @@ _SOKOL_PRIVATE void _sg_wgpu_begin_pass(_sg_pass_t* pass, const sg_pass_action* WGPURenderPassDepthStencilAttachmentDescriptor wgpu_ds_att_desc; memset(&wgpu_ds_att_desc, 0, sizeof(wgpu_ds_att_desc)); wgpu_ds_att_desc.depthLoadOp = _sg_wgpu_load_op(action->depth.action); - wgpu_ds_att_desc.clearDepth = action->depth.val; + wgpu_ds_att_desc.clearDepth = action->depth.value; wgpu_ds_att_desc.stencilLoadOp = _sg_wgpu_load_op(action->stencil.action); - wgpu_ds_att_desc.clearStencil 
= action->stencil.val; + wgpu_ds_att_desc.clearStencil = action->stencil.value; wgpu_ds_att_desc.attachment = pass->wgpu.ds_att.render_tex_view; wgpu_pass_desc.depthStencilAttachment = &wgpu_ds_att_desc; _sg.wgpu.pass_enc = wgpuCommandEncoderBeginRenderPass(_sg.wgpu.render_cmd_enc, &wgpu_pass_desc); @@ -11878,10 +12010,10 @@ _SOKOL_PRIVATE void _sg_wgpu_begin_pass(_sg_pass_t* pass, const sg_pass_action* WGPURenderPassColorAttachmentDescriptor color_att_desc; memset(&color_att_desc, 0, sizeof(color_att_desc)); color_att_desc.loadOp = _sg_wgpu_load_op(action->colors[0].action); - color_att_desc.clearColor.r = action->colors[0].val[0]; - color_att_desc.clearColor.g = action->colors[0].val[1]; - color_att_desc.clearColor.b = action->colors[0].val[2]; - color_att_desc.clearColor.a = action->colors[0].val[3]; + color_att_desc.clearColor.r = action->colors[0].value.r; + color_att_desc.clearColor.g = action->colors[0].value.g; + color_att_desc.clearColor.b = action->colors[0].value.b; + color_att_desc.clearColor.a = action->colors[0].value.a; color_att_desc.attachment = wgpu_render_view; color_att_desc.resolveTarget = wgpu_resolve_view; /* null if no MSAA rendering */ pass_desc.colorAttachmentCount = 1; @@ -11891,9 +12023,9 @@ _SOKOL_PRIVATE void _sg_wgpu_begin_pass(_sg_pass_t* pass, const sg_pass_action* ds_att_desc.attachment = wgpu_depth_stencil_view; SOKOL_ASSERT(0 != ds_att_desc.attachment); ds_att_desc.depthLoadOp = _sg_wgpu_load_op(action->depth.action); - ds_att_desc.clearDepth = action->depth.val; + ds_att_desc.clearDepth = action->depth.value; ds_att_desc.stencilLoadOp = _sg_wgpu_load_op(action->stencil.action); - ds_att_desc.clearStencil = action->stencil.val; + ds_att_desc.clearStencil = action->stencil.value; pass_desc.depthStencilAttachment = &ds_att_desc; _sg.wgpu.pass_enc = wgpuCommandEncoderBeginRenderPass(_sg.wgpu.render_cmd_enc, &pass_desc); } @@ -12000,7 +12132,7 @@ _SOKOL_PRIVATE void _sg_wgpu_apply_pipeline(_sg_pipeline_t* pip) { _sg.wgpu.cur_pipeline = pip; _sg.wgpu.cur_pipeline_id.id = pip->slot.id; wgpuRenderPassEncoderSetPipeline(_sg.wgpu.pass_enc, pip->wgpu.pip); - wgpuRenderPassEncoderSetBlendColor(_sg.wgpu.pass_enc, (WGPUColor*)pip->cmn.blend_color); + wgpuRenderPassEncoderSetBlendColor(_sg.wgpu.pass_enc, (WGPUColor*)&pip->cmn.blend_color); wgpuRenderPassEncoderSetStencilReference(_sg.wgpu.pass_enc, pip->wgpu.stencil_ref); } @@ -12077,31 +12209,28 @@ _SOKOL_PRIVATE void _sg_wgpu_apply_bindings( } } -_SOKOL_PRIVATE void _sg_wgpu_apply_uniforms(sg_shader_stage stage_index, int ub_index, const void* data, int num_bytes) { +_SOKOL_PRIVATE void _sg_wgpu_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) { SOKOL_ASSERT(_sg.wgpu.in_pass); SOKOL_ASSERT(_sg.wgpu.pass_enc); - SOKOL_ASSERT(data && (num_bytes > 0)); - SOKOL_ASSERT((stage_index >= 0) && ((int)stage_index < SG_NUM_SHADER_STAGES)); - SOKOL_ASSERT((ub_index >= 0) && (ub_index < SG_MAX_SHADERSTAGE_UBS)); - SOKOL_ASSERT((_sg.wgpu.ub.offset + num_bytes) <= _sg.wgpu.ub.num_bytes); + SOKOL_ASSERT((_sg.wgpu.ub.offset + data->size) <= _sg.wgpu.ub.num_bytes); SOKOL_ASSERT((_sg.wgpu.ub.offset & (_SG_WGPU_STAGING_ALIGN-1)) == 0); SOKOL_ASSERT(_sg.wgpu.cur_pipeline && _sg.wgpu.cur_pipeline->shader); SOKOL_ASSERT(_sg.wgpu.cur_pipeline->slot.id == _sg.wgpu.cur_pipeline_id.id); SOKOL_ASSERT(_sg.wgpu.cur_pipeline->shader->slot.id == _sg.wgpu.cur_pipeline->cmn.shader_id.id); SOKOL_ASSERT(ub_index < _sg.wgpu.cur_pipeline->shader->cmn.stage[stage_index].num_uniform_blocks); - SOKOL_ASSERT(num_bytes <= 
_sg.wgpu.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size); - SOKOL_ASSERT(num_bytes <= _SG_WGPU_MAX_UNIFORM_UPDATE_SIZE); + SOKOL_ASSERT(data->size <= _sg.wgpu.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size); + SOKOL_ASSERT(data->size <= _SG_WGPU_MAX_UNIFORM_UPDATE_SIZE); SOKOL_ASSERT(0 != _sg.wgpu.ub.stage.ptr[_sg.wgpu.ub.stage.cur]); uint8_t* dst_ptr = _sg.wgpu.ub.stage.ptr[_sg.wgpu.ub.stage.cur] + _sg.wgpu.ub.offset; - memcpy(dst_ptr, data, num_bytes); + memcpy(dst_ptr, data->ptr, data->size); _sg.wgpu.ub.bind_offsets[stage_index][ub_index] = _sg.wgpu.ub.offset; wgpuRenderPassEncoderSetBindGroup(_sg.wgpu.pass_enc, 0, /* groupIndex 0 is reserved for uniform buffers */ _sg.wgpu.ub.bindgroup, SG_NUM_SHADER_STAGES * SG_MAX_SHADERSTAGE_UBS, &_sg.wgpu.ub.bind_offsets[0][0]); - _sg.wgpu.ub.offset = _sg_roundup(_sg.wgpu.ub.offset + num_bytes, _SG_WGPU_STAGING_ALIGN); + _sg.wgpu.ub.offset = _sg_roundup(_sg.wgpu.ub.offset + data->size, _SG_WGPU_STAGING_ALIGN); } _SOKOL_PRIVATE void _sg_wgpu_draw(int base_element, int num_elements, int num_instances) { @@ -12115,21 +12244,21 @@ _SOKOL_PRIVATE void _sg_wgpu_draw(int base_element, int num_elements, int num_in } } -_SOKOL_PRIVATE void _sg_wgpu_update_buffer(_sg_buffer_t* buf, const void* data, uint32_t num_bytes) { - SOKOL_ASSERT(buf && data && (num_bytes > 0)); - uint32_t copied_num_bytes = _sg_wgpu_staging_copy_to_buffer(buf->wgpu.buf, 0, data, (uint32_t)num_bytes); +_SOKOL_PRIVATE void _sg_wgpu_update_buffer(_sg_buffer_t* buf, const sg_range* data) { + SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); + uint32_t copied_num_bytes = _sg_wgpu_staging_copy_to_buffer(buf->wgpu.buf, 0, data->ptr, data->size); SOKOL_ASSERT(copied_num_bytes > 0); _SOKOL_UNUSED(copied_num_bytes); } -_SOKOL_PRIVATE uint32_t _sg_wgpu_append_buffer(_sg_buffer_t* buf, const void* data, uint32_t num_bytes, bool new_frame) { - SOKOL_ASSERT(buf && data && (num_bytes > 0)); +_SOKOL_PRIVATE int _sg_wgpu_append_buffer(_sg_buffer_t* buf, const sg_range* data, bool new_frame) { + SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); _SOKOL_UNUSED(new_frame); - uint32_t copied_num_bytes = _sg_wgpu_staging_copy_to_buffer(buf->wgpu.buf, buf->cmn.append_pos, data, num_bytes); + uint32_t copied_num_bytes = _sg_wgpu_staging_copy_to_buffer(buf->wgpu.buf, buf->cmn.append_pos, data->ptr, data->size); SOKOL_ASSERT(copied_num_bytes > 0); _SOKOL_UNUSED(copied_num_bytes); - return copied_num_bytes; + return (int)copied_num_bytes; } -_SOKOL_PRIVATE void _sg_wgpu_update_image(_sg_image_t* img, const sg_image_content* data) { +_SOKOL_PRIVATE void _sg_wgpu_update_image(_sg_image_t* img, const sg_image_data* data) { SOKOL_ASSERT(img && data); bool success = _sg_wgpu_staging_copy_to_texture(img, data); SOKOL_ASSERT(success); @@ -12528,17 +12657,17 @@ static inline void _sg_apply_bindings( #endif } -static inline void _sg_apply_uniforms(sg_shader_stage stage_index, int ub_index, const void* data, int num_bytes) { +static inline void _sg_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) { #if defined(_SOKOL_ANY_GL) - _sg_gl_apply_uniforms(stage_index, ub_index, data, num_bytes); + _sg_gl_apply_uniforms(stage_index, ub_index, data); #elif defined(SOKOL_METAL) - _sg_mtl_apply_uniforms(stage_index, ub_index, data, num_bytes); + _sg_mtl_apply_uniforms(stage_index, ub_index, data); #elif defined(SOKOL_D3D11) - _sg_d3d11_apply_uniforms(stage_index, ub_index, data, num_bytes); + 
_sg_d3d11_apply_uniforms(stage_index, ub_index, data); #elif defined(SOKOL_WGPU) - _sg_wgpu_apply_uniforms(stage_index, ub_index, data, num_bytes); + _sg_wgpu_apply_uniforms(stage_index, ub_index, data); #elif defined(SOKOL_DUMMY_BACKEND) - _sg_dummy_apply_uniforms(stage_index, ub_index, data, num_bytes); + _sg_dummy_apply_uniforms(stage_index, ub_index, data); #else #error("INVALID BACKEND"); #endif @@ -12576,39 +12705,39 @@ static inline void _sg_commit(void) { #endif } -static inline void _sg_update_buffer(_sg_buffer_t* buf, const void* data_ptr, uint32_t data_size) { +static inline void _sg_update_buffer(_sg_buffer_t* buf, const sg_range* data) { #if defined(_SOKOL_ANY_GL) - _sg_gl_update_buffer(buf, data_ptr, data_size); + _sg_gl_update_buffer(buf, data); #elif defined(SOKOL_METAL) - _sg_mtl_update_buffer(buf, data_ptr, data_size); + _sg_mtl_update_buffer(buf, data); #elif defined(SOKOL_D3D11) - _sg_d3d11_update_buffer(buf, data_ptr, data_size); + _sg_d3d11_update_buffer(buf, data); #elif defined(SOKOL_WGPU) - _sg_wgpu_update_buffer(buf, data_ptr, data_size); + _sg_wgpu_update_buffer(buf, data); #elif defined(SOKOL_DUMMY_BACKEND) - _sg_dummy_update_buffer(buf, data_ptr, data_size); + _sg_dummy_update_buffer(buf, data); #else #error("INVALID BACKEND"); #endif } -static inline uint32_t _sg_append_buffer(_sg_buffer_t* buf, const void* data_ptr, uint32_t data_size, bool new_frame) { +static inline int _sg_append_buffer(_sg_buffer_t* buf, const sg_range* data, bool new_frame) { #if defined(_SOKOL_ANY_GL) - return _sg_gl_append_buffer(buf, data_ptr, data_size, new_frame); + return _sg_gl_append_buffer(buf, data, new_frame); #elif defined(SOKOL_METAL) - return _sg_mtl_append_buffer(buf, data_ptr, data_size, new_frame); + return _sg_mtl_append_buffer(buf, data, new_frame); #elif defined(SOKOL_D3D11) - return _sg_d3d11_append_buffer(buf, data_ptr, data_size, new_frame); + return _sg_d3d11_append_buffer(buf, data, new_frame); #elif defined(SOKOL_WGPU) - return _sg_wgpu_append_buffer(buf, data_ptr, data_size, new_frame); + return _sg_wgpu_append_buffer(buf, data, new_frame); #elif defined(SOKOL_DUMMY_BACKEND) - return _sg_dummy_append_buffer(buf, data_ptr, data_size, new_frame); + return _sg_dummy_append_buffer(buf, data, new_frame); #else #error("INVALID BACKEND"); #endif } -static inline void _sg_update_image(_sg_image_t* img, const sg_image_content* data) { +static inline void _sg_update_image(_sg_image_t* img, const sg_image_data* data) { #if defined(_SOKOL_ANY_GL) _sg_gl_update_image(img, data); #elif defined(SOKOL_METAL) @@ -12632,12 +12761,12 @@ _SOKOL_PRIVATE void _sg_init_pool(_sg_pool_t* pool, int num) { pool->size = num + 1; pool->queue_top = 0; /* generation counters indexable by pool slot index, slot 0 is reserved */ - size_t gen_ctrs_size = sizeof(uint32_t) * pool->size; + size_t gen_ctrs_size = sizeof(uint32_t) * (size_t)pool->size; pool->gen_ctrs = (uint32_t*) SOKOL_MALLOC(gen_ctrs_size); SOKOL_ASSERT(pool->gen_ctrs); memset(pool->gen_ctrs, 0, gen_ctrs_size); /* it's not a bug to only reserve 'num' here */ - pool->free_queue = (int*) SOKOL_MALLOC(sizeof(int)*num); + pool->free_queue = (int*) SOKOL_MALLOC(sizeof(int) * (size_t)num); SOKOL_ASSERT(pool->free_queue); /* never allocate the zero-th pool item since the invalid id is 0 */ for (int i = pool->size-1; i >= 1; i--) { @@ -12745,42 +12874,42 @@ _SOKOL_PRIVATE void _sg_setup_pools(_sg_pools_t* p, const sg_desc* desc) { /* note: the pools here will have an additional item, since slot 0 is reserved */ 
SOKOL_ASSERT((desc->buffer_pool_size > 0) && (desc->buffer_pool_size < _SG_MAX_POOL_SIZE)); _sg_init_pool(&p->buffer_pool, desc->buffer_pool_size); - size_t buffer_pool_byte_size = sizeof(_sg_buffer_t) * p->buffer_pool.size; + size_t buffer_pool_byte_size = sizeof(_sg_buffer_t) * (size_t)p->buffer_pool.size; p->buffers = (_sg_buffer_t*) SOKOL_MALLOC(buffer_pool_byte_size); SOKOL_ASSERT(p->buffers); memset(p->buffers, 0, buffer_pool_byte_size); SOKOL_ASSERT((desc->image_pool_size > 0) && (desc->image_pool_size < _SG_MAX_POOL_SIZE)); _sg_init_pool(&p->image_pool, desc->image_pool_size); - size_t image_pool_byte_size = sizeof(_sg_image_t) * p->image_pool.size; + size_t image_pool_byte_size = sizeof(_sg_image_t) * (size_t)p->image_pool.size; p->images = (_sg_image_t*) SOKOL_MALLOC(image_pool_byte_size); SOKOL_ASSERT(p->images); memset(p->images, 0, image_pool_byte_size); SOKOL_ASSERT((desc->shader_pool_size > 0) && (desc->shader_pool_size < _SG_MAX_POOL_SIZE)); _sg_init_pool(&p->shader_pool, desc->shader_pool_size); - size_t shader_pool_byte_size = sizeof(_sg_shader_t) * p->shader_pool.size; + size_t shader_pool_byte_size = sizeof(_sg_shader_t) * (size_t)p->shader_pool.size; p->shaders = (_sg_shader_t*) SOKOL_MALLOC(shader_pool_byte_size); SOKOL_ASSERT(p->shaders); memset(p->shaders, 0, shader_pool_byte_size); SOKOL_ASSERT((desc->pipeline_pool_size > 0) && (desc->pipeline_pool_size < _SG_MAX_POOL_SIZE)); _sg_init_pool(&p->pipeline_pool, desc->pipeline_pool_size); - size_t pipeline_pool_byte_size = sizeof(_sg_pipeline_t) * p->pipeline_pool.size; + size_t pipeline_pool_byte_size = sizeof(_sg_pipeline_t) * (size_t)p->pipeline_pool.size; p->pipelines = (_sg_pipeline_t*) SOKOL_MALLOC(pipeline_pool_byte_size); SOKOL_ASSERT(p->pipelines); memset(p->pipelines, 0, pipeline_pool_byte_size); SOKOL_ASSERT((desc->pass_pool_size > 0) && (desc->pass_pool_size < _SG_MAX_POOL_SIZE)); _sg_init_pool(&p->pass_pool, desc->pass_pool_size); - size_t pass_pool_byte_size = sizeof(_sg_pass_t) * p->pass_pool.size; + size_t pass_pool_byte_size = sizeof(_sg_pass_t) * (size_t)p->pass_pool.size; p->passes = (_sg_pass_t*) SOKOL_MALLOC(pass_pool_byte_size); SOKOL_ASSERT(p->passes); memset(p->passes, 0, pass_pool_byte_size); SOKOL_ASSERT((desc->context_pool_size > 0) && (desc->context_pool_size < _SG_MAX_POOL_SIZE)); _sg_init_pool(&p->context_pool, desc->context_pool_size); - size_t context_pool_byte_size = sizeof(_sg_context_t) * p->context_pool.size; + size_t context_pool_byte_size = sizeof(_sg_context_t) * (size_t)p->context_pool.size; p->contexts = (_sg_context_t*) SOKOL_MALLOC(context_pool_byte_size); SOKOL_ASSERT(p->contexts); memset(p->contexts, 0, context_pool_byte_size); @@ -12996,8 +13125,9 @@ _SOKOL_PRIVATE const char* _sg_validate_string(_sg_validate_error_t err) { /* buffer creation validation errors */ case _SG_VALIDATE_BUFFERDESC_CANARY: return "sg_buffer_desc not initialized"; case _SG_VALIDATE_BUFFERDESC_SIZE: return "sg_buffer_desc.size cannot be 0"; - case _SG_VALIDATE_BUFFERDESC_CONTENT: return "immutable buffers must be initialized with content (sg_buffer_desc.content)"; - case _SG_VALIDATE_BUFFERDESC_NO_CONTENT: return "dynamic/stream usage buffers cannot be initialized with content"; + case _SG_VALIDATE_BUFFERDESC_DATA: return "immutable buffers must be initialized with data (sg_buffer_desc.data.ptr and sg_buffer_desc.data.size)"; + case _SG_VALIDATE_BUFFERDESC_DATA_SIZE: return "immutable buffer data size differs from buffer size"; + case _SG_VALIDATE_BUFFERDESC_NO_DATA: return "dynamic/stream usage 
buffers cannot be initialized with data"; /* image creation validation errros */ case _SG_VALIDATE_IMAGEDESC_CANARY: return "sg_image_desc not initialized"; @@ -13008,9 +13138,9 @@ _SOKOL_PRIVATE const char* _sg_validate_string(_sg_validate_error_t err) { case _SG_VALIDATE_IMAGEDESC_MSAA_BUT_NO_RT: return "non-render-target images cannot be multisampled"; case _SG_VALIDATE_IMAGEDESC_NO_MSAA_RT_SUPPORT: return "MSAA not supported for this pixel format"; case _SG_VALIDATE_IMAGEDESC_RT_IMMUTABLE: return "render target images must be SG_USAGE_IMMUTABLE"; - case _SG_VALIDATE_IMAGEDESC_RT_NO_CONTENT: return "render target images cannot be initialized with content"; - case _SG_VALIDATE_IMAGEDESC_CONTENT: return "missing or invalid content for immutable image"; - case _SG_VALIDATE_IMAGEDESC_NO_CONTENT: return "dynamic/stream usage images cannot be initialized with content"; + case _SG_VALIDATE_IMAGEDESC_RT_NO_DATA: return "render target images cannot be initialized with data"; + case _SG_VALIDATE_IMAGEDESC_DATA: return "missing or invalid data for immutable image"; + case _SG_VALIDATE_IMAGEDESC_NO_DATA: return "dynamic/stream usage images cannot be initialized with data"; /* shader creation */ case _SG_VALIDATE_SHADERDESC_CANARY: return "sg_shader_desc not initialized"; @@ -13047,7 +13177,6 @@ _SOKOL_PRIVATE const char* _sg_validate_string(_sg_validate_error_t err) { case _SG_VALIDATE_PASSDESC_LAYER: return "pass attachment image is array texture, but layer index is too big"; case _SG_VALIDATE_PASSDESC_SLICE: return "pass attachment image is 3d texture, but slice value is too big"; case _SG_VALIDATE_PASSDESC_IMAGE_NO_RT: return "pass attachment image must be render targets"; - case _SG_VALIDATE_PASSDESC_COLOR_PIXELFORMATS: return "all pass color attachment images must have the same pixel format"; case _SG_VALIDATE_PASSDESC_COLOR_INV_PIXELFORMAT: return "pass color-attachment images must have a renderable pixel format"; case _SG_VALIDATE_PASSDESC_DEPTH_INV_PIXELFORMAT: return "pass depth-attachment image must have depth pixel format"; case _SG_VALIDATE_PASSDESC_IMAGE_SIZES: return "all pass attachments must have the same size"; @@ -13063,10 +13192,10 @@ _SOKOL_PRIVATE const char* _sg_validate_string(_sg_validate_error_t err) { case _SG_VALIDATE_APIP_PIPELINE_VALID: return "sg_apply_pipeline: pipeline object not in valid state"; case _SG_VALIDATE_APIP_SHADER_EXISTS: return "sg_apply_pipeline: shader object no longer alive"; case _SG_VALIDATE_APIP_SHADER_VALID: return "sg_apply_pipeline: shader object not in valid state"; - case _SG_VALIDATE_APIP_ATT_COUNT: return "sg_apply_pipeline: color_attachment_count in pipeline doesn't match number of pass color attachments"; - case _SG_VALIDATE_APIP_COLOR_FORMAT: return "sg_apply_pipeline: color_format in pipeline doesn't match pass color attachment pixel format"; - case _SG_VALIDATE_APIP_DEPTH_FORMAT: return "sg_apply_pipeline: depth_format in pipeline doesn't match pass depth attachment pixel format"; - case _SG_VALIDATE_APIP_SAMPLE_COUNT: return "sg_apply_pipeline: MSAA sample count in pipeline doesn't match render pass attachment sample count"; + case _SG_VALIDATE_APIP_ATT_COUNT: return "sg_apply_pipeline: number of pipeline color attachments doesn't match number of pass color attachments"; + case _SG_VALIDATE_APIP_COLOR_FORMAT: return "sg_apply_pipeline: pipeline color attachment pixel format doesn't match pass color attachment pixel format"; + case _SG_VALIDATE_APIP_DEPTH_FORMAT: return "sg_apply_pipeline: pipeline depth pixel_format doesn't match pass 
depth attachment pixel format"; + case _SG_VALIDATE_APIP_SAMPLE_COUNT: return "sg_apply_pipeline: pipeline MSAA sample count doesn't match render pass attachment sample count"; /* sg_apply_bindings */ case _SG_VALIDATE_ABND_PIPELINE: return "sg_apply_bindings: must be called after sg_apply_pipeline"; @@ -13158,10 +13287,11 @@ _SOKOL_PRIVATE bool _sg_validate_buffer_desc(const sg_buffer_desc* desc) { (0 != desc->d3d11_buffer) || (0 != desc->wgpu_buffer); if (!injected && (desc->usage == SG_USAGE_IMMUTABLE)) { - SOKOL_VALIDATE(0 != desc->content, _SG_VALIDATE_BUFFERDESC_CONTENT); + SOKOL_VALIDATE((0 != desc->data.ptr) && (desc->data.size > 0), _SG_VALIDATE_BUFFERDESC_DATA); + SOKOL_VALIDATE(desc->size == desc->data.size, _SG_VALIDATE_BUFFERDESC_DATA_SIZE); } else { - SOKOL_VALIDATE(0 == desc->content, _SG_VALIDATE_BUFFERDESC_NO_CONTENT); + SOKOL_VALIDATE(0 == desc->data.ptr, _SG_VALIDATE_BUFFERDESC_NO_DATA); } return SOKOL_VALIDATE_END(); #endif @@ -13198,7 +13328,7 @@ _SOKOL_PRIVATE bool _sg_validate_image_desc(const sg_image_desc* desc) { } #endif SOKOL_VALIDATE(usage == SG_USAGE_IMMUTABLE, _SG_VALIDATE_IMAGEDESC_RT_IMMUTABLE); - SOKOL_VALIDATE(desc->content.subimage[0][0].ptr==0, _SG_VALIDATE_IMAGEDESC_RT_NO_CONTENT); + SOKOL_VALIDATE(desc->data.subimage[0][0].ptr==0, _SG_VALIDATE_IMAGEDESC_RT_NO_DATA); } else { SOKOL_VALIDATE(desc->sample_count <= 1, _SG_VALIDATE_IMAGEDESC_MSAA_BUT_NO_RT); @@ -13210,18 +13340,18 @@ _SOKOL_PRIVATE bool _sg_validate_image_desc(const sg_image_desc* desc) { const int num_mips = desc->num_mipmaps; for (int face_index = 0; face_index < num_faces; face_index++) { for (int mip_index = 0; mip_index < num_mips; mip_index++) { - const bool has_data = desc->content.subimage[face_index][mip_index].ptr != 0; - const bool has_size = desc->content.subimage[face_index][mip_index].size > 0; - SOKOL_VALIDATE(has_data && has_size, _SG_VALIDATE_IMAGEDESC_CONTENT); + const bool has_data = desc->data.subimage[face_index][mip_index].ptr != 0; + const bool has_size = desc->data.subimage[face_index][mip_index].size > 0; + SOKOL_VALIDATE(has_data && has_size, _SG_VALIDATE_IMAGEDESC_DATA); } } } else { for (int face_index = 0; face_index < SG_CUBEFACE_NUM; face_index++) { for (int mip_index = 0; mip_index < SG_MAX_MIPMAPS; mip_index++) { - const bool no_data = 0 == desc->content.subimage[face_index][mip_index].ptr; - const bool no_size = 0 == desc->content.subimage[face_index][mip_index].size; - SOKOL_VALIDATE(no_data && no_size, _SG_VALIDATE_IMAGEDESC_NO_CONTENT); + const bool no_data = 0 == desc->data.subimage[face_index][mip_index].ptr; + const bool no_size = 0 == desc->data.subimage[face_index][mip_index].size; + SOKOL_VALIDATE(no_data && no_size, _SG_VALIDATE_IMAGEDESC_NO_DATA); } } } @@ -13250,12 +13380,12 @@ _SOKOL_PRIVATE bool _sg_validate_shader_desc(const sg_shader_desc* desc) { SOKOL_VALIDATE(0 != desc->fs.source, _SG_VALIDATE_SHADERDESC_SOURCE); #elif defined(SOKOL_METAL) || defined(SOKOL_D3D11) /* on Metal or D3D11, must provide shader source code or byte code */ - SOKOL_VALIDATE((0 != desc->vs.source)||(0 != desc->vs.byte_code), _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE); - SOKOL_VALIDATE((0 != desc->fs.source)||(0 != desc->fs.byte_code), _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE); + SOKOL_VALIDATE((0 != desc->vs.source)||(0 != desc->vs.bytecode.ptr), _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE); + SOKOL_VALIDATE((0 != desc->fs.source)||(0 != desc->fs.bytecode.ptr), _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE); #elif defined(SOKOL_WGPU) /* on WGPU byte code must be 
provided */ - SOKOL_VALIDATE((0 != desc->vs.byte_code), _SG_VALIDATE_SHADERDESC_BYTECODE); - SOKOL_VALIDATE((0 != desc->fs.byte_code), _SG_VALIDATE_SHADERDESC_BYTECODE); + SOKOL_VALIDATE((0 != desc->vs.bytecode.ptr), _SG_VALIDATE_SHADERDESC_BYTECODE); + SOKOL_VALIDATE((0 != desc->fs.bytecode.ptr), _SG_VALIDATE_SHADERDESC_BYTECODE); #else /* Dummy Backend, don't require source or bytecode */ #endif @@ -13268,11 +13398,11 @@ _SOKOL_PRIVATE bool _sg_validate_shader_desc(const sg_shader_desc* desc) { } } /* if shader byte code, the size must also be provided */ - if (0 != desc->vs.byte_code) { - SOKOL_VALIDATE(desc->vs.byte_code_size > 0, _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE); + if (0 != desc->vs.bytecode.ptr) { + SOKOL_VALIDATE(desc->vs.bytecode.size > 0, _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE); } - if (0 != desc->fs.byte_code) { - SOKOL_VALIDATE(desc->fs.byte_code_size > 0, _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE); + if (0 != desc->fs.bytecode.ptr) { + SOKOL_VALIDATE(desc->fs.bytecode.size > 0, _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE); } for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { const sg_shader_stage_desc* stage_desc = (stage_index == 0)? &desc->vs : &desc->fs; @@ -13300,7 +13430,7 @@ _SOKOL_PRIVATE bool _sg_validate_shader_desc(const sg_shader_desc* desc) { } } #if defined(SOKOL_GLCORE33) || defined(SOKOL_GLES2) || defined(SOKOL_GLES3) - SOKOL_VALIDATE(uniform_offset == ub_desc->size, _SG_VALIDATE_SHADERDESC_UB_SIZE_MISMATCH); + SOKOL_VALIDATE((size_t)uniform_offset == ub_desc->size, _SG_VALIDATE_SHADERDESC_UB_SIZE_MISMATCH); SOKOL_VALIDATE(num_uniforms > 0, _SG_VALIDATE_SHADERDESC_NO_UB_MEMBERS); #endif } @@ -13311,7 +13441,7 @@ _SOKOL_PRIVATE bool _sg_validate_shader_desc(const sg_shader_desc* desc) { bool images_continuous = true; for (int img_index = 0; img_index < SG_MAX_SHADERSTAGE_IMAGES; img_index++) { const sg_shader_image_desc* img_desc = &stage_desc->images[img_index]; - if (img_desc->type != _SG_IMAGETYPE_DEFAULT) { + if (img_desc->image_type != _SG_IMAGETYPE_DEFAULT) { SOKOL_VALIDATE(images_continuous, _SG_VALIDATE_SHADERDESC_NO_CONT_IMGS); #if defined(SOKOL_GLES2) SOKOL_VALIDATE(0 != img_desc->name, _SG_VALIDATE_SHADERDESC_IMG_NAME); @@ -13380,10 +13510,9 @@ _SOKOL_PRIVATE bool _sg_validate_pass_desc(const sg_pass_desc* desc) { SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_PASSDESC_CANARY); SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_PASSDESC_CANARY); bool atts_cont = true; - sg_pixel_format color_fmt = SG_PIXELFORMAT_NONE; int width = -1, height = -1, sample_count = -1; for (int att_index = 0; att_index < SG_MAX_COLOR_ATTACHMENTS; att_index++) { - const sg_attachment_desc* att = &desc->color_attachments[att_index]; + const sg_pass_attachment_desc* att = &desc->color_attachments[att_index]; if (att->image.id == SG_INVALID_ID) { SOKOL_VALIDATE(att_index > 0, _SG_VALIDATE_PASSDESC_NO_COLOR_ATTS); atts_cont = false; @@ -13391,7 +13520,8 @@ _SOKOL_PRIVATE bool _sg_validate_pass_desc(const sg_pass_desc* desc) { } SOKOL_VALIDATE(atts_cont, _SG_VALIDATE_PASSDESC_NO_CONT_COLOR_ATTS); const _sg_image_t* img = _sg_lookup_image(&_sg.pools, att->image.id); - SOKOL_VALIDATE((0 != img) && (img->slot.state == SG_RESOURCESTATE_VALID), _SG_VALIDATE_PASSDESC_IMAGE); + SOKOL_ASSERT(img); + SOKOL_VALIDATE(img->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_PASSDESC_IMAGE); SOKOL_VALIDATE(att->mip_level < img->cmn.num_mipmaps, _SG_VALIDATE_PASSDESC_MIPLEVEL); if (img->cmn.type == SG_IMAGETYPE_CUBE) { SOKOL_VALIDATE(att->slice < 6, 
_SG_VALIDATE_PASSDESC_FACE); @@ -13404,13 +13534,11 @@ _SOKOL_PRIVATE bool _sg_validate_pass_desc(const sg_pass_desc* desc) { } SOKOL_VALIDATE(img->cmn.render_target, _SG_VALIDATE_PASSDESC_IMAGE_NO_RT); if (att_index == 0) { - color_fmt = img->cmn.pixel_format; width = img->cmn.width >> att->mip_level; height = img->cmn.height >> att->mip_level; sample_count = img->cmn.sample_count; } else { - SOKOL_VALIDATE(img->cmn.pixel_format == color_fmt, _SG_VALIDATE_PASSDESC_COLOR_PIXELFORMATS); SOKOL_VALIDATE(width == img->cmn.width >> att->mip_level, _SG_VALIDATE_PASSDESC_IMAGE_SIZES); SOKOL_VALIDATE(height == img->cmn.height >> att->mip_level, _SG_VALIDATE_PASSDESC_IMAGE_SIZES); SOKOL_VALIDATE(sample_count == img->cmn.sample_count, _SG_VALIDATE_PASSDESC_IMAGE_SAMPLE_COUNTS); @@ -13418,9 +13546,10 @@ _SOKOL_PRIVATE bool _sg_validate_pass_desc(const sg_pass_desc* desc) { SOKOL_VALIDATE(_sg_is_valid_rendertarget_color_format(img->cmn.pixel_format), _SG_VALIDATE_PASSDESC_COLOR_INV_PIXELFORMAT); } if (desc->depth_stencil_attachment.image.id != SG_INVALID_ID) { - const sg_attachment_desc* att = &desc->depth_stencil_attachment; + const sg_pass_attachment_desc* att = &desc->depth_stencil_attachment; const _sg_image_t* img = _sg_lookup_image(&_sg.pools, att->image.id); - SOKOL_VALIDATE((0 != img) && (img->slot.state == SG_RESOURCESTATE_VALID), _SG_VALIDATE_PASSDESC_IMAGE); + SOKOL_ASSERT(img); + SOKOL_VALIDATE(img->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_PASSDESC_IMAGE); SOKOL_VALIDATE(att->mip_level < img->cmn.num_mipmaps, _SG_VALIDATE_PASSDESC_MIPLEVEL); if (img->cmn.type == SG_IMAGETYPE_CUBE) { SOKOL_VALIDATE(att->slice < 6, _SG_VALIDATE_PASSDESC_FACE); @@ -13450,7 +13579,7 @@ _SOKOL_PRIVATE bool _sg_validate_begin_pass(_sg_pass_t* pass) { SOKOL_VALIDATE(pass->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_BEGINPASS_PASS); for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { - const _sg_attachment_t* att = &pass->cmn.color_atts[i]; + const _sg_pass_attachment_t* att = &pass->cmn.color_atts[i]; const _sg_image_t* img = _sg_pass_color_image(pass, i); if (img) { SOKOL_VALIDATE(img->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_BEGINPASS_IMAGE); @@ -13459,7 +13588,7 @@ _SOKOL_PRIVATE bool _sg_validate_begin_pass(_sg_pass_t* pass) { } const _sg_image_t* ds_img = _sg_pass_ds_image(pass); if (ds_img) { - const _sg_attachment_t* att = &pass->cmn.ds_att; + const _sg_pass_attachment_t* att = &pass->cmn.ds_att; SOKOL_VALIDATE(ds_img->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_BEGINPASS_IMAGE); SOKOL_VALIDATE(ds_img->slot.id == att->image_id.id, _SG_VALIDATE_BEGINPASS_IMAGE); } @@ -13489,10 +13618,12 @@ _SOKOL_PRIVATE bool _sg_validate_apply_pipeline(sg_pipeline pip_id) { const _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, _sg.cur_pass.id); if (pass) { /* an offscreen pass */ - const _sg_image_t* att_img = _sg_pass_color_image(pass, 0); SOKOL_VALIDATE(pip->cmn.color_attachment_count == pass->cmn.num_color_atts, _SG_VALIDATE_APIP_ATT_COUNT); - SOKOL_VALIDATE(pip->cmn.color_format == att_img->cmn.pixel_format, _SG_VALIDATE_APIP_COLOR_FORMAT); - SOKOL_VALIDATE(pip->cmn.sample_count == att_img->cmn.sample_count, _SG_VALIDATE_APIP_SAMPLE_COUNT); + for (int i = 0; i < pip->cmn.color_attachment_count; i++) { + const _sg_image_t* att_img = _sg_pass_color_image(pass, i); + SOKOL_VALIDATE(pip->cmn.color_formats[i] == att_img->cmn.pixel_format, _SG_VALIDATE_APIP_COLOR_FORMAT); + SOKOL_VALIDATE(pip->cmn.sample_count == att_img->cmn.sample_count, _SG_VALIDATE_APIP_SAMPLE_COUNT); + } const 
_sg_image_t* att_dsimg = _sg_pass_ds_image(pass); if (att_dsimg) { SOKOL_VALIDATE(pip->cmn.depth_format == att_dsimg->cmn.pixel_format, _SG_VALIDATE_APIP_DEPTH_FORMAT); @@ -13504,7 +13635,7 @@ _SOKOL_PRIVATE bool _sg_validate_apply_pipeline(sg_pipeline pip_id) { else { /* default pass */ SOKOL_VALIDATE(pip->cmn.color_attachment_count == 1, _SG_VALIDATE_APIP_ATT_COUNT); - SOKOL_VALIDATE(pip->cmn.color_format == _sg.desc.context.color_format, _SG_VALIDATE_APIP_COLOR_FORMAT); + SOKOL_VALIDATE(pip->cmn.color_formats[0] == _sg.desc.context.color_format, _SG_VALIDATE_APIP_COLOR_FORMAT); SOKOL_VALIDATE(pip->cmn.depth_format == _sg.desc.context.depth_format, _SG_VALIDATE_APIP_DEPTH_FORMAT); SOKOL_VALIDATE(pip->cmn.sample_count == _sg.desc.context.sample_count, _SG_VALIDATE_APIP_SAMPLE_COUNT); } @@ -13574,7 +13705,7 @@ _SOKOL_PRIVATE bool _sg_validate_apply_bindings(const sg_bindings* bindings) { const _sg_image_t* img = _sg_lookup_image(&_sg.pools, bindings->vs_images[i].id); SOKOL_VALIDATE(img != 0, _SG_VALIDATE_ABND_VS_IMG_EXISTS); if (img && img->slot.state == SG_RESOURCESTATE_VALID) { - SOKOL_VALIDATE(img->cmn.type == stage->images[i].type, _SG_VALIDATE_ABND_VS_IMG_TYPES); + SOKOL_VALIDATE(img->cmn.type == stage->images[i].image_type, _SG_VALIDATE_ABND_VS_IMG_TYPES); } } else { @@ -13590,7 +13721,7 @@ _SOKOL_PRIVATE bool _sg_validate_apply_bindings(const sg_bindings* bindings) { const _sg_image_t* img = _sg_lookup_image(&_sg.pools, bindings->fs_images[i].id); SOKOL_VALIDATE(img != 0, _SG_VALIDATE_ABND_FS_IMG_EXISTS); if (img && img->slot.state == SG_RESOURCESTATE_VALID) { - SOKOL_VALIDATE(img->cmn.type == stage->images[i].type, _SG_VALIDATE_ABND_FS_IMG_TYPES); + SOKOL_VALIDATE(img->cmn.type == stage->images[i].image_type, _SG_VALIDATE_ABND_FS_IMG_TYPES); } } else { @@ -13601,12 +13732,11 @@ _SOKOL_PRIVATE bool _sg_validate_apply_bindings(const sg_bindings* bindings) { #endif } -_SOKOL_PRIVATE bool _sg_validate_apply_uniforms(sg_shader_stage stage_index, int ub_index, const void* data, int num_bytes) { - _SOKOL_UNUSED(data); +_SOKOL_PRIVATE bool _sg_validate_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) { #if !defined(SOKOL_DEBUG) _SOKOL_UNUSED(stage_index); _SOKOL_UNUSED(ub_index); - _SOKOL_UNUSED(num_bytes); + _SOKOL_UNUSED(data); return true; #else SOKOL_ASSERT((stage_index == SG_SHADERSTAGE_VS) || (stage_index == SG_SHADERSTAGE_FS)); @@ -13622,46 +13752,44 @@ _SOKOL_PRIVATE bool _sg_validate_apply_uniforms(sg_shader_stage stage_index, int SOKOL_VALIDATE(ub_index < stage->num_uniform_blocks, _SG_VALIDATE_AUB_NO_UB_AT_SLOT); /* check that the provided data size doesn't exceed the uniform block size */ - SOKOL_VALIDATE(num_bytes <= stage->uniform_blocks[ub_index].size, _SG_VALIDATE_AUB_SIZE); + SOKOL_VALIDATE(data->size <= stage->uniform_blocks[ub_index].size, _SG_VALIDATE_AUB_SIZE); return SOKOL_VALIDATE_END(); #endif } -_SOKOL_PRIVATE bool _sg_validate_update_buffer(const _sg_buffer_t* buf, const void* data, int size) { +_SOKOL_PRIVATE bool _sg_validate_update_buffer(const _sg_buffer_t* buf, const sg_range* data) { #if !defined(SOKOL_DEBUG) _SOKOL_UNUSED(buf); _SOKOL_UNUSED(data); - _SOKOL_UNUSED(size); return true; #else - SOKOL_ASSERT(buf && data); + SOKOL_ASSERT(buf && data && data->ptr); SOKOL_VALIDATE_BEGIN(); SOKOL_VALIDATE(buf->cmn.usage != SG_USAGE_IMMUTABLE, _SG_VALIDATE_UPDATEBUF_USAGE); - SOKOL_VALIDATE(buf->cmn.size >= size, _SG_VALIDATE_UPDATEBUF_SIZE); + SOKOL_VALIDATE(buf->cmn.size >= (int)data->size, _SG_VALIDATE_UPDATEBUF_SIZE); 
SOKOL_VALIDATE(buf->cmn.update_frame_index != _sg.frame_index, _SG_VALIDATE_UPDATEBUF_ONCE); SOKOL_VALIDATE(buf->cmn.append_frame_index != _sg.frame_index, _SG_VALIDATE_UPDATEBUF_APPEND); return SOKOL_VALIDATE_END(); #endif } -_SOKOL_PRIVATE bool _sg_validate_append_buffer(const _sg_buffer_t* buf, const void* data, int size) { +_SOKOL_PRIVATE bool _sg_validate_append_buffer(const _sg_buffer_t* buf, const sg_range* data) { #if !defined(SOKOL_DEBUG) _SOKOL_UNUSED(buf); _SOKOL_UNUSED(data); - _SOKOL_UNUSED(size); return true; #else - SOKOL_ASSERT(buf && data); + SOKOL_ASSERT(buf && data && data->ptr); SOKOL_VALIDATE_BEGIN(); SOKOL_VALIDATE(buf->cmn.usage != SG_USAGE_IMMUTABLE, _SG_VALIDATE_APPENDBUF_USAGE); - SOKOL_VALIDATE(buf->cmn.size >= (buf->cmn.append_pos+size), _SG_VALIDATE_APPENDBUF_SIZE); + SOKOL_VALIDATE(buf->cmn.size >= (buf->cmn.append_pos + (int)data->size), _SG_VALIDATE_APPENDBUF_SIZE); SOKOL_VALIDATE(buf->cmn.update_frame_index != _sg.frame_index, _SG_VALIDATE_APPENDBUF_UPDATE); return SOKOL_VALIDATE_END(); #endif } -_SOKOL_PRIVATE bool _sg_validate_update_image(const _sg_image_t* img, const sg_image_content* data) { +_SOKOL_PRIVATE bool _sg_validate_update_image(const _sg_image_t* img, const sg_image_data* data) { #if !defined(SOKOL_DEBUG) _SOKOL_UNUSED(img); _SOKOL_UNUSED(data); @@ -13681,7 +13809,7 @@ _SOKOL_PRIVATE bool _sg_validate_update_image(const _sg_image_t* img, const sg_i const int mip_height = _sg_max(img->cmn.height >> mip_index, 1); const int bytes_per_slice = _sg_surface_pitch(img->cmn.pixel_format, mip_width, mip_height, 1); const int expected_size = bytes_per_slice * img->cmn.num_slices; - SOKOL_VALIDATE(data->subimage[face_index][mip_index].size <= expected_size, _SG_VALIDATE_UPDIMG_SIZE); + SOKOL_VALIDATE(data->subimage[face_index][mip_index].size <= (size_t)expected_size, _SG_VALIDATE_UPDIMG_SIZE); } } return SOKOL_VALIDATE_END(); @@ -13693,6 +13821,12 @@ _SOKOL_PRIVATE sg_buffer_desc _sg_buffer_desc_defaults(const sg_buffer_desc* des sg_buffer_desc def = *desc; def.type = _sg_def(def.type, SG_BUFFERTYPE_VERTEXBUFFER); def.usage = _sg_def(def.usage, SG_USAGE_IMMUTABLE); + if (def.size == 0) { + def.size = def.data.size; + } + else if (def.data.size == 0) { + def.data.size = def.size; + } return def; } @@ -13755,7 +13889,7 @@ _SOKOL_PRIVATE sg_shader_desc _sg_shader_desc_defaults(const sg_shader_desc* des } for (int img_index = 0; img_index < SG_MAX_SHADERSTAGE_IMAGES; img_index++) { sg_shader_image_desc* img_desc = &stage_desc->images[img_index]; - if (img_desc->type == _SG_IMAGETYPE_DEFAULT) { + if (img_desc->image_type == _SG_IMAGETYPE_DEFAULT) { break; } img_desc->sampler_type = _sg_def(img_desc->sampler_type, SG_SAMPLERTYPE_FLOAT); @@ -13769,43 +13903,44 @@ _SOKOL_PRIVATE sg_pipeline_desc _sg_pipeline_desc_defaults(const sg_pipeline_des def.primitive_type = _sg_def(def.primitive_type, SG_PRIMITIVETYPE_TRIANGLES); def.index_type = _sg_def(def.index_type, SG_INDEXTYPE_NONE); - - def.depth_stencil.stencil_front.fail_op = _sg_def(def.depth_stencil.stencil_front.fail_op, SG_STENCILOP_KEEP); - def.depth_stencil.stencil_front.depth_fail_op = _sg_def(def.depth_stencil.stencil_front.depth_fail_op, SG_STENCILOP_KEEP); - def.depth_stencil.stencil_front.pass_op = _sg_def(def.depth_stencil.stencil_front.pass_op, SG_STENCILOP_KEEP); - def.depth_stencil.stencil_front.compare_func = _sg_def(def.depth_stencil.stencil_front.compare_func, SG_COMPAREFUNC_ALWAYS); - def.depth_stencil.stencil_back.fail_op = _sg_def(def.depth_stencil.stencil_back.fail_op, SG_STENCILOP_KEEP); 
- def.depth_stencil.stencil_back.depth_fail_op = _sg_def(def.depth_stencil.stencil_back.depth_fail_op, SG_STENCILOP_KEEP); - def.depth_stencil.stencil_back.pass_op = _sg_def(def.depth_stencil.stencil_back.pass_op, SG_STENCILOP_KEEP); - def.depth_stencil.stencil_back.compare_func = _sg_def(def.depth_stencil.stencil_back.compare_func, SG_COMPAREFUNC_ALWAYS); - def.depth_stencil.depth_compare_func = _sg_def(def.depth_stencil.depth_compare_func, SG_COMPAREFUNC_ALWAYS); - - def.blend.src_factor_rgb = _sg_def(def.blend.src_factor_rgb, SG_BLENDFACTOR_ONE); - def.blend.dst_factor_rgb = _sg_def(def.blend.dst_factor_rgb, SG_BLENDFACTOR_ZERO); - def.blend.op_rgb = _sg_def(def.blend.op_rgb, SG_BLENDOP_ADD); - def.blend.src_factor_alpha = _sg_def(def.blend.src_factor_alpha, SG_BLENDFACTOR_ONE); - def.blend.dst_factor_alpha = _sg_def(def.blend.dst_factor_alpha, SG_BLENDFACTOR_ZERO); - def.blend.op_alpha = _sg_def(def.blend.op_alpha, SG_BLENDOP_ADD); - if (def.blend.color_write_mask == SG_COLORMASK_NONE) { - def.blend.color_write_mask = 0; - } - else { - def.blend.color_write_mask = (uint8_t) _sg_def((sg_color_mask)def.blend.color_write_mask, SG_COLORMASK_RGBA); + def.cull_mode = _sg_def(def.cull_mode, SG_CULLMODE_NONE); + def.face_winding = _sg_def(def.face_winding, SG_FACEWINDING_CW); + def.sample_count = _sg_def(def.sample_count, _sg.desc.context.sample_count); + + def.stencil.front.compare = _sg_def(def.stencil.front.compare, SG_COMPAREFUNC_ALWAYS); + def.stencil.front.fail_op = _sg_def(def.stencil.front.fail_op, SG_STENCILOP_KEEP); + def.stencil.front.depth_fail_op = _sg_def(def.stencil.front.depth_fail_op, SG_STENCILOP_KEEP); + def.stencil.front.pass_op = _sg_def(def.stencil.front.pass_op, SG_STENCILOP_KEEP); + def.stencil.back.compare = _sg_def(def.stencil.back.compare, SG_COMPAREFUNC_ALWAYS); + def.stencil.back.fail_op = _sg_def(def.stencil.back.fail_op, SG_STENCILOP_KEEP); + def.stencil.back.depth_fail_op = _sg_def(def.stencil.back.depth_fail_op, SG_STENCILOP_KEEP); + def.stencil.back.pass_op = _sg_def(def.stencil.back.pass_op, SG_STENCILOP_KEEP); + + def.depth.compare = _sg_def(def.depth.compare, SG_COMPAREFUNC_ALWAYS); + def.depth.pixel_format = _sg_def(def.depth.pixel_format, _sg.desc.context.depth_format); + def.color_count = _sg_def(def.color_count, 1); + if (def.color_count > SG_MAX_COLOR_ATTACHMENTS) { + def.color_count = SG_MAX_COLOR_ATTACHMENTS; + } + for (int i = 0; i < def.color_count; i++) { + sg_color_state* cs = &def.colors[i]; + cs->pixel_format = _sg_def(cs->pixel_format, _sg.desc.context.color_format); + cs->write_mask = _sg_def(cs->write_mask, SG_COLORMASK_RGBA); + sg_blend_state* bs = &def.colors[i].blend; + bs->src_factor_rgb = _sg_def(bs->src_factor_rgb, SG_BLENDFACTOR_ONE); + bs->dst_factor_rgb = _sg_def(bs->dst_factor_rgb, SG_BLENDFACTOR_ZERO); + bs->op_rgb = _sg_def(bs->op_rgb, SG_BLENDOP_ADD); + bs->src_factor_alpha = _sg_def(bs->src_factor_alpha, SG_BLENDFACTOR_ONE); + bs->dst_factor_alpha = _sg_def(bs->dst_factor_alpha, SG_BLENDFACTOR_ZERO); + bs->op_alpha = _sg_def(bs->op_alpha, SG_BLENDOP_ADD); } - def.blend.color_attachment_count = _sg_def(def.blend.color_attachment_count, 1); - def.blend.color_format = _sg_def(def.blend.color_format, _sg.desc.context.color_format); - def.blend.depth_format = _sg_def(def.blend.depth_format, _sg.desc.context.depth_format); - - def.rasterizer.cull_mode = _sg_def(def.rasterizer.cull_mode, SG_CULLMODE_NONE); - def.rasterizer.face_winding = _sg_def(def.rasterizer.face_winding, SG_FACEWINDING_CW); - def.rasterizer.sample_count = 
_sg_def(def.rasterizer.sample_count, _sg.desc.context.sample_count); for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { sg_vertex_attr_desc* a_desc = &def.layout.attrs[attr_index]; if (a_desc->format == SG_VERTEXFORMAT_INVALID) { break; } - SOKOL_ASSERT((a_desc->buffer_index >= 0) && (a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS)); + SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS); sg_buffer_layout_desc* b_desc = &def.layout.buffers[a_desc->buffer_index]; b_desc->step_func = _sg_def(b_desc->step_func, SG_VERTEXSTEP_PER_VERTEX); b_desc->step_rate = _sg_def(b_desc->step_rate, 1); @@ -13826,7 +13961,7 @@ _SOKOL_PRIVATE sg_pipeline_desc _sg_pipeline_desc_defaults(const sg_pipeline_des if (a_desc->format == SG_VERTEXFORMAT_INVALID) { break; } - SOKOL_ASSERT((a_desc->buffer_index >= 0) && (a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS)); + SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS); if (use_auto_offset) { a_desc->offset = auto_offset[a_desc->buffer_index]; } @@ -14640,6 +14775,10 @@ SOKOL_API_IMPL void sg_begin_default_pass(const sg_pass_action* pass_action, int _SG_TRACE_ARGS(begin_default_pass, pass_action, width, height); } +SOKOL_API_IMPL void sg_begin_default_passf(const sg_pass_action* pass_action, float width, float height) { + sg_begin_default_pass(pass_action, (int)width, (int)height); +} + SOKOL_API_IMPL void sg_begin_pass(sg_pass pass_id, const sg_pass_action* pass_action) { SOKOL_ASSERT(_sg.valid); SOKOL_ASSERT(pass_action); @@ -14673,6 +14812,10 @@ SOKOL_API_IMPL void sg_apply_viewport(int x, int y, int width, int height, bool _SG_TRACE_ARGS(apply_viewport, x, y, width, height, origin_top_left); } +SOKOL_API_IMPL void sg_apply_viewportf(float x, float y, float width, float height, bool origin_top_left) { + sg_apply_viewport((int)x, (int)y, (int)width, (int)height, origin_top_left); +} + SOKOL_API_IMPL void sg_apply_scissor_rect(int x, int y, int width, int height, bool origin_top_left) { SOKOL_ASSERT(_sg.valid); if (!_sg.pass_valid) { @@ -14683,6 +14826,10 @@ SOKOL_API_IMPL void sg_apply_scissor_rect(int x, int y, int width, int height, b _SG_TRACE_ARGS(apply_scissor_rect, x, y, width, height, origin_top_left); } +SOKOL_API_IMPL void sg_apply_scissor_rectf(float x, float y, float width, float height, bool origin_top_left) { + sg_apply_scissor_rect((int)x, (int)y, (int)width, (int)height, origin_top_left); +} + SOKOL_API_IMPL void sg_apply_pipeline(sg_pipeline pip_id) { SOKOL_ASSERT(_sg.valid); _sg.bindings_valid = false; @@ -14776,12 +14923,12 @@ SOKOL_API_IMPL void sg_apply_bindings(const sg_bindings* bindings) { } } -SOKOL_API_IMPL void sg_apply_uniforms(sg_shader_stage stage, int ub_index, const void* data, int num_bytes) { +SOKOL_API_IMPL void sg_apply_uniforms(sg_shader_stage stage, int ub_index, const sg_range* data) { SOKOL_ASSERT(_sg.valid); SOKOL_ASSERT((stage == SG_SHADERSTAGE_VS) || (stage == SG_SHADERSTAGE_FS)); SOKOL_ASSERT((ub_index >= 0) && (ub_index < SG_MAX_SHADERSTAGE_UBS)); - SOKOL_ASSERT(data && (num_bytes > 0)); - if (!_sg_validate_apply_uniforms(stage, ub_index, data, num_bytes)) { + SOKOL_ASSERT(data && data->ptr && (data->size > 0)); + if (!_sg_validate_apply_uniforms(stage, ub_index, data)) { _sg.next_draw_valid = false; _SG_TRACE_NOARGS(err_draw_invalid); return; @@ -14793,13 +14940,15 @@ SOKOL_API_IMPL void sg_apply_uniforms(sg_shader_stage stage, int ub_index, const if (!_sg.next_draw_valid) { _SG_TRACE_NOARGS(err_draw_invalid); } - _sg_apply_uniforms(stage, ub_index, data, 
num_bytes); - _SG_TRACE_ARGS(apply_uniforms, stage, ub_index, data, num_bytes); + _sg_apply_uniforms(stage, ub_index, data); + _SG_TRACE_ARGS(apply_uniforms, stage, ub_index, data); } SOKOL_API_IMPL void sg_draw(int base_element, int num_elements, int num_instances) { SOKOL_ASSERT(_sg.valid); - SOKOL_ASSERT((base_element >= 0) && (num_elements >= 0) && (num_instances >= 0)); + SOKOL_ASSERT(base_element >= 0); + SOKOL_ASSERT(num_elements >= 0); + SOKOL_ASSERT(num_instances >= 0); #if defined(SOKOL_DEBUG) if (!_sg.bindings_valid) { SOKOL_LOG("attempting to draw without resource bindings"); @@ -14854,25 +15003,27 @@ SOKOL_API_IMPL void sg_reset_state_cache(void) { _SG_TRACE_NOARGS(reset_state_cache); } -SOKOL_API_IMPL void sg_update_buffer(sg_buffer buf_id, const void* data, int num_bytes) { +SOKOL_API_IMPL void sg_update_buffer(sg_buffer buf_id, const sg_range* data) { SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(data && data->ptr && (data->size > 0)); _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); - if ((num_bytes > 0) && buf && (buf->slot.state == SG_RESOURCESTATE_VALID)) { - if (_sg_validate_update_buffer(buf, data, num_bytes)) { - SOKOL_ASSERT(num_bytes <= buf->cmn.size); + if ((data->size > 0) && buf && (buf->slot.state == SG_RESOURCESTATE_VALID)) { + if (_sg_validate_update_buffer(buf, data)) { + SOKOL_ASSERT(data->size <= (size_t)buf->cmn.size); /* only one update allowed per buffer and frame */ SOKOL_ASSERT(buf->cmn.update_frame_index != _sg.frame_index); /* update and append on same buffer in same frame not allowed */ SOKOL_ASSERT(buf->cmn.append_frame_index != _sg.frame_index); - _sg_update_buffer(buf, data, (uint32_t)num_bytes); + _sg_update_buffer(buf, data); buf->cmn.update_frame_index = _sg.frame_index; } } - _SG_TRACE_ARGS(update_buffer, buf_id, data, num_bytes); + _SG_TRACE_ARGS(update_buffer, buf_id, data); } -SOKOL_API_IMPL int sg_append_buffer(sg_buffer buf_id, const void* data, int num_bytes) { +SOKOL_API_IMPL int sg_append_buffer(sg_buffer buf_id, const sg_range* data) { SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(data && data->ptr); _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); int result; if (buf) { @@ -14881,16 +15032,16 @@ SOKOL_API_IMPL int sg_append_buffer(sg_buffer buf_id, const void* data, int num_ buf->cmn.append_pos = 0; buf->cmn.append_overflow = false; } - if ((buf->cmn.append_pos + _sg_roundup(num_bytes, 4)) > buf->cmn.size) { + if ((buf->cmn.append_pos + _sg_roundup((int)data->size, 4)) > buf->cmn.size) { buf->cmn.append_overflow = true; } const int start_pos = buf->cmn.append_pos; if (buf->slot.state == SG_RESOURCESTATE_VALID) { - if (_sg_validate_append_buffer(buf, data, num_bytes)) { - if (!buf->cmn.append_overflow && (num_bytes > 0)) { + if (_sg_validate_append_buffer(buf, data)) { + if (!buf->cmn.append_overflow && (data->size > 0)) { /* update and append on same buffer in same frame not allowed */ SOKOL_ASSERT(buf->cmn.update_frame_index != _sg.frame_index); - uint32_t copied_num_bytes = _sg_append_buffer(buf, data, (uint32_t)num_bytes, buf->cmn.append_frame_index != _sg.frame_index); + int copied_num_bytes = _sg_append_buffer(buf, data, buf->cmn.append_frame_index != _sg.frame_index); buf->cmn.append_pos += copied_num_bytes; buf->cmn.append_frame_index = _sg.frame_index; } @@ -14902,7 +15053,7 @@ SOKOL_API_IMPL int sg_append_buffer(sg_buffer buf_id, const void* data, int num_ /* FIXME: should we return -1 here? 
*/ result = 0; } - _SG_TRACE_ARGS(append_buffer, buf_id, data, num_bytes, result); + _SG_TRACE_ARGS(append_buffer, buf_id, data, result); return result; } @@ -14913,7 +15064,7 @@ SOKOL_API_IMPL bool sg_query_buffer_overflow(sg_buffer buf_id) { return result; } -SOKOL_API_IMPL void sg_update_image(sg_image img_id, const sg_image_content* data) { +SOKOL_API_IMPL void sg_update_image(sg_image img_id, const sg_image_data* data) { SOKOL_ASSERT(_sg.valid); _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id); if (img && img->slot.state == SG_RESOURCESTATE_VALID) { diff --git a/sokol_time.h b/sokol_time.h index 29ecdacf3..8821da703 100644 --- a/sokol_time.h +++ b/sokol_time.h @@ -236,10 +236,10 @@ SOKOL_API_IMPL uint64_t stm_now(void) { #if defined(_WIN32) LARGE_INTEGER qpc_t; QueryPerformanceCounter(&qpc_t); - now = int64_muldiv(qpc_t.QuadPart - _stm.start.QuadPart, 1000000000, _stm.freq.QuadPart); + now = (uint64_t) int64_muldiv(qpc_t.QuadPart - _stm.start.QuadPart, 1000000000, _stm.freq.QuadPart); #elif defined(__APPLE__) && defined(__MACH__) const uint64_t mach_now = mach_absolute_time() - _stm.start; - now = int64_muldiv(mach_now, _stm.timebase.numer, _stm.timebase.denom); + now = (uint64_t) int64_muldiv((int64_t)mach_now, (int64_t)_stm.timebase.numer, (int64_t)_stm.timebase.denom); #elif defined(__EMSCRIPTEN__) double js_now = stm_js_perfnow() - _stm.start; SOKOL_ASSERT(js_now >= 0.0); diff --git a/util/sokol_debugtext.h b/util/sokol_debugtext.h index b07bfe39a..236d4778f 100644 --- a/util/sokol_debugtext.h +++ b/util/sokol_debugtext.h @@ -303,8 +303,10 @@ .fonts = { [0] = sdtx_font_kc853(), [1] = { - .ptr = my_font_data, - .size = sizeof(my_font_data) + .data = { + .ptr = my_font_data, + .size = sizeof(my_font_data) + }, .first_char = ..., .last_char = ... } @@ -330,14 +332,15 @@ be rendered). If you provide such a complete font data array, you can drop the .first_char - and .last_char initialization parameters since those default to 0 and 255: + and .last_char initialization parameters since those default to 0 and 255, + note that you can also use the SDTX_RANGE() helper macro to build the + .data item: sdtx_setup(&sdtx_desc_t){ .fonts = { [0] = sdtx_font_kc853(), [1] = { - .ptr = my_font_data, - .size = sizeof(my_font_data) + .data = SDTX_RANGE(my_font_data) } } }); @@ -352,8 +355,7 @@ .fonts = { [0] = sdtx_font_kc853(), [1] = { - .ptr = my_font_data, - .size = sizeof(my_font_data) + .data = SDTX_RANGE(my_font_data), .first_char = 32, // could also write ' ' .last_char = 90 // could also write 'Z' } @@ -391,6 +393,7 @@ #define SOKOL_DEBUGTEXT_INCLUDED (1) #include #include +#include // size_t #include // va_list #if !defined(SOKOL_GFX_INCLUDED) @@ -426,6 +429,27 @@ typedef struct sdtx_context { uint32_t id; } sdtx_context; /* the default context handle */ static const sdtx_context SDTX_DEFAULT_CONTEXT = { 0x00010001 }; +/* + sdtx_range is a pointer-size-pair struct used to pass memory + blobs into sokol-debugtext. When initialized from a value type + (array or struct), use the SDTX_RANGE() macro to build + an sdtx_range struct. 
+*/ +typedef struct sdtx_range { + const void* ptr; + size_t size; +} sdtx_range; + +// disabling this for every includer isn't great, but the warning is also quite pointless +#if defined(_MSC_VER) +#pragma warning(disable:4221) /* /W4 only: nonstandard extension used: 'x': cannot be initialized using address of automatic variable 'y' */ +#endif +#if defined(__cplusplus) +#define SDTX_RANGE(x) sdtx_range{ &x, sizeof(x) } +#else +#define SDTX_RANGE(x) (sdtx_range){ &x, sizeof(x) } +#endif + /* sdtx_font_desc_t @@ -450,8 +474,7 @@ static const sdtx_context SDTX_DEFAULT_CONTEXT = { 0x00010001 }; #define SDTX_MAX_FONTS (8) typedef struct sdtx_font_desc_t { - const uint8_t* ptr; // pointer to font pixel data - int16_t size; // byte size of font pixel data + sdtx_range data; // pointer to and size of font pixel data uint8_t first_char; // first character index in font pixel data uint8_t last_char; // last character index in font pixel data, inclusive (default: 255) } sdtx_font_desc_t; @@ -3411,12 +3434,12 @@ static void _sdtx_init_pool(_sdtx_pool_t* pool, int num) { pool->size = num + 1; pool->queue_top = 0; /* generation counters indexable by pool slot index, slot 0 is reserved */ - size_t gen_ctrs_size = sizeof(uint32_t) * pool->size; + size_t gen_ctrs_size = sizeof(uint32_t) * (size_t)pool->size; pool->gen_ctrs = (uint32_t*) SOKOL_MALLOC(gen_ctrs_size); SOKOL_ASSERT(pool->gen_ctrs); memset(pool->gen_ctrs, 0, gen_ctrs_size); /* it's not a bug to only reserve 'num' here */ - pool->free_queue = (int*) SOKOL_MALLOC(sizeof(int)*num); + pool->free_queue = (int*) SOKOL_MALLOC(sizeof(int) * (size_t)num); SOKOL_ASSERT(pool->free_queue); /* never allocate the zero-th pool item since the invalid id is 0 */ for (int i = pool->size-1; i >= 1; i--) { @@ -3470,7 +3493,7 @@ static void _sdtx_setup_context_pool(const sdtx_desc_t* desc) { /* note: the pool will have an additional item, since slot 0 is reserved */ SOKOL_ASSERT((desc->context_pool_size > 0) && (desc->context_pool_size < _SDTX_MAX_POOL_SIZE)); _sdtx_init_pool(&_sdtx.context_pool.pool, desc->context_pool_size); - size_t pool_byte_size = sizeof(_sdtx_context_t) * _sdtx.context_pool.pool.size; + size_t pool_byte_size = sizeof(_sdtx_context_t) * (size_t)_sdtx.context_pool.pool.size; _sdtx.context_pool.contexts = (_sdtx_context_t*) SOKOL_MALLOC(pool_byte_size); SOKOL_ASSERT(_sdtx.context_pool.contexts); memset(_sdtx.context_pool.contexts, 0, pool_byte_size); @@ -3557,6 +3580,9 @@ static sdtx_context_desc_t _sdtx_context_desc_defaults(const sdtx_context_desc_t res.canvas_height = _sdtx_def(res.canvas_height, _SDTX_DEFAULT_CANVAS_HEIGHT); res.tab_width = _sdtx_def(res.tab_width, _SDTX_DEFAULT_TAB_WIDTH); /* keep pixel format attrs are passed as is into pipeline creation */ + SOKOL_ASSERT(res.char_buf_size > 0); + SOKOL_ASSERT(res.canvas_width > 0.0f); + SOKOL_ASSERT(res.canvas_height > 0.0f); return res; } @@ -3567,11 +3593,9 @@ static void _sdtx_init_context(sdtx_context ctx_id, const sdtx_context_desc_t* i _sdtx_context_t* ctx = _sdtx_lookup_context(ctx_id.id); SOKOL_ASSERT(ctx); ctx->desc = _sdtx_context_desc_defaults(in_desc); - SOKOL_ASSERT(ctx->desc.canvas_width > 0.0f); - SOKOL_ASSERT(ctx->desc.canvas_height > 0.0f); - const uint32_t max_vertices = 6 * ctx->desc.char_buf_size; - const int vbuf_size = max_vertices * sizeof(_sdtx_vertex_t); + const int max_vertices = 6 * ctx->desc.char_buf_size; + const size_t vbuf_size = (size_t)max_vertices * sizeof(_sdtx_vertex_t); ctx->vertices = (_sdtx_vertex_t*) SOKOL_MALLOC(vbuf_size); 
SOKOL_ASSERT(ctx->vertices); ctx->cur_vertex_ptr = ctx->vertices; @@ -3594,14 +3618,14 @@ static void _sdtx_init_context(sdtx_context ctx_id, const sdtx_context_desc_t* i pip_desc.layout.attrs[2].format = SG_VERTEXFORMAT_UBYTE4N; pip_desc.shader = _sdtx.shader; pip_desc.index_type = SG_INDEXTYPE_NONE; - pip_desc.blend.enabled = true; - pip_desc.blend.src_factor_rgb = SG_BLENDFACTOR_SRC_ALPHA; - pip_desc.blend.dst_factor_rgb = SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA; - pip_desc.blend.src_factor_alpha = SG_BLENDFACTOR_ZERO; - pip_desc.blend.dst_factor_alpha = SG_BLENDFACTOR_ONE; - pip_desc.blend.color_format = ctx->desc.color_format; - pip_desc.blend.depth_format = ctx->desc.depth_format; - pip_desc.rasterizer.sample_count = ctx->desc.sample_count; + pip_desc.sample_count = ctx->desc.sample_count; + pip_desc.depth.pixel_format = ctx->desc.depth_format; + pip_desc.colors[0].pixel_format = ctx->desc.color_format; + pip_desc.colors[0].blend.enabled = true; + pip_desc.colors[0].blend.src_factor_rgb = SG_BLENDFACTOR_SRC_ALPHA; + pip_desc.colors[0].blend.dst_factor_rgb = SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA; + pip_desc.colors[0].blend.src_factor_alpha = SG_BLENDFACTOR_ZERO; + pip_desc.colors[0].blend.dst_factor_alpha = SG_BLENDFACTOR_ONE; pip_desc.label = "sdtx-pipeline"; ctx->pip = sg_make_pipeline(&pip_desc); SOKOL_ASSERT(SG_INVALID_ID != ctx->pip.id); @@ -3640,11 +3664,11 @@ static bool _sdtx_is_default_context(sdtx_context ctx_id) { /* unpack linear 8x8 bits-per-pixel font data into 2D byte-per-pixel texture data */ static void _sdtx_unpack_font(const sdtx_font_desc_t* font_desc, uint8_t* out_pixels) { - SOKOL_ASSERT(font_desc->ptr); - SOKOL_ASSERT((font_desc->size > 0) && ((font_desc->size % 8) == 0)); + SOKOL_ASSERT(font_desc->data.ptr); + SOKOL_ASSERT((font_desc->data.size > 0) && ((font_desc->data.size % 8) == 0)); SOKOL_ASSERT(font_desc->first_char <= font_desc->last_char); - SOKOL_ASSERT((((font_desc->last_char - font_desc->first_char) + 1) * 8) == font_desc->size); - const uint8_t* ptr = font_desc->ptr; + SOKOL_ASSERT((size_t)(((font_desc->last_char - font_desc->first_char) + 1) * 8) == font_desc->data.size); + const uint8_t* ptr = (const uint8_t*) font_desc->data.ptr; for (int chr = font_desc->first_char; chr <= font_desc->last_char; chr++) { for (int line = 0; line < 8; line++) { uint8_t bits = *ptr++; @@ -3658,7 +3682,7 @@ static void _sdtx_unpack_font(const sdtx_font_desc_t* font_desc, uint8_t* out_pi static void _sdtx_setup_common(void) { /* common printf formatting buffer */ - _sdtx.fmt_buf_size = _sdtx.desc.printf_buf_size + 1; + _sdtx.fmt_buf_size = (uint32_t) _sdtx.desc.printf_buf_size + 1; _sdtx.fmt_buf = (char*) SOKOL_MALLOC(_sdtx.fmt_buf_size); SOKOL_ASSERT(_sdtx.fmt_buf); @@ -3678,7 +3702,7 @@ static void _sdtx_setup_common(void) { shd_desc.attrs[2].sem_name = "TEXCOORD"; shd_desc.attrs[2].sem_index = 2; shd_desc.fs.images[0].name = "tex"; - shd_desc.fs.images[0].type = SG_IMAGETYPE_2D; + shd_desc.fs.images[0].image_type = SG_IMAGETYPE_2D; shd_desc.fs.images[0].sampler_type = SG_SAMPLERTYPE_FLOAT; #if defined(SOKOL_GLCORE33) shd_desc.vs.source = _sdtx_vs_src_glcore33; @@ -3691,16 +3715,12 @@ static void _sdtx_setup_common(void) { shd_desc.fs.entry = "main0"; switch (sg_query_backend()) { case SG_BACKEND_METAL_MACOS: - shd_desc.vs.byte_code = _sdtx_vs_bytecode_metal_macos; - shd_desc.vs.byte_code_size = sizeof(_sdtx_vs_bytecode_metal_macos); - shd_desc.fs.byte_code = _sdtx_fs_bytecode_metal_macos; - shd_desc.fs.byte_code_size = sizeof(_sdtx_fs_bytecode_metal_macos); + 
shd_desc.vs.bytecode = SG_RANGE(_sdtx_vs_bytecode_metal_macos); + shd_desc.fs.bytecode = SG_RANGE(_sdtx_fs_bytecode_metal_macos); break; case SG_BACKEND_METAL_IOS: - shd_desc.vs.byte_code = _sdtx_vs_bytecode_metal_ios; - shd_desc.vs.byte_code_size = sizeof(_sdtx_vs_bytecode_metal_ios); - shd_desc.fs.byte_code = _sdtx_fs_bytecode_metal_ios; - shd_desc.fs.byte_code_size = sizeof(_sdtx_fs_bytecode_metal_ios); + shd_desc.vs.bytecode = SG_RANGE(_sdtx_vs_bytecode_metal_ios); + shd_desc.fs.bytecode = SG_RANGE(_sdtx_fs_bytecode_metal_ios); break; default: shd_desc.vs.source = _sdtx_vs_src_metal_sim; @@ -3708,10 +3728,8 @@ static void _sdtx_setup_common(void) { break; } #elif defined(SOKOL_D3D11) - shd_desc.vs.byte_code = _sdtx_vs_bytecode_d3d11; - shd_desc.vs.byte_code_size = sizeof(_sdtx_vs_bytecode_d3d11); - shd_desc.fs.byte_code = _sdtx_fs_bytecode_d3d11; - shd_desc.fs.byte_code_size = sizeof(_sdtx_fs_bytecode_d3d11); + shd_desc.vs.bytecode = SG_RANGE(_sdtx_vs_bytecode_d3d11); + shd_desc.fs.bytecode = SG_RANGE(_sdtx_fs_bytecode_d3d11); #elif defined(SOKOL_WGPU) shd_desc.vs.byte_code = _sdtx_vs_bytecode_wgpu; shd_desc.vs.byte_code_size = sizeof(_sdtx_vs_bytecode_wgpu); @@ -3728,7 +3746,7 @@ static void _sdtx_setup_common(void) { memset(_sdtx.font_pixels, 0xFF, sizeof(_sdtx.font_pixels)); const int unpacked_font_size = 256 * 8 * 8; for (int i = 0; i < SDTX_MAX_FONTS; i++) { - if (_sdtx.desc.fonts[i].ptr) { + if (_sdtx.desc.fonts[i].data.ptr) { _sdtx_unpack_font(&_sdtx.desc.fonts[i], &_sdtx.font_pixels[i * unpacked_font_size]); } } @@ -3743,8 +3761,7 @@ static void _sdtx_setup_common(void) { img_desc.mag_filter = SG_FILTER_NEAREST; img_desc.wrap_u = SG_WRAP_CLAMP_TO_EDGE; img_desc.wrap_v = SG_WRAP_CLAMP_TO_EDGE; - img_desc.content.subimage[0][0].ptr = _sdtx.font_pixels; - img_desc.content.subimage[0][0].size = sizeof(_sdtx.font_pixels); + img_desc.data.subimage[0][0] = SG_RANGE(_sdtx.font_pixels); _sdtx.font_img = sg_make_image(&img_desc); SOKOL_ASSERT(SG_INVALID_ID != _sdtx.font_img.id); @@ -3837,7 +3854,7 @@ static inline void _sdtx_draw_char(_sdtx_context_t* ctx, uint8_t c) { } static inline void _sdtx_put_char(_sdtx_context_t* ctx, char c) { - uint8_t c_u8 = c; + uint8_t c_u8 = (uint8_t)c; if (c_u8 <= 32) { _sdtx_ctrl_char(ctx, c_u8); } @@ -3851,11 +3868,14 @@ static sdtx_desc_t _sdtx_desc_defaults(const sdtx_desc_t* in_desc) { desc.context_pool_size = _sdtx_def(desc.context_pool_size, _SDTX_DEFAULT_CONTEXT_POOL_SIZE); desc.printf_buf_size = _sdtx_def(desc.printf_buf_size, _SDTX_DEFAULT_PRINTF_BUF_SIZE); for (int i = 0; i < SDTX_MAX_FONTS; i++) { - if (desc.fonts[i].ptr) { + if (desc.fonts[i].data.ptr) { desc.fonts[i].last_char = _sdtx_def(desc.fonts[i].last_char, 255); } } desc.context = _sdtx_context_desc_defaults(&desc.context); + SOKOL_ASSERT(desc.context_pool_size > 0); + SOKOL_ASSERT(desc.printf_buf_size > 0); + SOKOL_ASSERT(desc.context.char_buf_size > 0); return desc; } @@ -3884,32 +3904,32 @@ SOKOL_API_IMPL void sdtx_shutdown(void) { } SOKOL_API_IMPL sdtx_font_desc_t sdtx_font_kc853(void) { - sdtx_font_desc_t desc = { _sdtx_font_kc853, sizeof(_sdtx_font_kc853), 0, 255 }; + sdtx_font_desc_t desc = { { _sdtx_font_kc853, sizeof(_sdtx_font_kc853) }, 0, 255 }; return desc; } SOKOL_API_IMPL sdtx_font_desc_t sdtx_font_kc854(void) { - sdtx_font_desc_t desc = { _sdtx_font_kc854, sizeof(_sdtx_font_kc854), 0, 255 }; + sdtx_font_desc_t desc = { { _sdtx_font_kc854, sizeof(_sdtx_font_kc854) }, 0, 255 }; return desc; } SOKOL_API_IMPL sdtx_font_desc_t sdtx_font_z1013(void) { - sdtx_font_desc_t desc = 
{ _sdtx_font_z1013, sizeof(_sdtx_font_z1013), 0, 255 }; + sdtx_font_desc_t desc = { { _sdtx_font_z1013, sizeof(_sdtx_font_z1013) }, 0, 255 }; return desc; } SOKOL_API_IMPL sdtx_font_desc_t sdtx_font_cpc(void) { - sdtx_font_desc_t desc = { _sdtx_font_cpc, sizeof(_sdtx_font_cpc), 0, 255 }; + sdtx_font_desc_t desc = { { _sdtx_font_cpc, sizeof(_sdtx_font_cpc) }, 0, 255 }; return desc; } SOKOL_API_IMPL sdtx_font_desc_t sdtx_font_c64(void) { - sdtx_font_desc_t desc = { _sdtx_font_c64, sizeof(_sdtx_font_c64), 0, 255 }; + sdtx_font_desc_t desc = { { _sdtx_font_c64, sizeof(_sdtx_font_c64) }, 0, 255 }; return desc; } SOKOL_API_IMPL sdtx_font_desc_t sdtx_font_oric(void) { - sdtx_font_desc_t desc = { _sdtx_font_oric, sizeof(_sdtx_font_oric), 0, 255 }; + sdtx_font_desc_t desc = { { _sdtx_font_oric, sizeof(_sdtx_font_oric) }, 0, 255 }; return desc; } @@ -4161,7 +4181,8 @@ SOKOL_API_IMPL void sdtx_draw(void) { if (num_verts > 0) { SOKOL_ASSERT((num_verts % 6) == 0); sg_push_debug_group("sokol-debugtext"); - int vbuf_offset = sg_append_buffer(ctx->vbuf, ctx->vertices, num_verts * sizeof(_sdtx_vertex_t)); + const sg_range range = { ctx->vertices, (size_t)num_verts * sizeof(_sdtx_vertex_t) }; + int vbuf_offset = sg_append_buffer(ctx->vbuf, &range); sg_apply_pipeline(ctx->pip); sg_bindings bindings; memset(&bindings, 0, sizeof(bindings)); diff --git a/util/sokol_fontstash.h b/util/sokol_fontstash.h index 1db200190..9bb915fb0 100644 --- a/util/sokol_fontstash.h +++ b/util/sokol_fontstash.h @@ -1610,7 +1610,7 @@ static int _sfons_render_create(void* user_ptr, int width, int height) { ub->uniforms[0].type = SG_UNIFORMTYPE_FLOAT4; ub->uniforms[0].array_count = 8; shd_desc.fs.images[0].name = "tex"; - shd_desc.fs.images[0].type = SG_IMAGETYPE_2D; + shd_desc.fs.images[0].image_type = SG_IMAGETYPE_2D; shd_desc.fs.images[0].sampler_type = SG_SAMPLERTYPE_FLOAT; shd_desc.label = "sokol-fontstash-shader"; #if defined(SOKOL_GLCORE33) @@ -1624,16 +1624,12 @@ static int _sfons_render_create(void* user_ptr, int width, int height) { shd_desc.fs.entry = "main0"; switch (sg_query_backend()) { case SG_BACKEND_METAL_MACOS: - shd_desc.vs.byte_code = _sfons_vs_bytecode_metal_macos; - shd_desc.vs.byte_code_size = sizeof(_sfons_vs_bytecode_metal_macos); - shd_desc.fs.byte_code = _sfons_fs_bytecode_metal_macos; - shd_desc.fs.byte_code_size = sizeof(_sfons_fs_bytecode_metal_macos); + shd_desc.vs.bytecode = SG_RANGE(_sfons_vs_bytecode_metal_macos); + shd_desc.fs.bytecode = SG_RANGE(_sfons_fs_bytecode_metal_macos); break; case SG_BACKEND_METAL_IOS: - shd_desc.vs.byte_code = _sfons_vs_bytecode_metal_ios; - shd_desc.vs.byte_code_size = sizeof(_sfons_vs_bytecode_metal_ios); - shd_desc.fs.byte_code = _sfons_fs_bytecode_metal_ios; - shd_desc.fs.byte_code_size = sizeof(_sfons_fs_bytecode_metal_ios); + shd_desc.vs.bytecode = SG_RANGE(_sfons_vs_bytecode_metal_ios); + shd_desc.fs.bytecode = SG_RANGE(_sfons_fs_bytecode_metal_ios); break; default: shd_desc.vs.source = _sfons_vs_source_metal_sim; @@ -1641,10 +1637,8 @@ static int _sfons_render_create(void* user_ptr, int width, int height) { break; } #elif defined(SOKOL_D3D11) - shd_desc.vs.byte_code = _sfons_vs_bytecode_hlsl4; - shd_desc.vs.byte_code_size = sizeof(_sfons_vs_bytecode_hlsl4); - shd_desc.fs.byte_code = _sfons_fs_bytecode_hlsl4; - shd_desc.fs.byte_code_size = sizeof(_sfons_fs_bytecode_hlsl4); + shd_desc.vs.bytecode = SG_RANGE(_sfons_vs_bytecode_hlsl4); + shd_desc.fs.bytecode = SG_RANGE(_sfons_fs_bytecode_hlsl4); #elif defined(SOKOL_WGPU) shd_desc.vs.byte_code = 
_sfons_vs_bytecode_wgpu; shd_desc.vs.byte_code_size = sizeof(_sfons_vs_bytecode_wgpu); @@ -1663,9 +1657,9 @@ static int _sfons_render_create(void* user_ptr, int width, int height) { sg_pipeline_desc pip_desc; memset(&pip_desc, 0, sizeof(pip_desc)); pip_desc.shader = sfons->shd; - pip_desc.blend.enabled = true; - pip_desc.blend.src_factor_rgb = SG_BLENDFACTOR_SRC_ALPHA; - pip_desc.blend.dst_factor_rgb = SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA; + pip_desc.colors[0].blend.enabled = true; + pip_desc.colors[0].blend.src_factor_rgb = SG_BLENDFACTOR_SRC_ALPHA; + pip_desc.colors[0].blend.dst_factor_rgb = SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA; sfons->pip = sgl_make_pipeline(&pip_desc); } @@ -1776,16 +1770,16 @@ SOKOL_API_IMPL void sfons_flush(FONScontext* ctx) { _sfons_t* sfons = (_sfons_t*) ctx->params.userPtr; if (sfons->img_dirty) { sfons->img_dirty = false; - sg_image_content content; - memset(&content, 0, sizeof(content)); - content.subimage[0][0].ptr = ctx->texData; - content.subimage[0][0].size = sfons->width * sfons->height; - sg_update_image(sfons->img, &content); + sg_image_data data; + memset(&data, 0, sizeof(data)); + data.subimage[0][0].ptr = ctx->texData; + data.subimage[0][0].size = (size_t) (sfons->width * sfons->height); + sg_update_image(sfons->img, &data); } } SOKOL_API_IMPL uint32_t sfons_rgba(uint8_t r, uint8_t g, uint8_t b, uint8_t a) { - return (r) | (g<<8) | (b<<16) | (a<<24); + return ((uint32_t)r) | ((uint32_t)g<<8) | ((uint32_t)b<<16) | ((uint32_t)a<<24); } #endif /* SOKOL_FONTSTASH_IMPL */ diff --git a/util/sokol_gfx_imgui.h b/util/sokol_gfx_imgui.h index cf39e0bbf..861625d3a 100644 --- a/util/sokol_gfx_imgui.h +++ b/util/sokol_gfx_imgui.h @@ -378,7 +378,7 @@ typedef struct { typedef struct { sg_buffer buffer; - int data_size; + size_t data_size; } sg_imgui_args_update_buffer_t; typedef struct { @@ -387,7 +387,7 @@ typedef struct { typedef struct { sg_buffer buffer; - int data_size; + size_t data_size; int result; } sg_imgui_args_append_buffer_t; @@ -423,10 +423,9 @@ typedef struct { typedef struct { sg_shader_stage stage; int ub_index; - const void* data; - int num_bytes; + size_t data_size; sg_pipeline pipeline; /* the pipeline which was active at this call */ - uint32_t ubuf_pos; /* start of copied data in capture buffer */ + size_t ubuf_pos; /* start of copied data in capture buffer */ } sg_imgui_args_apply_uniforms_t; typedef struct { @@ -596,10 +595,10 @@ typedef struct { } sg_imgui_capture_item_t; typedef struct { - uint32_t ubuf_size; /* size of uniform capture buffer in bytes */ - uint32_t ubuf_pos; /* current uniform buffer pos */ + size_t ubuf_size; /* size of uniform capture buffer in bytes */ + size_t ubuf_pos; /* current uniform buffer pos */ uint8_t* ubuf; /* buffer for capturing uniform updates */ - uint32_t num_items; + int num_items; sg_imgui_capture_item_t items[SG_IMGUI_MAX_FRAMECAPTURE_ITEMS]; } sg_imgui_capture_bucket_t; @@ -608,8 +607,8 @@ typedef struct { */ typedef struct { bool open; - uint32_t bucket_index; /* which bucket to record to, 0 or 1 */ - uint32_t sel_item; /* currently selected capture item by index */ + int bucket_index; /* which bucket to record to, 0 or 1 */ + int sel_item; /* currently selected capture item by index */ sg_imgui_capture_bucket_t bucket[2]; } sg_imgui_capture_t; @@ -806,7 +805,7 @@ _SOKOL_PRIVATE int _sg_imgui_uniform_size(sg_uniform_type type, int count) { } } -_SOKOL_PRIVATE void* _sg_imgui_alloc(int size) { +_SOKOL_PRIVATE void* _sg_imgui_alloc(size_t size) { SOKOL_ASSERT(size > 0); return SOKOL_MALLOC(size); } @@ 
-817,7 +816,7 @@ _SOKOL_PRIVATE void _sg_imgui_free(void* ptr) { } } -_SOKOL_PRIVATE void* _sg_imgui_realloc(void* old_ptr, int old_size, int new_size) { +_SOKOL_PRIVATE void* _sg_imgui_realloc(void* old_ptr, size_t old_size, size_t new_size) { SOKOL_ASSERT((new_size > 0) && (new_size > old_size)); void* new_ptr = SOKOL_MALLOC(new_size); SOKOL_ASSERT(new_ptr); @@ -853,17 +852,17 @@ _SOKOL_PRIVATE sg_imgui_str_t _sg_imgui_make_str(const char* str) { _SOKOL_PRIVATE const char* _sg_imgui_str_dup(const char* src) { SOKOL_ASSERT(src); - int len = (int) strlen(src) + 1; + size_t len = strlen(src) + 1; char* dst = (char*) _sg_imgui_alloc(len); memcpy(dst, src, len); return (const char*) dst; } -_SOKOL_PRIVATE const uint8_t* _sg_imgui_bin_dup(const uint8_t* src, int num_bytes) { +_SOKOL_PRIVATE const void* _sg_imgui_bin_dup(const void* src, size_t num_bytes) { SOKOL_ASSERT(src && (num_bytes > 0)); - uint8_t* dst = (uint8_t*) _sg_imgui_alloc(num_bytes); + void* dst = _sg_imgui_alloc(num_bytes); memcpy(dst, src, num_bytes); - return (const uint8_t*) dst; + return (const void*) dst; } _SOKOL_PRIVATE void _sg_imgui_snprintf(sg_imgui_str_t* dst, const char* fmt, ...) { @@ -1155,7 +1154,7 @@ _SOKOL_PRIVATE const char* _sg_imgui_blendop_string(sg_blend_op op) { } } -_SOKOL_PRIVATE const char* _sg_imgui_colormask_string(uint8_t m) { +_SOKOL_PRIVATE const char* _sg_imgui_colormask_string(sg_color_mask m) { static const char* str[] = { "NONE", "R", @@ -1206,6 +1205,11 @@ _SOKOL_PRIVATE const char* _sg_imgui_bool_string(bool b) { return b ? "true" : "false"; } +_SOKOL_PRIVATE const char* _sg_imgui_color_string(sg_imgui_str_t* dst_str, sg_color color) { + _sg_imgui_snprintf(dst_str, "%.3f %.3f %.3f %.3f", color.r, color.g, color.b, color.a); + return dst_str->buf; +} + _SOKOL_PRIVATE sg_imgui_str_t _sg_imgui_res_id_string(uint32_t res_id, const char* label) { SOKOL_ASSERT(label); sg_imgui_str_t res; @@ -1353,14 +1357,14 @@ _SOKOL_PRIVATE void _sg_imgui_shader_created(sg_imgui_t* ctx, sg_shader res_id, if (shd->desc.vs.source) { shd->desc.vs.source = _sg_imgui_str_dup(shd->desc.vs.source); } - if (shd->desc.vs.byte_code) { - shd->desc.vs.byte_code = _sg_imgui_bin_dup(shd->desc.vs.byte_code, shd->desc.vs.byte_code_size); + if (shd->desc.vs.bytecode.ptr) { + shd->desc.vs.bytecode.ptr = _sg_imgui_bin_dup(shd->desc.vs.bytecode.ptr, shd->desc.vs.bytecode.size); } if (shd->desc.fs.source) { shd->desc.fs.source = _sg_imgui_str_dup(shd->desc.fs.source); } - if (shd->desc.fs.byte_code) { - shd->desc.fs.byte_code = _sg_imgui_bin_dup(shd->desc.fs.byte_code, shd->desc.fs.byte_code_size); + if (shd->desc.fs.bytecode.ptr) { + shd->desc.fs.bytecode.ptr = _sg_imgui_bin_dup(shd->desc.fs.bytecode.ptr, shd->desc.fs.bytecode.size); } for (int i = 0; i < SG_MAX_VERTEX_ATTRIBUTES; i++) { sg_shader_attr_desc* ad = &shd->desc.attrs[i]; @@ -1383,17 +1387,17 @@ _SOKOL_PRIVATE void _sg_imgui_shader_destroyed(sg_imgui_t* ctx, int slot_index) _sg_imgui_free((void*)shd->desc.vs.source); shd->desc.vs.source = 0; } - if (shd->desc.vs.byte_code) { - _sg_imgui_free((void*)shd->desc.vs.byte_code); - shd->desc.vs.byte_code = 0; + if (shd->desc.vs.bytecode.ptr) { + _sg_imgui_free((void*)shd->desc.vs.bytecode.ptr); + shd->desc.vs.bytecode.ptr = 0; } if (shd->desc.fs.source) { _sg_imgui_free((void*)shd->desc.fs.source); shd->desc.fs.source = 0; } - if (shd->desc.fs.byte_code) { - _sg_imgui_free((void*)shd->desc.fs.byte_code); - shd->desc.fs.byte_code = 0; + if (shd->desc.fs.bytecode.ptr) { + _sg_imgui_free((void*)shd->desc.fs.bytecode.ptr); + 
shd->desc.fs.bytecode.ptr = 0; } } @@ -1432,7 +1436,7 @@ _SOKOL_PRIVATE void _sg_imgui_pass_destroyed(sg_imgui_t* ctx, int slot_index) { /*--- COMMAND CAPTURING ------------------------------------------------------*/ _SOKOL_PRIVATE void _sg_imgui_capture_init(sg_imgui_t* ctx) { - const int ubuf_initial_size = 256 * 1024; + const size_t ubuf_initial_size = 256 * 1024; for (int i = 0; i < 2; i++) { sg_imgui_capture_bucket_t* bucket = &ctx->capture.bucket[i]; bucket->ubuf_size = ubuf_initial_size; @@ -1465,11 +1469,11 @@ _SOKOL_PRIVATE void _sg_imgui_capture_next_frame(sg_imgui_t* ctx) { bucket->ubuf_pos = 0; } -_SOKOL_PRIVATE void _sg_imgui_capture_grow_ubuf(sg_imgui_t* ctx, uint32_t required_size) { +_SOKOL_PRIVATE void _sg_imgui_capture_grow_ubuf(sg_imgui_t* ctx, size_t required_size) { sg_imgui_capture_bucket_t* bucket = _sg_imgui_capture_get_write_bucket(ctx); SOKOL_ASSERT(required_size > bucket->ubuf_size); - int old_size = bucket->ubuf_size; - int new_size = required_size + (required_size>>1); /* allocate a bit ahead */ + size_t old_size = bucket->ubuf_size; + size_t new_size = required_size + (required_size>>1); /* allocate a bit ahead */ bucket->ubuf_size = new_size; bucket->ubuf = (uint8_t*) _sg_imgui_realloc(bucket->ubuf, old_size, new_size); } @@ -1485,27 +1489,27 @@ _SOKOL_PRIVATE sg_imgui_capture_item_t* _sg_imgui_capture_next_write_item(sg_img } } -_SOKOL_PRIVATE uint32_t _sg_imgui_capture_num_read_items(sg_imgui_t* ctx) { +_SOKOL_PRIVATE int _sg_imgui_capture_num_read_items(sg_imgui_t* ctx) { sg_imgui_capture_bucket_t* bucket = _sg_imgui_capture_get_read_bucket(ctx); return bucket->num_items; } -_SOKOL_PRIVATE sg_imgui_capture_item_t* _sg_imgui_capture_read_item_at(sg_imgui_t* ctx, uint32_t index) { +_SOKOL_PRIVATE sg_imgui_capture_item_t* _sg_imgui_capture_read_item_at(sg_imgui_t* ctx, int index) { sg_imgui_capture_bucket_t* bucket = _sg_imgui_capture_get_read_bucket(ctx); SOKOL_ASSERT(index < bucket->num_items); return &bucket->items[index]; } -_SOKOL_PRIVATE uint32_t _sg_imgui_capture_uniforms(sg_imgui_t* ctx, const void* data, int num_bytes) { +_SOKOL_PRIVATE size_t _sg_imgui_capture_uniforms(sg_imgui_t* ctx, const sg_range* data) { sg_imgui_capture_bucket_t* bucket = _sg_imgui_capture_get_write_bucket(ctx); - const uint32_t required_size = bucket->ubuf_pos + num_bytes; + const size_t required_size = bucket->ubuf_pos + data->size; if (required_size > bucket->ubuf_size) { _sg_imgui_capture_grow_ubuf(ctx, required_size); } SOKOL_ASSERT(required_size <= bucket->ubuf_size); - memcpy(bucket->ubuf + bucket->ubuf_pos, data, num_bytes); - const uint32_t pos = bucket->ubuf_pos; - bucket->ubuf_pos += num_bytes; + memcpy(bucket->ubuf + bucket->ubuf_pos, data->ptr, data->size); + const size_t pos = bucket->ubuf_pos; + bucket->ubuf_pos += data->size; SOKOL_ASSERT(bucket->ubuf_pos <= bucket->ubuf_size); return pos; } @@ -1590,7 +1594,7 @@ _SOKOL_PRIVATE sg_imgui_str_t _sg_imgui_capture_item_string(sg_imgui_t* ctx, int case SG_IMGUI_CMD_UPDATE_BUFFER: { sg_imgui_str_t res_id = _sg_imgui_buffer_id_string(ctx, item->args.update_buffer.buffer); - _sg_imgui_snprintf(&str, "%d: sg_update_buffer(buf=%s, data_ptr=.., data_size=%d)", + _sg_imgui_snprintf(&str, "%d: sg_update_buffer(buf=%s, data.size=%d)", index, res_id.buf, item->args.update_buffer.data_size); } @@ -1606,7 +1610,7 @@ _SOKOL_PRIVATE sg_imgui_str_t _sg_imgui_capture_item_string(sg_imgui_t* ctx, int case SG_IMGUI_CMD_APPEND_BUFFER: { sg_imgui_str_t res_id = _sg_imgui_buffer_id_string(ctx, item->args.append_buffer.buffer); - 
_sg_imgui_snprintf(&str, "%d: sg_append_buffer(buf=%s, data_ptr=.., data_size=%d) => %d", + _sg_imgui_snprintf(&str, "%d: sg_append_buffer(buf=%s, data.size=%d) => %d", index, res_id.buf, item->args.append_buffer.data_size, item->args.append_buffer.result); @@ -1659,11 +1663,11 @@ _SOKOL_PRIVATE sg_imgui_str_t _sg_imgui_capture_item_string(sg_imgui_t* ctx, int break; case SG_IMGUI_CMD_APPLY_UNIFORMS: - _sg_imgui_snprintf(&str, "%d: sg_apply_uniforms(stage=%s, ub_index=%d, data=.., num_bytes=%d)", + _sg_imgui_snprintf(&str, "%d: sg_apply_uniforms(stage=%s, ub_index=%d, data.size=%d)", index, _sg_imgui_shaderstage_string(item->args.apply_uniforms.stage), item->args.apply_uniforms.ub_index, - item->args.apply_uniforms.num_bytes); + item->args.apply_uniforms.data_size); break; case SG_IMGUI_CMD_DRAW: @@ -2093,7 +2097,7 @@ _SOKOL_PRIVATE void _sg_imgui_destroy_pass(sg_pass pass, void* user_data) { } } -_SOKOL_PRIVATE void _sg_imgui_update_buffer(sg_buffer buf, const void* data_ptr, int data_size, void* user_data) { +_SOKOL_PRIVATE void _sg_imgui_update_buffer(sg_buffer buf, const sg_range* data, void* user_data) { sg_imgui_t* ctx = (sg_imgui_t*) user_data; SOKOL_ASSERT(ctx); sg_imgui_capture_item_t* item = _sg_imgui_capture_next_write_item(ctx); @@ -2101,14 +2105,14 @@ _SOKOL_PRIVATE void _sg_imgui_update_buffer(sg_buffer buf, const void* data_ptr, item->cmd = SG_IMGUI_CMD_UPDATE_BUFFER; item->color = _SG_IMGUI_COLOR_RSRC; item->args.update_buffer.buffer = buf; - item->args.update_buffer.data_size = data_size; + item->args.update_buffer.data_size = data->size; } if (ctx->hooks.update_buffer) { - ctx->hooks.update_buffer(buf, data_ptr, data_size, ctx->hooks.user_data); + ctx->hooks.update_buffer(buf, data, ctx->hooks.user_data); } } -_SOKOL_PRIVATE void _sg_imgui_update_image(sg_image img, const sg_image_content* data, void* user_data) { +_SOKOL_PRIVATE void _sg_imgui_update_image(sg_image img, const sg_image_data* data, void* user_data) { sg_imgui_t* ctx = (sg_imgui_t*) user_data; SOKOL_ASSERT(ctx); sg_imgui_capture_item_t* item = _sg_imgui_capture_next_write_item(ctx); @@ -2122,7 +2126,7 @@ _SOKOL_PRIVATE void _sg_imgui_update_image(sg_image img, const sg_image_content* } } -_SOKOL_PRIVATE void _sg_imgui_append_buffer(sg_buffer buf, const void* data_ptr, int data_size, int result, void* user_data) { +_SOKOL_PRIVATE void _sg_imgui_append_buffer(sg_buffer buf, const sg_range* data, int result, void* user_data) { sg_imgui_t* ctx = (sg_imgui_t*) user_data; SOKOL_ASSERT(ctx); sg_imgui_capture_item_t* item = _sg_imgui_capture_next_write_item(ctx); @@ -2130,11 +2134,11 @@ _SOKOL_PRIVATE void _sg_imgui_append_buffer(sg_buffer buf, const void* data_ptr, item->cmd = SG_IMGUI_CMD_APPEND_BUFFER; item->color = _SG_IMGUI_COLOR_RSRC; item->args.append_buffer.buffer = buf; - item->args.append_buffer.data_size = data_size; + item->args.append_buffer.data_size = data->size; item->args.append_buffer.result = result; } if (ctx->hooks.append_buffer) { - ctx->hooks.append_buffer(buf, data_ptr, data_size, result, ctx->hooks.user_data); + ctx->hooks.append_buffer(buf, data, result, ctx->hooks.user_data); } } @@ -2237,9 +2241,10 @@ _SOKOL_PRIVATE void _sg_imgui_apply_bindings(const sg_bindings* bindings, void* } } -_SOKOL_PRIVATE void _sg_imgui_apply_uniforms(sg_shader_stage stage, int ub_index, const void* data, int num_bytes, void* user_data) { +_SOKOL_PRIVATE void _sg_imgui_apply_uniforms(sg_shader_stage stage, int ub_index, const sg_range* data, void* user_data) { sg_imgui_t* ctx = (sg_imgui_t*) user_data; 
SOKOL_ASSERT(ctx); + SOKOL_ASSERT(data); sg_imgui_capture_item_t* item = _sg_imgui_capture_next_write_item(ctx); if (item) { item->cmd = SG_IMGUI_CMD_APPLY_UNIFORMS; @@ -2247,13 +2252,12 @@ _SOKOL_PRIVATE void _sg_imgui_apply_uniforms(sg_shader_stage stage, int ub_index sg_imgui_args_apply_uniforms_t* args = &item->args.apply_uniforms; args->stage = stage; args->ub_index = ub_index; - args->data = data; - args->num_bytes = num_bytes; + args->data_size = data->size; args->pipeline = ctx->cur_pipeline; - args->ubuf_pos = _sg_imgui_capture_uniforms(ctx, data, num_bytes); + args->ubuf_pos = _sg_imgui_capture_uniforms(ctx, data); } if (ctx->hooks.apply_uniforms) { - ctx->hooks.apply_uniforms(stage, ub_index, data, num_bytes, ctx->hooks.user_data); + ctx->hooks.apply_uniforms(stage, ub_index, data, ctx->hooks.user_data); } } @@ -2977,9 +2981,9 @@ _SOKOL_PRIVATE void _sg_imgui_draw_pass_list(sg_imgui_t* ctx) { _SOKOL_PRIVATE void _sg_imgui_draw_capture_list(sg_imgui_t* ctx) { igBeginChildStr("capture_list", IMVEC2(_SG_IMGUI_LIST_WIDTH,0), true, 0); - const uint32_t num_items = _sg_imgui_capture_num_read_items(ctx); + const int num_items = _sg_imgui_capture_num_read_items(ctx); uint64_t group_stack = 1; /* bit set: group unfolded, cleared: folded */ - for (uint32_t i = 0; i < num_items; i++) { + for (int i = 0; i < num_items; i++) { const sg_imgui_capture_item_t* item = _sg_imgui_capture_read_item_at(ctx, i); sg_imgui_str_t item_string = _sg_imgui_capture_item_string(ctx, i, item); igPushStyleColorU32(ImGuiCol_Text, item->color); @@ -3035,7 +3039,7 @@ _SOKOL_PRIVATE void _sg_imgui_draw_buffer_panel(sg_imgui_t* ctx, sg_buffer buf) igText("Update Frame Index: %d", info.update_frame_index); igText("Append Frame Index: %d", info.append_frame_index); igText("Append Pos: %d", info.append_pos); - igText("Append Overflow: %s", info.append_overflow ? "YES":"NO"); + igText("Append Overflow: %s", _sg_imgui_bool_string(info.append_overflow)); } } else { @@ -3080,7 +3084,7 @@ _SOKOL_PRIVATE void _sg_imgui_draw_image_panel(sg_imgui_t* ctx, sg_image img) { igSeparator(); igText("Type: %s", _sg_imgui_imagetype_string(desc->type)); igText("Usage: %s", _sg_imgui_usage_string(desc->usage)); - igText("Render Target: %s", desc->render_target ? "YES":"NO"); + igText("Render Target: %s", _sg_imgui_bool_string(desc->render_target)); igText("Width: %d", desc->width); igText("Height: %d", desc->height); igText("Num Slices: %d", desc->num_slices); @@ -3124,7 +3128,7 @@ _SOKOL_PRIVATE void _sg_imgui_draw_shader_stage(const sg_shader_stage_desc* stag } int num_valid_images = 0; for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++) { - if (_SG_IMAGETYPE_DEFAULT != stage->images[i].type) { + if (_SG_IMAGETYPE_DEFAULT != stage->images[i].image_type) { num_valid_images++; } else { @@ -3155,10 +3159,10 @@ _SOKOL_PRIVATE void _sg_imgui_draw_shader_stage(const sg_shader_stage_desc* stag if (igTreeNodeStr("Images")) { for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++) { const sg_shader_image_desc* sid = &stage->images[i]; - if (sid->type != _SG_IMAGETYPE_DEFAULT) { - igText("slot: %d\n name: %s\n type: %s\n sampler_type: %s", + if (sid->image_type != _SG_IMAGETYPE_DEFAULT) { + igText("slot: %d\n name: %s\n image_type: %s\n sampler_type: %s", i, sid->name ? 
sid->name : "NONE", - _sg_imgui_imagetype_string(sid->type), + _sg_imgui_imagetype_string(sid->image_type), _sg_imgui_samplertype_string(sid->sampler_type)); } else { @@ -3180,7 +3184,7 @@ _SOKOL_PRIVATE void _sg_imgui_draw_shader_stage(const sg_shader_stage_desc* stag igTreePop(); } } - else if (stage->byte_code) { + else if (stage->bytecode.ptr) { if (igTreeNodeStr("Byte Code")) { igText("Byte-code display currently not supported."); igTreePop(); @@ -3252,53 +3256,54 @@ _SOKOL_PRIVATE void _sg_imgui_draw_vertex_layout(const sg_layout_desc* layout) { } } +_SOKOL_PRIVATE void _sg_imgui_draw_stencil_face_state(const sg_stencil_face_state* sfs) { + igText("Fail Op: %s", _sg_imgui_stencilop_string(sfs->fail_op)); + igText("Depth Fail Op: %s", _sg_imgui_stencilop_string(sfs->depth_fail_op)); + igText("Pass Op: %s", _sg_imgui_stencilop_string(sfs->pass_op)); + igText("Compare: %s", _sg_imgui_comparefunc_string(sfs->compare)); +} + _SOKOL_PRIVATE void _sg_imgui_draw_stencil_state(const sg_stencil_state* ss) { - igText("Fail Op: %s", _sg_imgui_stencilop_string(ss->fail_op)); - igText("Depth Fail Op: %s", _sg_imgui_stencilop_string(ss->depth_fail_op)); - igText("Pass Op: %s", _sg_imgui_stencilop_string(ss->pass_op)); - igText("Compare Func: %s", _sg_imgui_comparefunc_string(ss->compare_func)); -} - -_SOKOL_PRIVATE void _sg_imgui_draw_depth_stencil_state(const sg_depth_stencil_state* dss) { - igText("Depth Compare Func: %s", _sg_imgui_comparefunc_string(dss->depth_compare_func)); - igText("Depth Write Enabled: %s", dss->depth_write_enabled ? "YES":"NO"); - igText("Stencil Enabled: %s", dss->stencil_enabled ? "YES":"NO"); - igText("Stencil Read Mask: 0x%02X", dss->stencil_read_mask); - igText("Stencil Write Mask: 0x%02X", dss->stencil_write_mask); - igText("Stencil Ref: 0x%02X", dss->stencil_ref); - if (igTreeNodeStr("Stencil Front")) { - _sg_imgui_draw_stencil_state(&dss->stencil_front); + igText("Enabled: %s", _sg_imgui_bool_string(ss->enabled)); + igText("Read Mask: 0x%02X", ss->read_mask); + igText("Write Mask: 0x%02X", ss->write_mask); + igText("Ref: 0x%02X", ss->ref); + if (igTreeNodeStr("Front")) { + _sg_imgui_draw_stencil_face_state(&ss->front); igTreePop(); } - if (igTreeNodeStr("Stencil Back")) { - _sg_imgui_draw_stencil_state(&dss->stencil_back); + if (igTreeNodeStr("Back")) { + _sg_imgui_draw_stencil_face_state(&ss->back); igTreePop(); } } +_SOKOL_PRIVATE void _sg_imgui_draw_depth_state(const sg_depth_state* ds) { + igText("Pixel Format: %s", _sg_imgui_pixelformat_string(ds->pixel_format)); + igText("Compare: %s", _sg_imgui_comparefunc_string(ds->compare)); + igText("Write Enabled: %s", _sg_imgui_bool_string(ds->write_enabled)); + igText("Bias: %f", ds->bias); + igText("Bias Slope: %f", ds->bias_slope_scale); + igText("Bias Clamp: %f", ds->bias_clamp); +} + _SOKOL_PRIVATE void _sg_imgui_draw_blend_state(const sg_blend_state* bs) { - igText("Blend Enabled: %s", bs->enabled ? 
"YES":"NO"); + igText("Blend Enabled: %s", _sg_imgui_bool_string(bs->enabled)); igText("Src Factor RGB: %s", _sg_imgui_blendfactor_string(bs->src_factor_rgb)); igText("Dst Factor RGB: %s", _sg_imgui_blendfactor_string(bs->dst_factor_rgb)); igText("Op RGB: %s", _sg_imgui_blendop_string(bs->op_rgb)); igText("Src Factor Alpha: %s", _sg_imgui_blendfactor_string(bs->src_factor_alpha)); igText("Dst Factor Alpha: %s", _sg_imgui_blendfactor_string(bs->dst_factor_alpha)); igText("Op Alpha: %s", _sg_imgui_blendop_string(bs->op_alpha)); - igText("Color Write Mask: %s", _sg_imgui_colormask_string(bs->color_write_mask)); - igText("Attachment Count: %d", bs->color_attachment_count); - igText("Color Format: %s", _sg_imgui_pixelformat_string(bs->color_format)); - igText("Depth Format: %s", _sg_imgui_pixelformat_string(bs->depth_format)); - igText("Blend Color: %.3f %.3f %.3f %.3f", bs->blend_color[0], bs->blend_color[1], bs->blend_color[2], bs->blend_color[3]); } -_SOKOL_PRIVATE void _sg_imgui_draw_rasterizer_state(const sg_rasterizer_state* rs) { - igText("Alpha to Coverage: %s", rs->alpha_to_coverage_enabled ? "YES":"NO"); - igText("Cull Mode: %s", _sg_imgui_cullmode_string(rs->cull_mode)); - igText("Face Winding: %s", _sg_imgui_facewinding_string(rs->face_winding)); - igText("Sample Count: %d", rs->sample_count); - igText("Depth Bias: %f", rs->depth_bias); - igText("Depth Bias Slope: %f", rs->depth_bias_slope_scale); - igText("Depth Bias Clamp: %f", rs->depth_bias_clamp); +_SOKOL_PRIVATE void _sg_imgui_draw_color_state(const sg_color_state* cs) { + igText("Pixel Format: %s", _sg_imgui_pixelformat_string(cs->pixel_format)); + igText("Write Mask: %s", _sg_imgui_colormask_string(cs->write_mask)); + if (igTreeNodeStr("Blend State:")) { + _sg_imgui_draw_blend_state(&cs->blend); + igTreePop(); + } } _SOKOL_PRIVATE void _sg_imgui_draw_pipeline_panel(sg_imgui_t* ctx, sg_pipeline pip) { @@ -3314,24 +3319,35 @@ _SOKOL_PRIVATE void _sg_imgui_draw_pipeline_panel(sg_imgui_t* ctx, sg_pipeline p if (_sg_imgui_draw_shader_link(ctx, pip_ui->desc.shader)) { _sg_imgui_show_shader(ctx, pip_ui->desc.shader); } - igText("Prim Type: %s", _sg_imgui_primitivetype_string(pip_ui->desc.primitive_type)); - igText("Index Type: %s", _sg_imgui_indextype_string(pip_ui->desc.index_type)); if (igTreeNodeStr("Vertex Layout")) { _sg_imgui_draw_vertex_layout(&pip_ui->desc.layout); igTreePop(); } - if (igTreeNodeStr("Depth Stencil State")) { - _sg_imgui_draw_depth_stencil_state(&pip_ui->desc.depth_stencil); + if (igTreeNodeStr("Depth State")) { + _sg_imgui_draw_depth_state(&pip_ui->desc.depth); igTreePop(); } - if (igTreeNodeStr("Blend State")) { - _sg_imgui_draw_blend_state(&pip_ui->desc.blend); + if (igTreeNodeStr("Stencil State")) { + _sg_imgui_draw_stencil_state(&pip_ui->desc.stencil); igTreePop(); } - if (igTreeNodeStr("Rasterizer State")) { - _sg_imgui_draw_rasterizer_state(&pip_ui->desc.rasterizer); - igTreePop(); + igText("Color Count: %d", pip_ui->desc.color_count); + for (int i = 0; i < pip_ui->desc.color_count; i++) { + sg_imgui_str_t str; + _sg_imgui_snprintf(&str, "Color %d", i); + if (igTreeNodeStr(str.buf)) { + _sg_imgui_draw_color_state(&pip_ui->desc.colors[i]); + igTreePop(); + } } + igText("Prim Type: %s", _sg_imgui_primitivetype_string(pip_ui->desc.primitive_type)); + igText("Index Type: %s", _sg_imgui_indextype_string(pip_ui->desc.index_type)); + igText("Cull Mode: %s", _sg_imgui_cullmode_string(pip_ui->desc.cull_mode)); + igText("Face Winding: %s", _sg_imgui_facewinding_string(pip_ui->desc.face_winding)); + 
igText("Sample Count: %d", pip_ui->desc.sample_count); + sg_imgui_str_t blend_color_str; + igText("Blend Color: %.3f %.3f %.3f %.3f", _sg_imgui_color_string(&blend_color_str, pip_ui->desc.blend_color)); + igText("Alpha To Coverage: %s", _sg_imgui_bool_string(pip_ui->desc.alpha_to_coverage_enabled)); } else { igText("Pipeline 0x%08X not valid.", pip.id); @@ -3340,7 +3356,7 @@ _SOKOL_PRIVATE void _sg_imgui_draw_pipeline_panel(sg_imgui_t* ctx, sg_pipeline p } } -_SOKOL_PRIVATE void _sg_imgui_draw_attachment(sg_imgui_t* ctx, const sg_attachment_desc* att, float* img_scale) { +_SOKOL_PRIVATE void _sg_imgui_draw_pass_attachment(sg_imgui_t* ctx, const sg_pass_attachment_desc* att, float* img_scale) { igText(" Image: "); igSameLine(0,-1); if (_sg_imgui_draw_image_link(ctx, att->image)) { _sg_imgui_show_image(ctx, att->image); @@ -3364,12 +3380,12 @@ _SOKOL_PRIVATE void _sg_imgui_draw_pass_panel(sg_imgui_t* ctx, sg_pass pass) { } igSeparator(); igText("Color Attachment #%d:", i); - _sg_imgui_draw_attachment(ctx, &pass_ui->desc.color_attachments[i], &pass_ui->color_image_scale[i]); + _sg_imgui_draw_pass_attachment(ctx, &pass_ui->desc.color_attachments[i], &pass_ui->color_image_scale[i]); } if (pass_ui->desc.depth_stencil_attachment.image.id != SG_INVALID_ID) { igSeparator(); igText("Depth-Stencil Attachemnt:"); - _sg_imgui_draw_attachment(ctx, &pass_ui->desc.depth_stencil_attachment, &pass_ui->ds_image_scale); + _sg_imgui_draw_pass_attachment(ctx, &pass_ui->desc.depth_stencil_attachment, &pass_ui->ds_image_scale); } } else { @@ -3454,14 +3470,14 @@ _SOKOL_PRIVATE void _sg_imgui_draw_uniforms_panel(sg_imgui_t* ctx, const sg_imgu const sg_shader_uniform_block_desc* ub_desc = (args->stage == SG_SHADERSTAGE_VS) ? &shd_ui->desc.vs.uniform_blocks[args->ub_index] : &shd_ui->desc.fs.uniform_blocks[args->ub_index]; - SOKOL_ASSERT(args->num_bytes <= ub_desc->size); + SOKOL_ASSERT(args->data_size <= ub_desc->size); bool draw_dump = false; if (ub_desc->uniforms[0].type == SG_UNIFORMTYPE_INVALID) { draw_dump = true; } sg_imgui_capture_bucket_t* bucket = _sg_imgui_capture_get_read_bucket(ctx); - SOKOL_ASSERT((args->ubuf_pos + args->num_bytes) <= bucket->ubuf_size); + SOKOL_ASSERT((args->ubuf_pos + args->data_size) <= bucket->ubuf_size); const float* uptrf = (const float*) (bucket->ubuf + args->ubuf_pos); if (!draw_dump) { for (int i = 0; i < SG_MAX_UB_MEMBERS; i++) { @@ -3504,12 +3520,12 @@ _SOKOL_PRIVATE void _sg_imgui_draw_uniforms_panel(sg_imgui_t* ctx, const sg_imgu igText("???"); break; } - uptrf += _sg_imgui_uniform_size(ud->type, 1) / sizeof(float); + uptrf += _sg_imgui_uniform_size(ud->type, 1) / (int)sizeof(float); } } } else { - const uint32_t num_floats = ub_desc->size / sizeof(float); + const size_t num_floats = ub_desc->size / sizeof(float); for (uint32_t i = 0; i < num_floats; i++) { igText("%.3f, ", uptrf[i]); if (((i + 1) % 4) != 0) { @@ -3539,15 +3555,12 @@ _SOKOL_PRIVATE void _sg_imgui_draw_passaction_panel(sg_imgui_t* ctx, sg_pass pas for (int i = 0; i < num_color_atts; i++) { const sg_color_attachment_action* c_att = &action->colors[i]; igText(" Color Attachment %d:", i); + sg_imgui_str_t color_str; switch (c_att->action) { case SG_ACTION_LOAD: igText(" SG_ACTION_LOAD"); break; case SG_ACTION_DONTCARE: igText(" SG_ACTION_DONTCARE"); break; default: - igText(" SG_ACTION_CLEAR: %.3f, %.3f, %.3f, %.3f", - c_att->val[0], - c_att->val[1], - c_att->val[2], - c_att->val[3]); + igText(" SG_ACTION_CLEAR: %s", _sg_imgui_color_string(&color_str, c_att->value)); break; } } @@ -3556,19 +3569,19 @@ 
_SOKOL_PRIVATE void _sg_imgui_draw_passaction_panel(sg_imgui_t* ctx, sg_pass pas switch (d_att->action) { case SG_ACTION_LOAD: igText(" SG_ACTION_LOAD"); break; case SG_ACTION_DONTCARE: igText(" SG_ACTION_DONTCARE"); break; - default: igText(" SG_ACTION_CLEAR: %.3f", d_att->val); break; + default: igText(" SG_ACTION_CLEAR: %.3f", d_att->value); break; } const sg_stencil_attachment_action* s_att = &action->stencil; igText(" Stencil Attachment"); switch (s_att->action) { case SG_ACTION_LOAD: igText(" SG_ACTION_LOAD"); break; case SG_ACTION_DONTCARE: igText(" SG_ACTION_DONTCARE"); break; - default: igText(" SG_ACTION_CLEAR: 0x%02X", s_att->val); break; + default: igText(" SG_ACTION_CLEAR: 0x%02X", s_att->value); break; } } _SOKOL_PRIVATE void _sg_imgui_draw_capture_panel(sg_imgui_t* ctx) { - uint32_t sel_item_index = ctx->capture.sel_item; + int sel_item_index = ctx->capture.sel_item; if (sel_item_index >= _sg_imgui_capture_num_read_items(ctx)) { return; } @@ -3709,6 +3722,8 @@ _SOKOL_PRIVATE void _sg_imgui_draw_caps_panel(void) { igText(" imagetype_3d: %s", _sg_imgui_bool_string(f.imagetype_3d)); igText(" imagetype_array: %s", _sg_imgui_bool_string(f.imagetype_array)); igText(" image_clamp_to_border: %s", _sg_imgui_bool_string(f.image_clamp_to_border)); + igText(" mrt_independent_blend_state: %s", _sg_imgui_bool_string(f.mrt_independent_blend_state)); + igText(" mrt_independent_write_mask: %s", _sg_imgui_bool_string(f.mrt_independent_write_mask)); sg_limits l = sg_query_limits(); igText("\nLimits:\n"); igText(" max_image_size_2d: %d", l.max_image_size_2d); @@ -3815,27 +3830,27 @@ SOKOL_API_IMPL void sg_imgui_init(sg_imgui_t* ctx) { ctx->pipelines.num_slots = desc.pipeline_pool_size; ctx->passes.num_slots = desc.pass_pool_size; - const int buffer_pool_size = ctx->buffers.num_slots * sizeof(sg_imgui_buffer_t); + const size_t buffer_pool_size = (size_t)ctx->buffers.num_slots * sizeof(sg_imgui_buffer_t); ctx->buffers.slots = (sg_imgui_buffer_t*) _sg_imgui_alloc(buffer_pool_size); SOKOL_ASSERT(ctx->buffers.slots); memset(ctx->buffers.slots, 0, buffer_pool_size); - const int image_pool_size = ctx->images.num_slots * sizeof(sg_imgui_image_t); + const size_t image_pool_size = (size_t)ctx->images.num_slots * sizeof(sg_imgui_image_t); ctx->images.slots = (sg_imgui_image_t*) _sg_imgui_alloc(image_pool_size); SOKOL_ASSERT(ctx->images.slots); memset(ctx->images.slots, 0, image_pool_size); - const int shader_pool_size = ctx->shaders.num_slots * sizeof(sg_imgui_shader_t); + const size_t shader_pool_size = (size_t)ctx->shaders.num_slots * sizeof(sg_imgui_shader_t); ctx->shaders.slots = (sg_imgui_shader_t*) _sg_imgui_alloc(shader_pool_size); SOKOL_ASSERT(ctx->shaders.slots); memset(ctx->shaders.slots, 0, shader_pool_size); - const int pipeline_pool_size = ctx->pipelines.num_slots * sizeof(sg_imgui_pipeline_t); + const size_t pipeline_pool_size = (size_t)ctx->pipelines.num_slots * sizeof(sg_imgui_pipeline_t); ctx->pipelines.slots = (sg_imgui_pipeline_t*) _sg_imgui_alloc(pipeline_pool_size); SOKOL_ASSERT(ctx->pipelines.slots); memset(ctx->pipelines.slots, 0, pipeline_pool_size); - const int pass_pool_size = ctx->passes.num_slots * sizeof(sg_imgui_pass_t); + const size_t pass_pool_size = (size_t)ctx->passes.num_slots * sizeof(sg_imgui_pass_t); ctx->passes.slots = (sg_imgui_pass_t*) _sg_imgui_alloc(pass_pool_size); SOKOL_ASSERT(ctx->passes.slots); memset(ctx->passes.slots, 0, pass_pool_size); diff --git a/util/sokol_gl.h b/util/sokol_gl.h index 048951700..e41fc06bb 100644 --- a/util/sokol_gl.h +++ 
b/util/sokol_gl.h @@ -206,6 +206,13 @@ sgl_viewport(int x, int y, int w, int h, bool origin_top_left) sgl_scissor_rect(int x, int y, int w, int h, bool origin_top_left) + ...or call these alternatives which take float arguments (this might allow + to avoid casting between float and integer in more strongly typed languages + when floating point pixel coordinates are used): + + sgl_viewportf(float x, float y, float w, float h, bool origin_top_left) + sgl_scissor_rectf(float x, float y, float w, float h, bool origin_top_left) + ...these calls add a new command to the internal command queue, so that the viewport or scissor rect are set at the right time relative to other sokol-gl calls. @@ -509,7 +516,9 @@ SOKOL_GL_API_DECL void sgl_destroy_pipeline(sgl_pipeline pip); /* render state functions */ SOKOL_GL_API_DECL void sgl_viewport(int x, int y, int w, int h, bool origin_top_left); +SOKOL_GL_API_DECL void sgl_viewportf(float x, float y, float w, float h, bool origin_top_left); SOKOL_GL_API_DECL void sgl_scissor_rect(int x, int y, int w, int h, bool origin_top_left); +SOKOL_GL_API_DECL void sgl_scissor_rectf(float x, float y, float w, float h, bool origin_top_left); SOKOL_GL_API_DECL void sgl_enable_texture(void); SOKOL_GL_API_DECL void sgl_disable_texture(void); SOKOL_GL_API_DECL void sgl_texture(sg_image img); @@ -2078,12 +2087,12 @@ static void _sgl_init_pool(_sgl_pool_t* pool, int num) { pool->size = num + 1; pool->queue_top = 0; /* generation counters indexable by pool slot index, slot 0 is reserved */ - size_t gen_ctrs_size = sizeof(uint32_t) * pool->size; + size_t gen_ctrs_size = sizeof(uint32_t) * (size_t)pool->size; pool->gen_ctrs = (uint32_t*) SOKOL_MALLOC(gen_ctrs_size); SOKOL_ASSERT(pool->gen_ctrs); memset(pool->gen_ctrs, 0, gen_ctrs_size); /* it's not a bug to only reserve 'num' here */ - pool->free_queue = (int*) SOKOL_MALLOC(sizeof(int)*num); + pool->free_queue = (int*) SOKOL_MALLOC(sizeof(int) * (size_t)num); SOKOL_ASSERT(pool->free_queue); /* never allocate the zero-th pool item since the invalid id is 0 */ for (int i = pool->size-1; i >= 1; i--) { @@ -2142,7 +2151,7 @@ static void _sgl_setup_pipeline_pool(const sgl_desc_t* desc) { /* note: the pools here will have an additional item, since slot 0 is reserved */ SOKOL_ASSERT((desc->pipeline_pool_size > 0) && (desc->pipeline_pool_size < _SGL_MAX_POOL_SIZE)); _sgl_init_pool(&_sgl.pip_pool.pool, desc->pipeline_pool_size); - size_t pool_byte_size = sizeof(_sgl_pipeline_t) * _sgl.pip_pool.pool.size; + size_t pool_byte_size = sizeof(_sgl_pipeline_t) * (size_t)_sgl.pip_pool.pool.size; _sgl.pip_pool.pips = (_sgl_pipeline_t*) SOKOL_MALLOC(pool_byte_size); SOKOL_ASSERT(_sgl.pip_pool.pips); memset(_sgl.pip_pool.pips, 0, pool_byte_size); @@ -2245,14 +2254,14 @@ static void _sgl_init_pipeline(sgl_pipeline pip_id, const sg_pipeline_desc* in_d desc.shader = _sgl.shd; } desc.index_type = SG_INDEXTYPE_NONE; - desc.blend.color_format = _sgl.desc.color_format; - desc.blend.depth_format = _sgl.desc.depth_format; - desc.rasterizer.sample_count = _sgl.desc.sample_count; - if (desc.rasterizer.face_winding == _SG_FACEWINDING_DEFAULT) { - desc.rasterizer.face_winding = _sgl.desc.face_winding; + desc.sample_count = _sgl.desc.sample_count; + if (desc.face_winding == _SG_FACEWINDING_DEFAULT) { + desc.face_winding = _sgl.desc.face_winding; } - if (desc.blend.color_write_mask == _SG_COLORMASK_DEFAULT) { - desc.blend.color_write_mask = SG_COLORMASK_RGB; + desc.depth.pixel_format = _sgl.desc.depth_format; + desc.colors[0].pixel_format = 
_sgl.desc.color_format; + if (desc.colors[0].write_mask == _SG_COLORMASK_DEFAULT) { + desc.colors[0].write_mask = SG_COLORMASK_RGB; } _sgl_pipeline_t* pip = _sgl_lookup_pipeline(pip_id.id); @@ -2647,11 +2656,11 @@ SOKOL_API_IMPL void sgl_setup(const sgl_desc_t* desc) { _sgl.num_vertices = _sgl.desc.max_vertices; _sgl.num_uniforms = _sgl.desc.max_commands; _sgl.num_commands = _sgl.num_uniforms; - _sgl.vertices = (_sgl_vertex_t*) SOKOL_MALLOC(_sgl.num_vertices * sizeof(_sgl_vertex_t)); + _sgl.vertices = (_sgl_vertex_t*) SOKOL_MALLOC((size_t)_sgl.num_vertices * sizeof(_sgl_vertex_t)); SOKOL_ASSERT(_sgl.vertices); - _sgl.uniforms = (_sgl_uniform_t*) SOKOL_MALLOC(_sgl.num_uniforms * sizeof(_sgl_uniform_t)); + _sgl.uniforms = (_sgl_uniform_t*) SOKOL_MALLOC((size_t)_sgl.num_uniforms * sizeof(_sgl_uniform_t)); SOKOL_ASSERT(_sgl.uniforms); - _sgl.commands = (_sgl_command_t*) SOKOL_MALLOC(_sgl.num_commands * sizeof(_sgl_command_t)); + _sgl.commands = (_sgl_command_t*) SOKOL_MALLOC((size_t)_sgl.num_commands * sizeof(_sgl_command_t)); SOKOL_ASSERT(_sgl.commands); _sgl_setup_pipeline_pool(&_sgl.desc); @@ -2660,7 +2669,7 @@ SOKOL_API_IMPL void sgl_setup(const sgl_desc_t* desc) { sg_buffer_desc vbuf_desc; memset(&vbuf_desc, 0, sizeof(vbuf_desc)); - vbuf_desc.size = _sgl.num_vertices * sizeof(_sgl_vertex_t); + vbuf_desc.size = (size_t)_sgl.num_vertices * sizeof(_sgl_vertex_t); vbuf_desc.type = SG_BUFFERTYPE_VERTEXBUFFER; vbuf_desc.usage = SG_USAGE_STREAM; vbuf_desc.label = "sgl-vertex-buffer"; @@ -2680,8 +2689,7 @@ SOKOL_API_IMPL void sgl_setup(const sgl_desc_t* desc) { img_desc.pixel_format = SG_PIXELFORMAT_RGBA8; img_desc.min_filter = SG_FILTER_NEAREST; img_desc.mag_filter = SG_FILTER_NEAREST; - img_desc.content.subimage[0][0].ptr = pixels; - img_desc.content.subimage[0][0].size = sizeof(pixels); + img_desc.data.subimage[0][0] = SG_RANGE(pixels); img_desc.label = "sgl-default-texture"; _sgl.def_img = sg_make_image(&img_desc); SOKOL_ASSERT(SG_INVALID_ID != _sgl.def_img.id); @@ -2704,7 +2712,7 @@ SOKOL_API_IMPL void sgl_setup(const sgl_desc_t* desc) { ub->uniforms[0].type = SG_UNIFORMTYPE_FLOAT4; ub->uniforms[0].array_count = 8; shd_desc.fs.images[0].name = "tex"; - shd_desc.fs.images[0].type = SG_IMAGETYPE_2D; + shd_desc.fs.images[0].image_type = SG_IMAGETYPE_2D; shd_desc.fs.images[0].sampler_type = SG_SAMPLERTYPE_FLOAT; shd_desc.label = "sgl-shader"; #if defined(SOKOL_GLCORE33) @@ -2718,16 +2726,12 @@ SOKOL_API_IMPL void sgl_setup(const sgl_desc_t* desc) { shd_desc.fs.entry = "main0"; switch (sg_query_backend()) { case SG_BACKEND_METAL_MACOS: - shd_desc.vs.byte_code = _sgl_vs_bytecode_metal_macos; - shd_desc.vs.byte_code_size = sizeof(_sgl_vs_bytecode_metal_macos); - shd_desc.fs.byte_code = _sgl_fs_bytecode_metal_macos; - shd_desc.fs.byte_code_size = sizeof(_sgl_fs_bytecode_metal_macos); + shd_desc.vs.bytecode = SG_RANGE(_sgl_vs_bytecode_metal_macos); + shd_desc.fs.bytecode = SG_RANGE(_sgl_fs_bytecode_metal_macos); break; case SG_BACKEND_METAL_IOS: - shd_desc.vs.byte_code = _sgl_vs_bytecode_metal_ios; - shd_desc.vs.byte_code_size = sizeof(_sgl_vs_bytecode_metal_ios); - shd_desc.fs.byte_code = _sgl_fs_bytecode_metal_ios; - shd_desc.fs.byte_code_size = sizeof(_sgl_fs_bytecode_metal_ios); + shd_desc.vs.bytecode = SG_RANGE(_sgl_vs_bytecode_metal_ios); + shd_desc.fs.bytecode = SG_RANGE(_sgl_fs_bytecode_metal_ios); break; default: shd_desc.vs.source = _sgl_vs_source_metal_sim; @@ -2735,15 +2739,11 @@ SOKOL_API_IMPL void sgl_setup(const sgl_desc_t* desc) { break; } #elif defined(SOKOL_D3D11) - 
shd_desc.vs.byte_code = _sgl_vs_bytecode_hlsl4; - shd_desc.vs.byte_code_size = sizeof(_sgl_vs_bytecode_hlsl4); - shd_desc.fs.byte_code = _sgl_fs_bytecode_hlsl4; - shd_desc.fs.byte_code_size = sizeof(_sgl_fs_bytecode_hlsl4); + shd_desc.vs.bytecode = SG_RANGE(_sgl_vs_bytecode_hlsl4); + shd_desc.fs.bytecode = SG_RANGE(_sgl_fs_bytecode_hlsl4); #elif defined(SOKOL_WGPU) - shd_desc.vs.byte_code = _sgl_vs_bytecode_wgpu; - shd_desc.vs.byte_code_size = sizeof(_sgl_vs_bytecode_wgpu); - shd_desc.fs.byte_code = _sgl_fs_bytecode_wgpu; - shd_desc.fs.byte_code_size = sizeof(_sgl_fs_bytecode_wgpu); + shd_desc.vs.bytecode = SG_RANGE(_sgl_vs_bytecode_wgpu); + shd_desc.fs.bytecode = SG_RANGE(_sgl_fs_bytecode_wgpu); #else shd_desc.vs.source = _sgl_vs_src_dummy; shd_desc.fs.source = _sgl_fs_src_dummy; @@ -2754,7 +2754,7 @@ SOKOL_API_IMPL void sgl_setup(const sgl_desc_t* desc) { /* create default pipeline object */ sg_pipeline_desc def_pip_desc; memset(&def_pip_desc, 0, sizeof(def_pip_desc)); - def_pip_desc.depth_stencil.depth_write_enabled = true; + def_pip_desc.depth.write_enabled = true; _sgl.def_pip = _sgl_make_pipeline(&def_pip_desc); sg_pop_debug_group(); @@ -2869,6 +2869,10 @@ SOKOL_API_IMPL void sgl_viewport(int x, int y, int w, int h, bool origin_top_lef } } +SOKOL_API_IMPL void sgl_viewportf(float x, float y, float w, float h, bool origin_top_left) { + sgl_viewport((int)x, (int)y, (int)w, (int)h, origin_top_left); +} + SOKOL_API_IMPL void sgl_scissor_rect(int x, int y, int w, int h, bool origin_top_left) { SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); SOKOL_ASSERT(!_sgl.in_begin); @@ -2883,6 +2887,10 @@ SOKOL_API_IMPL void sgl_scissor_rect(int x, int y, int w, int h, bool origin_top } } +SOKOL_API_IMPL void sgl_scissor_rectf(float x, float y, float w, float h, bool origin_top_left) { + sgl_scissor_rect((int)x, (int)y, (int)w, (int)h, origin_top_left); +} + SOKOL_API_IMPL void sgl_enable_texture(void) { SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); SOKOL_ASSERT(!_sgl.in_begin); @@ -3236,7 +3244,8 @@ SOKOL_API_IMPL void sgl_draw(void) { uint32_t cur_img_id = SG_INVALID_ID; int cur_uniform_index = -1; sg_push_debug_group("sokol-gl"); - sg_update_buffer(_sgl.vbuf, _sgl.vertices, _sgl.cur_vertex * sizeof(_sgl_vertex_t)); + const sg_range range = { _sgl.vertices, (size_t)_sgl.cur_vertex * sizeof(_sgl_vertex_t) }; + sg_update_buffer(_sgl.vbuf, &range); _sgl.bind.vertex_buffers[0] = _sgl.vbuf; for (int i = 0; i < _sgl.cur_command; i++) { const _sgl_command_t* cmd = &_sgl.commands[i]; @@ -3269,7 +3278,8 @@ SOKOL_API_IMPL void sgl_draw(void) { cur_img_id = args->img.id; } if (cur_uniform_index != args->uniform_index) { - sg_apply_uniforms(SG_SHADERSTAGE_VS, 0, &_sgl.uniforms[args->uniform_index], sizeof(_sgl_uniform_t)); + const sg_range ub_range = { &_sgl.uniforms[args->uniform_index], sizeof(_sgl_uniform_t) }; + sg_apply_uniforms(SG_SHADERSTAGE_VS, 0, &ub_range); cur_uniform_index = args->uniform_index; } /* FIXME: what if number of vertices doesn't match the primitive type? 
*/ diff --git a/util/sokol_imgui.h b/util/sokol_imgui.h index 590e5ca86..3440e8027 100644 --- a/util/sokol_imgui.h +++ b/util/sokol_imgui.h @@ -1672,7 +1672,7 @@ SOKOL_API_IMPL void simgui_setup(const simgui_desc_t* desc) { sg_buffer_desc vb_desc; memset(&vb_desc, 0, sizeof(vb_desc)); vb_desc.usage = SG_USAGE_STREAM; - vb_desc.size = _simgui.desc.max_vertices * sizeof(ImDrawVert); + vb_desc.size = (size_t)_simgui.desc.max_vertices * sizeof(ImDrawVert); vb_desc.label = "sokol-imgui-vertices"; _simgui.vbuf = sg_make_buffer(&vb_desc); @@ -1680,7 +1680,7 @@ SOKOL_API_IMPL void simgui_setup(const simgui_desc_t* desc) { memset(&ib_desc, 0, sizeof(ib_desc)); ib_desc.type = SG_BUFFERTYPE_INDEXBUFFER; ib_desc.usage = SG_USAGE_STREAM; - ib_desc.size = _simgui.desc.max_vertices * 3 * sizeof(uint16_t); + ib_desc.size = (size_t)_simgui.desc.max_vertices * 3 * sizeof(uint16_t); ib_desc.label = "sokol-imgui-indices"; _simgui.ibuf = sg_make_buffer(&ib_desc); @@ -1703,8 +1703,8 @@ SOKOL_API_IMPL void simgui_setup(const simgui_desc_t* desc) { img_desc.wrap_v = SG_WRAP_CLAMP_TO_EDGE; img_desc.min_filter = SG_FILTER_LINEAR; img_desc.mag_filter = SG_FILTER_LINEAR; - img_desc.content.subimage[0][0].ptr = font_pixels; - img_desc.content.subimage[0][0].size = font_width * font_height * sizeof(uint32_t); + img_desc.data.subimage[0][0].ptr = font_pixels; + img_desc.data.subimage[0][0].size = (size_t)(font_width * font_height) * sizeof(uint32_t); img_desc.label = "sokol-imgui-font"; _simgui.img = sg_make_image(&img_desc); io->Fonts->TexID = (ImTextureID)(uintptr_t) _simgui.img.id; @@ -1728,7 +1728,7 @@ SOKOL_API_IMPL void simgui_setup(const simgui_desc_t* desc) { ub->uniforms[0].type = SG_UNIFORMTYPE_FLOAT4; ub->uniforms[0].array_count = 1; shd_desc.fs.images[0].name = "tex"; - shd_desc.fs.images[0].type = SG_IMAGETYPE_2D; + shd_desc.fs.images[0].image_type = SG_IMAGETYPE_2D; shd_desc.fs.images[0].sampler_type = SG_SAMPLERTYPE_FLOAT; shd_desc.label = "sokol-imgui-shader"; #if defined(SOKOL_GLCORE33) @@ -1742,16 +1742,12 @@ SOKOL_API_IMPL void simgui_setup(const simgui_desc_t* desc) { shd_desc.fs.entry = "main0"; switch (sg_query_backend()) { case SG_BACKEND_METAL_MACOS: - shd_desc.vs.byte_code = _simgui_vs_bytecode_metal_macos; - shd_desc.vs.byte_code_size = sizeof(_simgui_vs_bytecode_metal_macos); - shd_desc.fs.byte_code = _simgui_fs_bytecode_metal_macos; - shd_desc.fs.byte_code_size = sizeof(_simgui_fs_bytecode_metal_macos); + shd_desc.vs.bytecode = SG_RANGE(_simgui_vs_bytecode_metal_macos); + shd_desc.fs.bytecode = SG_RANGE(_simgui_fs_bytecode_metal_macos); break; case SG_BACKEND_METAL_IOS: - shd_desc.vs.byte_code = _simgui_vs_bytecode_metal_ios; - shd_desc.vs.byte_code_size = sizeof(_simgui_vs_bytecode_metal_ios); - shd_desc.fs.byte_code = _simgui_fs_bytecode_metal_ios; - shd_desc.fs.byte_code_size = sizeof(_simgui_fs_bytecode_metal_ios); + shd_desc.vs.bytecode = SG_RANGE(_simgui_vs_bytecode_metal_ios); + shd_desc.fs.bytecode = SG_RANGE(_simgui_fs_bytecode_metal_ios); break; default: shd_desc.vs.source = _simgui_vs_source_metal_sim; @@ -1759,15 +1755,11 @@ SOKOL_API_IMPL void simgui_setup(const simgui_desc_t* desc) { break; } #elif defined(SOKOL_D3D11) - shd_desc.vs.byte_code = _simgui_vs_bytecode_hlsl4; - shd_desc.vs.byte_code_size = sizeof(_simgui_vs_bytecode_hlsl4); - shd_desc.fs.byte_code = _simgui_fs_bytecode_hlsl4; - shd_desc.fs.byte_code_size = sizeof(_simgui_fs_bytecode_hlsl4); + shd_desc.vs.bytecode = SG_RANGE(_simgui_vs_bytecode_hlsl4); + shd_desc.fs.bytecode = SG_RANGE(_simgui_fs_bytecode_hlsl4); 
#elif defined(SOKOL_WGPU) - shd_desc.vs.byte_code = _simgui_vs_bytecode_wgpu; - shd_desc.vs.byte_code_size = sizeof(_simgui_vs_bytecode_wgpu); - shd_desc.fs.byte_code = _simgui_fs_bytecode_wgpu; - shd_desc.fs.byte_code_size = sizeof(_simgui_fs_bytecode_wgpu); + shd_desc.vs.bytecode = SG_RANGE(_simgui_vs_bytecode_wgpu); + shd_desc.fs.bytecode = SG_RANGE(_simgui_fs_bytecode_wgpu); #else shd_desc.vs.source = _simgui_vs_src_dummy; shd_desc.fs.source = _simgui_fs_src_dummy; @@ -1795,13 +1787,13 @@ SOKOL_API_IMPL void simgui_setup(const simgui_desc_t* desc) { } pip_desc.shader = _simgui.shd; pip_desc.index_type = SG_INDEXTYPE_UINT16; - pip_desc.blend.enabled = true; - pip_desc.blend.src_factor_rgb = SG_BLENDFACTOR_SRC_ALPHA; - pip_desc.blend.dst_factor_rgb = SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA; - pip_desc.blend.color_write_mask = SG_COLORMASK_RGB; - pip_desc.blend.color_format = _simgui.desc.color_format; - pip_desc.blend.depth_format = _simgui.desc.depth_format; - pip_desc.rasterizer.sample_count = _simgui.desc.sample_count; + pip_desc.sample_count = _simgui.desc.sample_count; + pip_desc.depth.pixel_format = _simgui.desc.depth_format; + pip_desc.colors[0].pixel_format = _simgui.desc.color_format; + pip_desc.colors[0].write_mask = SG_COLORMASK_RGB; + pip_desc.colors[0].blend.enabled = true; + pip_desc.colors[0].blend.src_factor_rgb = SG_BLENDFACTOR_SRC_ALPHA; + pip_desc.colors[0].blend.dst_factor_rgb = SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA; pip_desc.label = "sokol-imgui-pipeline"; _simgui.pip = sg_make_pipeline(&pip_desc); @@ -1907,37 +1899,40 @@ SOKOL_API_IMPL void simgui_render(void) { sg_apply_pipeline(_simgui.pip); _simgui_vs_params_t vs_params; + memset((void*)&vs_params, 0, sizeof(vs_params)); vs_params.disp_size.x = io->DisplaySize.x; vs_params.disp_size.y = io->DisplaySize.y; - sg_apply_uniforms(SG_SHADERSTAGE_VS, 0, &vs_params, sizeof(vs_params)); + sg_apply_uniforms(SG_SHADERSTAGE_VS, 0, SG_RANGE_REF(vs_params)); sg_bindings bind; - memset(&bind, 0, sizeof(bind)); + memset((void*)&bind, 0, sizeof(bind)); bind.vertex_buffers[0] = _simgui.vbuf; bind.index_buffer = _simgui.ibuf; ImTextureID tex_id = io->Fonts->TexID; bind.fs_images[0].id = (uint32_t)(uintptr_t)tex_id; - uint32_t vb_offset = 0; - uint32_t ib_offset = 0; + int vb_offset = 0; + int ib_offset = 0; for (int cl_index = 0; cl_index < draw_data->CmdListsCount; cl_index++) { ImDrawList* cl = draw_data->CmdLists[cl_index]; /* append vertices and indices to buffers, record start offsets in draw state */ #if defined(__cplusplus) - const int vtx_size = cl->VtxBuffer.size() * sizeof(ImDrawVert); - const int idx_size = cl->IdxBuffer.size() * sizeof(ImDrawIdx); + const size_t vtx_size = cl->VtxBuffer.size() * sizeof(ImDrawVert); + const size_t idx_size = cl->IdxBuffer.size() * sizeof(ImDrawIdx); const ImDrawVert* vtx_ptr = &cl->VtxBuffer.front(); const ImDrawIdx* idx_ptr = &cl->IdxBuffer.front(); #else - const int vtx_size = cl->VtxBuffer.Size * sizeof(ImDrawVert); - const int idx_size = cl->IdxBuffer.Size * sizeof(ImDrawIdx); + const size_t vtx_size = (size_t)cl->VtxBuffer.Size * sizeof(ImDrawVert); + const size_t idx_size = (size_t)cl->IdxBuffer.Size * sizeof(ImDrawIdx); const ImDrawVert* vtx_ptr = cl->VtxBuffer.Data; const ImDrawIdx* idx_ptr = cl->IdxBuffer.Data; #endif if (vtx_ptr) { - vb_offset = sg_append_buffer(bind.vertex_buffers[0], vtx_ptr, vtx_size); + const sg_range vtx_range = { vtx_ptr, vtx_size }; + vb_offset = sg_append_buffer(bind.vertex_buffers[0], &vtx_range); } if (idx_ptr) { - ib_offset = 
sg_append_buffer(bind.index_buffer, idx_ptr, idx_size); + const sg_range idx_range = { idx_ptr, idx_size }; + ib_offset = sg_append_buffer(bind.index_buffer, &idx_range); } /* don't render anything if the buffer is in overflow state (this is also checked internally in sokol_gfx, draw calls that attempt to draw with @@ -1966,7 +1961,7 @@ SOKOL_API_IMPL void simgui_render(void) { // need to re-apply all state after calling a user callback sg_apply_viewport(0, 0, fb_width, fb_height, true); sg_apply_pipeline(_simgui.pip); - sg_apply_uniforms(SG_SHADERSTAGE_VS, 0, &vs_params, sizeof(vs_params)); + sg_apply_uniforms(SG_SHADERSTAGE_VS, 0, SG_RANGE_REF(vs_params)); sg_apply_bindings(&bind); } else { @@ -1974,7 +1969,7 @@ SOKOL_API_IMPL void simgui_render(void) { tex_id = pcmd->TextureId; vtx_offset = pcmd->VtxOffset; bind.fs_images[0].id = (uint32_t)(uintptr_t)tex_id; - bind.vertex_buffer_offsets[0] = vb_offset + pcmd->VtxOffset * sizeof(ImDrawVert); + bind.vertex_buffer_offsets[0] = vb_offset + (int)(pcmd->VtxOffset * sizeof(ImDrawVert)); sg_apply_bindings(&bind); } const int scissor_x = (int) (pcmd->ClipRect.x * dpi_scale); @@ -1982,9 +1977,9 @@ SOKOL_API_IMPL void simgui_render(void) { const int scissor_w = (int) ((pcmd->ClipRect.z - pcmd->ClipRect.x) * dpi_scale); const int scissor_h = (int) ((pcmd->ClipRect.w - pcmd->ClipRect.y) * dpi_scale); sg_apply_scissor_rect(scissor_x, scissor_y, scissor_w, scissor_h, true); - sg_draw(base_element, pcmd->ElemCount, 1); + sg_draw(base_element, (int)pcmd->ElemCount, 1); } - base_element += pcmd->ElemCount; + base_element += (int)pcmd->ElemCount; } } sg_apply_viewport(0, 0, fb_width, fb_height, true); diff --git a/util/sokol_memtrack.h b/util/sokol_memtrack.h index b795e16f9..c7ba20b9b 100644 --- a/util/sokol_memtrack.h +++ b/util/sokol_memtrack.h @@ -74,7 +74,6 @@ distribution. 
*/ #define SOKOL_MEMTRACK_INCLUDED (1) -#include #include #if defined(SOKOL_API_DECL) && !defined(SOKOL_MEMTRACK_API_DECL) @@ -111,6 +110,7 @@ SOKOL_MEMTRACK_API_DECL smemtrack_info_t smemtrack_info(void); #define SOKOL_MEMTRACK_IMPL_INCLUDED (1) #include /* malloc, free, calloc */ #include /* memset */ +#include /* size_t */ #ifndef SOKOL_API_IMPL #define SOKOL_API_IMPL diff --git a/util/sokol_shape.h b/util/sokol_shape.h index 2c54ac53d..be2a9c6c6 100644 --- a/util/sokol_shape.h +++ b/util/sokol_shape.h @@ -83,12 +83,10 @@ sshape_buffer_t buf = { .vertices = { - .buffer_ptr = vertices, - .buffer_size = sizeof(vertices) + .buffer = SSHAPE_RANGE(vertices), }, .indices = { - .buffer_ptr = indices, - .buffer_size = sizeof(indices) + .buffer = SSHAPE_RANGE(indices), } }; ``` @@ -222,7 +220,7 @@ ```c // create sokol-gfx vertex buffer sg_buffer_desc vbuf_desc = sshape_vertex_buffer_desc(&buf); - sg_buffer vbuf = sg_make_buffer(&vbuf_desc; + sg_buffer vbuf = sg_make_buffer(&vbuf_desc); // create sokol-gfx index buffer sg_buffer_desc ibuf_desc = sshape_index_buffer_desc(&buf); @@ -267,8 +265,8 @@ uint16_t indices[16]; sshape_buffer_t buf = { - .vertices = { .buffer_ptr = vertices, .buffer_size = sizeof(vertices) }, - .indices = { .buffer_ptr = indices, .buffer_size = sizeof(indices) } + .vertices.buffer = SSHAPE_RANGE(vertices), + .indices.buffer = SSHAPE_RANGE(indices) }; // first cube at pos x=-2.0 (with default size of 1x1x1) @@ -312,8 +310,8 @@ sshape_vertex_t vertices[128]; uint16_t indices[16]; sshape_buffer_t buf = { - .vertices = { .buffer_ptr = vertices, .buffer_size = sizeof(vertices) }, - .indices = { .buffer_ptr = indices, .buffer_size = sizeof(indices) } + .vertices.buffer = SSHAPE_RANGE(vertices), + .indices.buffer = SSHAPE_RANGE(indices) }; // build a red cube... @@ -362,6 +360,7 @@ distribution. */ #define SOKOL_SHAPE_INCLUDED +#include // size_t, offsetof #include #include @@ -386,6 +385,27 @@ extern "C" { #endif +/* + sshape_range is a pointer-size-pair struct used to pass memory + blobs into sokol-shape. When initialized from a value type + (array or struct), use the SSHAPE_RANGE() macro to build + an sshape_range struct. 
+*/ +typedef struct sshape_range { + const void* ptr; + size_t size; +} sshape_range; + +// disabling this for every includer isn't great, but the warning is also quite pointless +#if defined(_MSC_VER) +#pragma warning(disable:4221) /* /W4 only: nonstandard extension used: 'x': cannot be initialized using address of automatic variable 'y' */ +#endif +#if defined(__cplusplus) +#define SSHAPE_RANGE(x) sshape_range{ &x, sizeof(x) } +#else +#define SSHAPE_RANGE(x) (sshape_range){ &x, sizeof(x) } +#endif + /* a 4x4 matrix wrapper struct */ typedef struct sshape_mat4_t { float m[4][4]; } sshape_mat4_t; @@ -401,12 +421,18 @@ typedef struct sshape_vertex_t { typedef struct sshape_element_range_t { int base_element; int num_elements; + #if defined(SOKOL_ZIG_BINDINGS) + uint32_t __pad[3]; + #endif } sshape_element_range_t; /* number of elements and byte size of build actions */ typedef struct sshape_sizes_item_t { uint32_t num; // number of elements uint32_t size; // the same as size in bytes + #if defined(SOKOL_ZIG_BINDINGS) + uint32_t __pad[3]; + #endif } sshape_sizes_item_t; typedef struct sshape_sizes_t { @@ -416,10 +442,9 @@ typedef struct sshape_sizes_t { /* in/out struct to keep track of mesh-build state */ typedef struct sshape_buffer_item_t { - void* buffer_ptr; // pointer to start of output buffer - uint32_t buffer_size; // size in bytes of output buffer - uint32_t data_size; // size in bytes of valid data in buffer - uint32_t shape_offset; // data offset of the most recent shape + sshape_range buffer; // pointer/size pair of output buffer + size_t data_size; // size in bytes of valid data in buffer + size_t shape_offset; // data offset of the most recent shape } sshape_buffer_item_t; typedef struct sshape_buffer_t { @@ -526,7 +551,6 @@ SOKOL_SHAPE_API_DECL sshape_mat4_t sshape_mat4_transpose(const float m[16]); #define SOKOL_SHAPE_IMPL_INCLUDED (1) #include // memcpy -#include // offsetof #include // sinf, cosf #ifdef __clang__ @@ -674,13 +698,13 @@ static uint32_t _sshape_torus_num_indices(uint32_t sides, uint32_t rings) { } static bool _sshape_validate_buffer_item(const sshape_buffer_item_t* item, uint32_t build_size) { - if (0 == item->buffer_ptr) { + if (0 == item->buffer.ptr) { return false; } - if (0 == item->buffer_size) { + if (0 == item->buffer.size) { return false; } - if ((item->data_size + build_size) > item->buffer_size) { + if ((item->data_size + build_size) > item->buffer.size) { return false; } if (item->shape_offset > item->data_size) { @@ -761,10 +785,10 @@ static sshape_torus_t _sshape_torus_defaults(const sshape_torus_t* params) { } static void _sshape_add_vertex(sshape_buffer_t* buf, _sshape_vec4_t pos, _sshape_vec4_t norm, _sshape_vec2_t uv, uint32_t color) { - uint32_t offset = buf->vertices.data_size; - SOKOL_ASSERT((offset + sizeof(sshape_vertex_t)) <= buf->vertices.buffer_size); + size_t offset = buf->vertices.data_size; + SOKOL_ASSERT((offset + sizeof(sshape_vertex_t)) <= buf->vertices.buffer.size); buf->vertices.data_size += sizeof(sshape_vertex_t); - sshape_vertex_t* v_ptr = (sshape_vertex_t*) ((uint8_t*)buf->vertices.buffer_ptr + offset); + sshape_vertex_t* v_ptr = (sshape_vertex_t*) ((uint8_t*)buf->vertices.buffer.ptr + offset); v_ptr->x = pos.x; v_ptr->y = pos.y; v_ptr->z = pos.z; @@ -775,10 +799,10 @@ static void _sshape_add_vertex(sshape_buffer_t* buf, _sshape_vec4_t pos, _sshape } static void _sshape_add_triangle(sshape_buffer_t* buf, uint16_t i0, uint16_t i1, uint16_t i2) { - uint32_t offset = buf->indices.data_size; - SOKOL_ASSERT((offset + 
3*sizeof(uint16_t)) <= buf->indices.buffer_size); + size_t offset = buf->indices.data_size; + SOKOL_ASSERT((offset + 3*sizeof(uint16_t)) <= buf->indices.buffer.size); buf->indices.data_size += 3*sizeof(uint16_t); - uint16_t* i_ptr = (uint16_t*) ((uint8_t*)buf->indices.buffer_ptr + offset); + uint16_t* i_ptr = (uint16_t*) ((uint8_t*)buf->indices.buffer.ptr + offset); i_ptr[0] = i0; i_ptr[1] = i1; i_ptr[2] = i2; @@ -1338,10 +1362,10 @@ SOKOL_API_IMPL sg_buffer_desc sshape_vertex_buffer_desc(const sshape_buffer_t* b SOKOL_ASSERT(buf && buf->valid); sg_buffer_desc desc = { 0 }; if (buf->valid) { - desc.size = buf->vertices.data_size; desc.type = SG_BUFFERTYPE_VERTEXBUFFER; desc.usage = SG_USAGE_IMMUTABLE; - desc.content = buf->vertices.buffer_ptr; + desc.data.ptr = buf->vertices.buffer.ptr; + desc.data.size = buf->vertices.data_size; } return desc; } @@ -1350,10 +1374,10 @@ SOKOL_API_IMPL sg_buffer_desc sshape_index_buffer_desc(const sshape_buffer_t* bu SOKOL_ASSERT(buf && buf->valid); sg_buffer_desc desc = { 0 }; if (buf->valid) { - desc.size = buf->indices.data_size; desc.type = SG_BUFFERTYPE_INDEXBUFFER; desc.usage = SG_USAGE_IMMUTABLE; - desc.content = buf->indices.buffer_ptr; + desc.data.ptr = buf->indices.buffer.ptr; + desc.data.size = buf->indices.data_size; } return desc; } @@ -1364,9 +1388,9 @@ SOKOL_SHAPE_API_DECL sshape_element_range_t sshape_element_range(const sshape_bu SOKOL_ASSERT(0 == (buf->indices.shape_offset & (sizeof(uint16_t) - 1))); SOKOL_ASSERT(0 == (buf->indices.data_size & (sizeof(uint16_t) - 1))); sshape_element_range_t range = { 0 }; - range.base_element = buf->indices.shape_offset / sizeof(uint16_t); + range.base_element = (int) (buf->indices.shape_offset / sizeof(uint16_t)); if (buf->valid) { - range.num_elements = (buf->indices.data_size - buf->indices.shape_offset) / sizeof(uint16_t); + range.num_elements = (int) ((buf->indices.data_size - buf->indices.shape_offset) / sizeof(uint16_t)); } else { range.num_elements = 0; From 492146ea6649ec050c7f635ebc006290d03de283 Mon Sep 17 00:00:00 2001 From: Andre Weissflog Date: Wed, 10 Feb 2021 19:12:40 +0100 Subject: [PATCH 4/8] suppress some VS2015 warnings --- sokol_gfx.h | 3 ++- util/sokol_debugtext.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/sokol_gfx.h b/sokol_gfx.h index a1e369bd3..39e59f577 100644 --- a/sokol_gfx.h +++ b/sokol_gfx.h @@ -641,9 +641,10 @@ typedef struct sg_range { size_t size; } sg_range; -// disabling this for every includer isn't great, but the warning is also quite pointless +// disabling this for every includer isn't great, but the warnings are also quite pointless #if defined(_MSC_VER) #pragma warning(disable:4221) /* /W4 only: nonstandard extension used: 'x': cannot be initialized using address of automatic variable 'y' */ +#pragma warning(disable:4202) /* VS2015: nonstandard extension used: non-constant aggregate initializer */ #endif #if defined(__cplusplus) #define SG_RANGE(x) sg_range{ &x, sizeof(x) } diff --git a/util/sokol_debugtext.h b/util/sokol_debugtext.h index 236d4778f..00223c6bf 100644 --- a/util/sokol_debugtext.h +++ b/util/sokol_debugtext.h @@ -443,6 +443,7 @@ typedef struct sdtx_range { // disabling this for every includer isn't great, but the warning is also quite pointless #if defined(_MSC_VER) #pragma warning(disable:4221) /* /W4 only: nonstandard extension used: 'x': cannot be initialized using address of automatic variable 'y' */ +#pragma warning(disable:4204) /* VS2015: nonstandard extension used: non-constant aggregate initializer */ #endif #if 
defined(__cplusplus) #define SDTX_RANGE(x) sdtx_range{ &x, sizeof(x) } From 9dfe8cd050d7a69deff1f74c3bfd4d1ba0c18cd4 Mon Sep 17 00:00:00 2001 From: Andre Weissflog Date: Wed, 10 Feb 2021 19:23:38 +0100 Subject: [PATCH 5/8] update changelog and readme --- CHANGELOG.md | 6 ++++++ README.md | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 53c70d6d5..ac98ad6aa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,12 @@ > NOTE: this list will usually only be updated with changes that affect the public APIs +- **10-Feb-2021**: The breaking API-update has been merged (mainly sokol_gfx.h). +Please see [this blogpost](https://floooh.github.io/2021/02/07/sokol-api-overhaul.html) +and the updates [sokol samples](https://floooh.github.io/sokol-html5/) for details. +I also created a git tag named 'pre-feb2021-api-changes' which captures the previous +state in all related projects. Please also update the [sokol-tools-bin](https://github.com/floooh/sokol-tools-bin) if you're using the sokol-shdc shader compiler. + - **07-Feb-2021**: A PSA about upcoming breaking changes in (mainly) sokol_gfx.h: https://floooh.github.io/2021/02/07/sokol-api-overhaul.html - **20-Dec-2020**: A couple of minor breaking changes in the sokol_gfx.h and diff --git a/README.md b/README.md index f2340e98d..baea2d2bb 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ Simple [STB-style](https://github.com/nothings/stb/blob/master/docs/stb_howto.txt) cross-platform libraries for C and C++, written in C. -[**See what's new**](https://github.com/floooh/sokol/blob/master/CHANGELOG.md) (**07-Feb-2021**: new blog post about upcoming breaking API changes in sokol_gfx.h) +[**See what's new**](https://github.com/floooh/sokol/blob/master/CHANGELOG.md) (**10-Feb-2021**: big API-breaking update, see the changelog for details) ## Examples and Related Projects From 8c098017b29dbdad44856b457cb05ebd86db118b Mon Sep 17 00:00:00 2001 From: Andre Weissflog Date: Wed, 10 Feb 2021 20:01:54 +0100 Subject: [PATCH 6/8] sokol_gfx.h: suppress a VS2015 type cast warning --- sokol_gfx.h | 1 + 1 file changed, 1 insertion(+) diff --git a/sokol_gfx.h b/sokol_gfx.h index 39e59f577..1328626cd 100644 --- a/sokol_gfx.h +++ b/sokol_gfx.h @@ -2502,6 +2502,7 @@ inline int sg_append_buffer(sg_buffer buf_id, const sg_range& data) { return sg_ #pragma warning(disable:4115) /* named type definition in parentheses */ #pragma warning(disable:4505) /* unreferenced local function has been removed */ #pragma warning(disable:4201) /* nonstandard extension used: nameless struct/union (needed by d3d11.h) */ +#pragma warning(disable:4054) /* 'type cast': from function pointer 'FARPROC' to data pointer 'void *' */ #endif #if defined(SOKOL_GLCORE33) || defined(SOKOL_GLES2) || defined(SOKOL_GLES3) From d4e3b560c3e4c48b2af2b2386eda26899707fc2f Mon Sep 17 00:00:00 2001 From: Andre Weissflog Date: Wed, 10 Feb 2021 22:59:27 +0100 Subject: [PATCH 7/8] sokol_gfx.h: fix VS2015 warning supression --- sokol_gfx.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sokol_gfx.h b/sokol_gfx.h index 1328626cd..d85de83f7 100644 --- a/sokol_gfx.h +++ b/sokol_gfx.h @@ -2502,7 +2502,7 @@ inline int sg_append_buffer(sg_buffer buf_id, const sg_range& data) { return sg_ #pragma warning(disable:4115) /* named type definition in parentheses */ #pragma warning(disable:4505) /* unreferenced local function has been removed */ #pragma warning(disable:4201) /* nonstandard extension used: nameless struct/union (needed by d3d11.h) */ -#pragma 
warning(disable:4054) /* 'type cast': from function pointer 'FARPROC' to data pointer 'void *' */ +#pragma warning(disable:4055) /* 'type cast': from function pointer 'FARPROC' to data pointer 'void *' */ #endif #if defined(SOKOL_GLCORE33) || defined(SOKOL_GLES2) || defined(SOKOL_GLES3) From 29bbcd84147e287c501fbcbdb105d56e7aee3934 Mon Sep 17 00:00:00 2001 From: Andre Weissflog Date: Wed, 10 Feb 2021 23:42:34 +0100 Subject: [PATCH 8/8] sokol_gfx.h VS2015: disable both warning C4054 and C4055 --- sokol_gfx.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sokol_gfx.h b/sokol_gfx.h index d85de83f7..d9a9e4dae 100644 --- a/sokol_gfx.h +++ b/sokol_gfx.h @@ -2502,7 +2502,8 @@ inline int sg_append_buffer(sg_buffer buf_id, const sg_range& data) { return sg_ #pragma warning(disable:4115) /* named type definition in parentheses */ #pragma warning(disable:4505) /* unreferenced local function has been removed */ #pragma warning(disable:4201) /* nonstandard extension used: nameless struct/union (needed by d3d11.h) */ -#pragma warning(disable:4055) /* 'type cast': from function pointer 'FARPROC' to data pointer 'void *' */ +#pragma warning(disable:4054) /* 'type cast': from function pointer */ +#pragma warning(disable:4055) /* 'type cast': from data pointer */ #endif #if defined(SOKOL_GLCORE33) || defined(SOKOL_GLES2) || defined(SOKOL_GLES3)
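
For reference, a minimal sketch of what buffer creation looks like after the API update carried by these patches: the old `.content` pointer plus `.size` pair on `sg_buffer_desc` becomes a single `.data` member of type `sg_range`, typically filled with the `SG_RANGE()` helper (which expands to a pointer/size pair taken from a value type). The `make_triangle_vbuf()` helper and its vertex data below are hypothetical and only illustrate the new pattern; a sokol_gfx.h backend define is assumed to be provided by the build.

```c
#include "sokol_gfx.h"  // assumes a backend define (e.g. SOKOL_DUMMY_BACKEND) is set by the build

// hypothetical helper, not taken from the patches above: create an immutable
// vertex buffer using the new sg_range-based sg_buffer_desc
static sg_buffer make_triangle_vbuf(void) {
    static const float vertices[] = {
         0.0f,  0.5f,
         0.5f, -0.5f,
        -0.5f, -0.5f,
    };
    return sg_make_buffer(&(sg_buffer_desc){
        // SG_RANGE(vertices) expands to (sg_range){ &vertices, sizeof(vertices) },
        // replacing the old .content pointer + .size pair; for immutable buffers
        // the buffer size is taken from .data.size
        .data = SG_RANGE(vertices),
        .label = "triangle-vertices"
    });
}
```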