Add lexing, parsing and some semantic checking for constant buffers.

This commit is contained in:
2024-06-14 19:56:28 +02:00
parent b1dceb298f
commit a9a67e3fac
45 changed files with 1275 additions and 1119 deletions

View File

@@ -64,6 +64,7 @@ Token_Kind :: enum {
TOKEN_CBUFFER;
TOKEN_COLUMNMAJOR;
TOKEN_CONST;
TOKEN_CONSTANT_BUFFER;
TOKEN_CONTINUE;
TOKEN_DEFAULT;
@@ -128,7 +129,8 @@ Token :: struct {
}
source : *u8;
// This could all be derived on demand
line : int;
length : int;
column : int;
@@ -214,46 +216,47 @@ identifier_kind :: (using lexer : *Lexer) -> Token_Kind {
identifier.data = *input.data[start];
identifier.count = length;
if identifier == "bool" return .TOKEN_BOOL;
if identifier == "case" return .TOKEN_CASE;
if identifier == "columnmajor" return .TOKEN_COLUMNMAJOR;
if identifier == "const" return .TOKEN_CONST;
if identifier == "continue" return .TOKEN_CONTINUE;
if identifier == "default" return .TOKEN_DEFAULT;
if identifier == "directive" return .TOKEN_DIRECTIVE;
if identifier == "discard" return .TOKEN_DIRECTIVE;
if identifier == "discard" return .TOKEN_DISCARD;
if identifier == "do" return .TOKEN_DO;
if identifier == "double" return .TOKEN_DOUBLE;
if identifier == "else" return .TOKEN_ELSE;
if identifier == "export" return .TOKEN_EXPORT;
if identifier == "extern" return .TOKEN_EXTERN;
if identifier == "false" return .TOKEN_FALSE;
if identifier == "for" return .TOKEN_FOR;
if identifier == "half" return .TOKEN_HALF;
if identifier == "hint" return .TOKEN_HINT;
if identifier == "if" return .TOKEN_IF;
if identifier == "in" return .TOKEN_IN;
if identifier == "inout" return .TOKEN_INOUT;
if identifier == "instance" return .TOKEN_INSTANCE;
if identifier == "matrix" return .TOKEN_MATRIX;
if identifier == "meta" return .TOKEN_META;
if identifier == "optional" return .TOKEN_OPTIONAL;
if identifier == "out" return .TOKEN_OUT;
if identifier == "pixel" return .TOKEN_PIXEL;
if identifier == "properties" return .TOKEN_PROPERTIES;
if identifier == "return" return .TOKEN_RETURN;
if identifier == "register" return .TOKEN_REGISTER;
if identifier == "struct" return .TOKEN_STRUCT;
if identifier == "switch" return .TOKEN_SWITCH;
if identifier == "true" return .TOKEN_TRUE;
if identifier == "unorm" return .TOKEN_UNORM;
if identifier == "unsigned" return .TOKEN_UNSIGNED;
if identifier == "uint" return .TOKEN_UINT;
if identifier == "vector" return .TOKEN_VECTOR;
if identifier == "vertex" return .TOKEN_VERTEX;
if identifier == "void" return .TOKEN_VOID;
if identifier == "while" return .TOKEN_WHILE;
if identifier == "bool" return .TOKEN_BOOL;
if identifier == "case" return .TOKEN_CASE;
if identifier == "columnmajor" return .TOKEN_COLUMNMAJOR;
if identifier == "const" return .TOKEN_CONST;
if identifier == "constant_buffer" return .TOKEN_CONSTANT_BUFFER;
if identifier == "continue" return .TOKEN_CONTINUE;
if identifier == "default" return .TOKEN_DEFAULT;
if identifier == "directive" return .TOKEN_DIRECTIVE;
if identifier == "discard" return .TOKEN_DIRECTIVE;
if identifier == "discard" return .TOKEN_DISCARD;
if identifier == "do" return .TOKEN_DO;
if identifier == "double" return .TOKEN_DOUBLE;
if identifier == "else" return .TOKEN_ELSE;
if identifier == "export" return .TOKEN_EXPORT;
if identifier == "extern" return .TOKEN_EXTERN;
if identifier == "false" return .TOKEN_FALSE;
if identifier == "for" return .TOKEN_FOR;
if identifier == "half" return .TOKEN_HALF;
if identifier == "hint" return .TOKEN_HINT;
if identifier == "if" return .TOKEN_IF;
if identifier == "in" return .TOKEN_IN;
if identifier == "inout" return .TOKEN_INOUT;
if identifier == "instance" return .TOKEN_INSTANCE;
if identifier == "matrix" return .TOKEN_MATRIX;
if identifier == "meta" return .TOKEN_META;
if identifier == "optional" return .TOKEN_OPTIONAL;
if identifier == "out" return .TOKEN_OUT;
if identifier == "pixel" return .TOKEN_PIXEL;
if identifier == "properties" return .TOKEN_PROPERTIES;
if identifier == "return" return .TOKEN_RETURN;
if identifier == "register" return .TOKEN_REGISTER;
if identifier == "struct" return .TOKEN_STRUCT;
if identifier == "switch" return .TOKEN_SWITCH;
if identifier == "true" return .TOKEN_TRUE;
if identifier == "unorm" return .TOKEN_UNORM;
if identifier == "unsigned" return .TOKEN_UNSIGNED;
if identifier == "uint" return .TOKEN_UINT;
if identifier == "vector" return .TOKEN_VECTOR;
if identifier == "vertex" return .TOKEN_VERTEX;
if identifier == "void" return .TOKEN_VOID;
if identifier == "while" return .TOKEN_WHILE;
return .TOKEN_IDENTIFIER;
}
@@ -496,8 +499,66 @@ scan_next_token :: (lexer : *Lexer) -> *Token {
// return error_token(lexer, tprint("Invalid token: %", s));
}
lex :: (lexer : *Lexer, allocator : Allocator = context.allocator) -> Lexing_Result {
    // Tokenize the whole input. Tokens accumulate inside lexer.result;
    // the caller-supplied allocator owns the token array.
    lexer.result.tokens.allocator = allocator;

    while true {
        t := scan_next_token(lexer);
        if !t                       break;  // scanner returned nothing — stop
        if t.kind == .TOKEN_EOF     break;  // reached end of input
    }

    return lexer.result;
}
init_lexer_from_string :: (lexer : *Lexer, input : string) {
    // Point the lexer at an in-memory string. On failure, record the
    // error and mark the lexing result as failed.
    if !read_input_from_string(lexer, input) {
        record_error(lexer, "Unable to initialize from string\n");
        lexer.result.had_error = true;
    }
}
init_lexer_from_file :: (lexer : *Lexer, file_path : string) {
    // Load file_path into the lexer. On failure, record the error
    // and mark the lexing result as failed.
    if !read_input_from_file(lexer, file_path) {
        record_error(lexer, tprint("Unable to read file: %\n", file_path));
        lexer.result.had_error = true;
    }
}
read_input_from_string :: (lexer : *Lexer, input : string) -> bool {
    // Attach `input` and reset all scanning state to the start of it.
    // Always succeeds for in-memory input.
    lexer.input          = input;
    lexer.start          = 0;
    lexer.cursor         = 0;
    lexer.current_column = 0;
    lexer.current_line   = 1;  // lines are 1-based
    return true;
}
read_input_from_file :: (lexer : *Lexer, file_path : string) -> bool {
    // Read the entire file into memory and reset scanning state.
    // Returns false (without touching lexer state) if the read fails.
    assert(file_path != "");

    contents, ok := read_entire_file(file_path, true, true);
    if !ok {
        // NOTE(review): freed even on failure, matching the original code —
        // presumably read_entire_file can allocate before failing; confirm.
        free(contents);
        return false;
    }

    lexer.path           = copy_string(file_path);
    lexer.input          = contents;
    lexer.start          = 0;
    lexer.cursor         = 0;
    lexer.current_column = 0;
    lexer.current_line   = 1;  // lines are 1-based
    return true;
}
// ===========================================================
// Pretty printing
pretty_print_token :: (token : *Token, builder : *String_Builder) {
MAX :: 18;
MAX :: 21;
kind_name := enum_names(Token_Kind)[cast(int)token.kind];
diff := MAX - kind_name.count;
@@ -637,59 +698,5 @@ print_from_source_location :: (source_location : Source_Range, allocator := cont
return builder_to_string(*builder,, allocator);
}
// NOTE(review): byte-identical duplicate of the `lex` defined earlier in this
// chunk — almost certainly a diff/extraction artifact (code moved within the
// file), not a real redefinition. Confirm against the actual source file.
// Scans tokens until EOF (or a null token) and returns the accumulated result.
lex :: (lexer : *Lexer, allocator : Allocator = context.allocator) -> Lexing_Result {
lexer.result.tokens.allocator = allocator;
token : *Token = scan_next_token(lexer);
while token && token.kind != .TOKEN_EOF {
token = scan_next_token(lexer);
}
return lexer.result;
}
// NOTE(review): byte-identical duplicate of `init_lexer_from_string` earlier in
// this chunk — likely a diff/extraction artifact; confirm against the real file.
// Initializes the lexer from an in-memory string, recording an error on failure.
init_lexer_from_string :: (lexer : *Lexer, input : string) {
ok := read_input_from_string(lexer, input);
if !ok {
record_error(lexer, "Unable to initialize from string\n");
lexer.result.had_error = true;
}
}
// NOTE(review): byte-identical duplicate of `init_lexer_from_file` earlier in
// this chunk — likely a diff/extraction artifact; confirm against the real file.
// Initializes the lexer from a file on disk, recording an error on failure.
init_lexer_from_file :: (lexer : *Lexer, file_path : string) {
ok := read_input_from_file(lexer, file_path);
if !ok {
record_error(lexer, tprint("Unable to read file: %\n", file_path));
lexer.result.had_error = true;
}
}
// NOTE(review): byte-identical duplicate of `read_input_from_string` earlier in
// this chunk — likely a diff/extraction artifact; confirm against the real file.
// Attaches the input string and resets cursor/line/column scanning state.
read_input_from_string :: (lexer : *Lexer, input : string) -> bool {
lexer.input = input;
lexer.cursor = 0;
lexer.start = 0;
lexer.current_line = 1;
lexer.current_column = 0;
return true;
}
// NOTE(review): byte-identical duplicate of `read_input_from_file` earlier in
// this chunk — likely a diff/extraction artifact; confirm against the real file.
// Reads the whole file into memory, stores a copy of the path, and resets
// scanning state. Returns false if the file cannot be read.
read_input_from_file :: (lexer : *Lexer, file_path : string) -> bool {
assert(file_path != "");
value, success := read_entire_file(file_path, true, true);
if !success {
// freed even on failure — read_entire_file presumably allocates before failing; confirm
free(value);
return false;
}
lexer.path = copy_string(file_path);
lexer.input = value;
lexer.cursor = 0;
lexer.start = 0;
lexer.current_line = 1;
lexer.current_column = 0;
return true;
}
#import "Basic";