author     Aryadev Chavali <aryadev@aryadevchavali.com>    2024-04-15 04:42:24 +0630
committer  Aryadev Chavali <aryadev@aryadevchavali.com>    2024-04-15 04:42:24 +0630
commit     f01d64b5f4d26611c90394f7b26ff219c4696c33 (patch)
tree       d7cf80be4721f7a5b0874fe88f87076850a0df3a
parent     062ed1227849f9954427c8e13c0b720b68c78f63 (diff)
lexer now produces a vector of heap allocated tokens
This removes the problem of potentially expensive copies being made
implicitly (as C++ just... does) when working with tokens produced by
the lexer: now we hold pointers, and copying a pointer is trivially
cheap. I want expensive work to happen because I chose to do it, and
for a reason: I want to be the one holding the shotgun.
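In concrete terms, the change swaps a vector of token values for a vector of owning pointers, so pushing a token copies one pointer rather than the whole object. A minimal sketch of the idea follows; the fields of token_t here are assumptions for illustration, not the actual definition in asm/lexer.hpp.

#include <string>
#include <vector>

// Assumed, simplified stand-in for the real token_t in asm/lexer.hpp.
struct token_t
{
  std::string content; // copying this member may allocate
  size_t line = 0, column = 0;
};

int main()
{
  std::vector<token_t *> tokens;

  token_t t{"hello", 1, 0};
  // One deliberate copy onto the heap; afterwards the vector only
  // ever moves pointers around, never token contents.
  tokens.push_back(new token_t(t));

  // Whoever holds the vector owns the tokens and must free them.
  for (token_t *tok : tokens)
    delete tok;
}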
-rw-r--r--  asm/lexer.cpp | 7 ++++---
-rw-r--r--  asm/lexer.hpp | 2 +-
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/asm/lexer.cpp b/asm/lexer.cpp
index d6c9883..a8d0828 100644
--- a/asm/lexer.cpp
+++ b/asm/lexer.cpp
@@ -310,7 +310,7 @@ token_t tokenise_literal_string(string_view &source, size_t &column, size_t end)
   return token;
 }
 
-lerr_t tokenise_buffer(string_view source, std::vector<token_t> &tokens)
+lerr_t tokenise_buffer(string_view source, std::vector<token_t *> &tokens)
 {
   size_t column = 0, line = 1;
   while (source.size() > 0)
@@ -393,8 +393,9 @@ lerr_t tokenise_buffer(string_view source, std::vector<token_t> &tokens)
     }
     if (is_token)
     {
-      t.line = line;
-      tokens.push_back(t);
+      t.line       = line;
+      token_t *acc = new token_t(t);
+      tokens.push_back(acc);
     }
   }
   return lerr_t::OK;
diff --git a/asm/lexer.hpp b/asm/lexer.hpp
index 1257b97..c74228f 100644
--- a/asm/lexer.hpp
+++ b/asm/lexer.hpp
@@ -91,6 +91,6 @@ enum class lerr_t
 };
 
 const char *lerr_as_cstr(lerr_t);
-lerr_t tokenise_buffer(std::string_view, std::vector<token_t> &);
+lerr_t tokenise_buffer(std::string_view, std::vector<token_t *> &);
 
 #endif
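With the new signature, callers of tokenise_buffer receive heap-allocated tokens and become responsible for freeing them. A rough usage sketch under that assumption; the wrapper function and its error handling are hypothetical, only tokenise_buffer, token_t, and lerr_t::OK come from the diff above.

#include <string_view>
#include <vector>

#include "lexer.hpp"

// Hypothetical caller: lex a buffer, cleaning up on failure.
std::vector<token_t *> lex_buffer(std::string_view src)
{
  std::vector<token_t *> tokens;
  lerr_t err = tokenise_buffer(src, tokens);
  if (err != lerr_t::OK)
  {
    // On error, release whatever was already allocated before returning.
    for (token_t *tok : tokens)
      delete tok;
    tokens.clear();
  }
  return tokens;
}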