forked from gyunggyung/KoAlpaca.cpp
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmain.cpp
49 lines (38 loc) · 1.5 KB
/
main.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
#include <iostream>
#include <string>
#include "ggml.h"
#include "utils.h"
#include <onnxruntime_c_api.h>
#include <onnxruntime_cxx_api.h>
#include <transformers/transformers.h>
#include <transformers/bert/BertModel.h>
// Interactive REPL around the KoAlpaca model: prompt for a line of text,
// tokenize it, run generation, print the decoded result; repeat until the
// user types "quit" or stdin is closed.
void useKoAlpacaModel() {
    using namespace transformers;

    // Load the KoAlpaca model and its tokenizer.
    // NOTE(review): GPT2Model/GPT2Tokenizer come from the project's
    // transformers C++ headers; their exact semantics are not visible here.
    auto model = GPT2Model::from_pretrained("beomi/KoAlpaca");
    auto tokenizer = GPT2Tokenizer::from_pretrained("beomi/KoAlpaca");

    std::string input_text;
    // Bug fix vs. the original: std::getline's result is now checked, so the
    // loop terminates on EOF / stream error instead of spinning forever on
    // the empty string getline leaves behind. This also removes the
    // duplicated prompt + getline that preceded the original while loop.
    while (true) {
        std::cout << "Enter your input text (type 'quit' to exit):" << std::endl;
        if (!std::getline(std::cin, input_text) || input_text == "quit") {
            break;
        }

        // Tokenize the input text.
        auto tokens = tokenizer.encode(input_text);

        // Append one zero-valued slot to the ids and the attention mask.
        // NOTE(review): resize() value-initializes the new element, so this
        // appends token id 0 with attention-mask 0 -- presumably meant as an
        // end-of-sequence marker; confirm the intended token id and that a
        // zero mask entry is correct for this model.
        tokens.input_ids.resize(tokens.input_ids.size() + 1);
        tokens.attention_mask.resize(tokens.attention_mask.size() + 1);

        // Generate a response and decode the first candidate back to text.
        auto outputs = model.generate(tokens);
        auto generated_text = tokenizer.decode(outputs[0]);
        std::cout << "Generated text: " << generated_text << std::endl;
    }
}
// Program entry point: announce startup, run the interactive KoAlpaca
// loop, then announce completion.
int main() {
    // '\n' followed by an explicit flush is exactly what std::endl does.
    std::cout << "Starting the KoAlpaca application..." << '\n' << std::flush;
    useKoAlpacaModel();
    std::cout << "KoAlpaca application finished." << '\n' << std::flush;
    return 0;
}