Change default repeat_penalty to 1.0

I feel this penalty is not really helping.
Especially for the example from the README, it makes the results pretty bad.
This commit is contained in:
Georgi Gerganov 2023-03-21 17:32:14 +02:00
parent eb34620aec
commit 8f644a0a85
No known key found for this signature in database
GPG key ID: 449E073F9DC10735

View file

@ -18,26 +18,25 @@ struct gpt_params {
int32_t n_predict = 128; // new tokens to predict
int32_t repeat_last_n = 64; // last n tokens to penalize
int32_t n_ctx = 512; //context size
bool memory_f16 = false; // use f16 instead of f32 for memory kv
// sampling parameters
int32_t top_k = 40;
float top_p = 0.95f;
float temp = 0.80f;
float repeat_penalty = 1.30f;
float repeat_penalty = 1.10f;
int32_t n_batch = 8; // batch size for prompt processing
std::string model = "models/lamma-7B/ggml-model.bin"; // model path
std::string prompt = "";
bool random_prompt = false;
std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
bool memory_f16 = false; // use f16 instead of f32 for memory kv
bool random_prompt = false; // do not randomize prompt if none provided
bool use_color = false; // use color to distinguish generations and inputs
bool interactive = false; // interactive mode
bool interactive_start = false; // reverse prompt immediately
std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
bool instruct = false; // instruction mode (used for Alpaca models)
bool ignore_eos = false; // do not stop generating after eos
};