thinkthink-dev committed
Commit d482ce9 · verified · 1 Parent(s): 2488607

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50):
  1. .gitattributes +60 -0
  2. README.md +206 -8
  3. adapter_config.json +42 -0
  4. adapter_model.safetensors +3 -0
  5. added_tokens.json +4 -0
  6. chat_template.jinja +282 -0
  7. checkpoint-100/README.md +207 -0
  8. checkpoint-100/adapter_config.json +42 -0
  9. checkpoint-100/adapter_model.safetensors +3 -0
  10. checkpoint-100/added_tokens.json +4 -0
  11. checkpoint-100/chat_template.jinja +282 -0
  12. checkpoint-100/optimizer.pt +3 -0
  13. checkpoint-100/rng_state.pth +3 -0
  14. checkpoint-100/scheduler.pt +3 -0
  15. checkpoint-100/special_tokens_map.json +34 -0
  16. checkpoint-100/tokenizer.json +3 -0
  17. checkpoint-100/tokenizer.model +3 -0
  18. checkpoint-100/tokenizer_config.json +0 -0
  19. checkpoint-100/trainer_state.json +174 -0
  20. checkpoint-100/training_args.bin +3 -0
  21. checkpoint-1000/README.md +207 -0
  22. checkpoint-1000/adapter_config.json +42 -0
  23. checkpoint-1000/adapter_model.safetensors +3 -0
  24. checkpoint-1000/added_tokens.json +4 -0
  25. checkpoint-1000/chat_template.jinja +282 -0
  26. checkpoint-1000/optimizer.pt +3 -0
  27. checkpoint-1000/rng_state.pth +3 -0
  28. checkpoint-1000/scheduler.pt +3 -0
  29. checkpoint-1000/special_tokens_map.json +34 -0
  30. checkpoint-1000/tokenizer.json +3 -0
  31. checkpoint-1000/tokenizer.model +3 -0
  32. checkpoint-1000/tokenizer_config.json +0 -0
  33. checkpoint-1000/trainer_state.json +1434 -0
  34. checkpoint-1000/training_args.bin +3 -0
  35. checkpoint-1100/README.md +207 -0
  36. checkpoint-1100/adapter_config.json +42 -0
  37. checkpoint-1100/adapter_model.safetensors +3 -0
  38. checkpoint-1100/added_tokens.json +4 -0
  39. checkpoint-1100/chat_template.jinja +282 -0
  40. checkpoint-1100/optimizer.pt +3 -0
  41. checkpoint-1100/rng_state.pth +3 -0
  42. checkpoint-1100/scheduler.pt +3 -0
  43. checkpoint-1100/special_tokens_map.json +34 -0
  44. checkpoint-1100/tokenizer.json +3 -0
  45. checkpoint-1100/tokenizer.model +3 -0
  46. checkpoint-1100/tokenizer_config.json +0 -0
  47. checkpoint-1100/trainer_state.json +1574 -0
  48. checkpoint-1100/training_args.bin +3 -0
  49. checkpoint-1200/README.md +207 -0
  50. checkpoint-1200/adapter_config.json +42 -0
.gitattributes CHANGED
@@ -33,3 +33,63 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ checkpoint-100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-50/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5725/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
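The rules added above route every tokenizer.json in the repository (root and each checkpoint) through Git LFS, so the commit stores small pointer files while the blobs live in LFS storage. As a minimal illustration, downloading through `huggingface_hub` resolves the pointers transparently; the repo id below is a placeholder, since this commit view does not show the repository name:

```python
# Minimal sketch: fetch one of the LFS-tracked files listed above.
# The repo_id is a placeholder -- this commit page does not name the repository.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="thinkthink-dev/<repo-name>",        # hypothetical
    filename="checkpoint-100/tokenizer.json",    # LFS-tracked per the rules above
    revision="d482ce9",                          # the commit shown here
)
print(local_path)  # cached path of the resolved file, not the LFS pointer
```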
README.md CHANGED
@@ -1,9 +1,207 @@
  ---
- license: apache-2.0
- datasets:
- - AnishJoshi/nl2bash-custom
- language:
- - en
- base_model:
- - google/functiongemma-270m-it
- ---
+ base_model: unsloth/functiongemma-270m-it
+ library_name: peft
+ pipeline_tag: text-generation
+ tags:
+ - base_model:adapter:unsloth/functiongemma-270m-it
+ - lora
+ - transformers
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.17.1
adapter_config.json ADDED
@@ -0,0 +1,42 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "unsloth/functiongemma-270m-it",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "qalora_group_size": 16,
+ "r": 64,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "down_proj",
+ "o_proj",
+ "k_proj",
+ "q_proj",
+ "v_proj",
+ "up_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+ }
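The config describes a LoRA adapter (r=64, lora_alpha=128, dropout 0.05) over all attention and MLP projection modules of `unsloth/functiongemma-270m-it`. A minimal loading sketch with `peft`, assuming the adapter is pulled from this repository (the repo id is a placeholder):

```python
# Sketch: attach the LoRA adapter described by adapter_config.json to its base model.
# "thinkthink-dev/<repo-name>" is a placeholder, not taken from this commit.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "unsloth/functiongemma-270m-it"     # base_model_name_or_path above
adapter_id = "thinkthink-dev/<repo-name>"     # hypothetical adapter repo

tokenizer = AutoTokenizer.from_pretrained(adapter_id)   # repo ships tokenizer + chat template
base = AutoModelForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base, adapter_id)     # applies the r=64 / alpha=128 weights
model = model.merge_and_unload()                        # optional: fold the adapter into the base
```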
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2cf492613544bc73d1dae992745252f8a44cfa4a6bd5389d2ab4849336ed8aa
+ size 60785144
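What the diff shows is the Git LFS pointer, not the tensor data: a spec version, the SHA-256 of the real blob, and its size in bytes (roughly 61 MB). A small, purely illustrative check that a downloaded adapter_model.safetensors matches the pointer:

```python
# Illustrative: verify a downloaded blob against the LFS pointer fields above.
import hashlib
from pathlib import Path

EXPECTED_OID = "a2cf492613544bc73d1dae992745252f8a44cfa4a6bd5389d2ab4849336ed8aa"
EXPECTED_SIZE = 60785144  # bytes, from the pointer

blob = Path("adapter_model.safetensors")  # assumes the resolved file, not the pointer
assert blob.stat().st_size == EXPECTED_SIZE, "size does not match the LFS pointer"
digest = hashlib.sha256(blob.read_bytes()).hexdigest()
assert digest == EXPECTED_OID, "sha256 does not match the LFS pointer"
print("blob matches the pointer")
```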
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "<end_of_image>": 262145,
+ "<image_soft_token>": 262144
+ }
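added_tokens.json records the two image-related tokens appended beyond the base vocabulary and their ids. A quick sanity check, assuming the tokenizer is loaded from this repository (placeholder repo id):

```python
# Sketch: confirm the extra tokens resolve to the ids listed in added_tokens.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("thinkthink-dev/<repo-name>")  # hypothetical repo id
assert tok.convert_tokens_to_ids("<image_soft_token>") == 262144
assert tok.convert_tokens_to_ids("<end_of_image>") == 262145
```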
chat_template.jinja ADDED
@@ -0,0 +1,282 @@
+ {%- macro format_parameters(properties, required) -%}
+ {%- set standard_keys = ['description', 'type', 'properties', 'required', 'nullable'] -%}
+ {%- set ns = namespace(found_first=false) -%}
+ {%- for key, value in properties | dictsort -%}
+ {%- if key not in standard_keys -%}
+ {%- if ns.found_first %},{% endif -%}
+ {%- set ns.found_first = true -%}
+ {{- key }}:{description:<escape>{{ value['description'] }}<escape>
+ {%- if value['type'] | upper == 'STRING' -%}
+ {%- if value['enum'] -%}
+ ,enum:{{ format_argument(value['enum']) }}
+ {%- endif -%}
+ {%- elif value['type'] | upper == 'OBJECT' -%}
+ ,properties:{
+ {%- if value['properties'] is defined and value['properties'] is mapping -%}
+ {{- format_parameters(value['properties'], value['required'] | default([])) -}}
+ {%- elif value is mapping -%}
+ {{- format_parameters(value, value['required'] | default([])) -}}
+ {%- endif -%}
+ }
+ {%- if value['required'] -%}
+ ,required:[
+ {%- for item in value['required'] | default([]) -%}
+ <escape>{{- item -}}<escape>
+ {%- if not loop.last %},{% endif -%}
+ {%- endfor -%}
+ ]
+ {%- endif -%}
+ {%- elif value['type'] | upper == 'ARRAY' -%}
+ {%- if value['items'] is mapping and value['items'] -%}
+ ,items:{
+ {%- set ns_items = namespace(found_first=false) -%}
+ {%- for item_key, item_value in value['items'] | dictsort -%}
+ {%- if item_value is not none -%}
+ {%- if ns_items.found_first %},{% endif -%}
+ {%- set ns_items.found_first = true -%}
+ {%- if item_key == 'properties' -%}
+ properties:{
+ {%- if item_value is mapping -%}
+ {{- format_parameters(item_value, value['items']['required'] | default([])) -}}
+ {%- endif -%}
+ }
+ {%- elif item_key == 'required' -%}
+ required:[
+ {%- for req_item in item_value -%}
+ <escape>{{- req_item -}}<escape>
+ {%- if not loop.last %},{% endif -%}
+ {%- endfor -%}
+ ]
+ {%- elif item_key == 'type' -%}
+ {%- if item_value is string -%}
+ type:{{ format_argument(item_value | upper) }}
+ {%- else -%}
+ type:{{ format_argument(item_value | map('upper') | list) }}
+ {%- endif -%}
+ {%- else -%}
+ {{ item_key }}:{{ format_argument(item_value) }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+ }
+ {%- endif -%}
+ {%- endif -%}
+ ,type:<escape>{{ value['type'] | upper }}<escape>}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- endmacro -%}
+ {% macro format_function_declaration(tool_data) -%}
+ declaration:{{- tool_data['function']['name'] -}}
+ {description:<escape>{{- tool_data['function']['description'] -}}<escape>
+ {%- set params = tool_data['function']['parameters'] -%}
+ {%- if params -%}
+ ,parameters:{
+ {%- if params['properties'] -%}
+ properties:{ {{- format_parameters(params['properties'], params['required']) -}} },
+ {%- endif -%}
+ {%- if params['required'] -%}
+ required:[
+ {%- for item in params['required'] -%}
+ <escape>{{- item -}}<escape>
+ {{- ',' if not loop.last -}}
+ {%- endfor -%}
+ ],
+ {%- endif -%}
+ {%- if params['type'] -%}
+ type:<escape>{{- params['type'] | upper -}}<escape>}
+ {%- endif -%}
+ {%- endif -%}
+ }
+ {%- endmacro -%}
+ {% macro format_argument(argument, escape_keys=True) -%}
+ {%- if argument is string -%}
+ {{- '<escape>' + argument + '<escape>' -}}
+ {%- elif argument is boolean -%}
+ {%- if argument -%}
+ {{- 'true' -}}
+ {%- else -%}
+ {{- 'false' -}}
+ {%- endif -%}
+ {%- elif argument is mapping -%}
+ {{- '{' -}}
+ {%- set ns = namespace(found_first=false) -%}
+ {%- for key, value in argument | dictsort -%}
+ {%- if ns.found_first %},{% endif -%}
+ {%- set ns.found_first = true -%}
+ {%- if escape_keys -%}
+ {{- '<escape>' + key + '<escape>' -}}
+ {%- else -%}
+ {{- key -}}
+ {%- endif -%}
+ :{{- format_argument(value, escape_keys=escape_keys) -}}
+ {%- endfor -%}
+ {{- '}' -}}
+ {%- elif argument is sequence -%}
+ {{- '[' -}}
+ {%- for item in argument -%}
+ {{- format_argument(item, escape_keys=escape_keys) -}}
+ {%- if not loop.last %},{% endif -%}
+ {%- endfor -%}
+ {{- ']' -}}
+ {%- else -%}
+ {{- argument -}}
+ {%- endif -%}
+ {%- endmacro -%}
+ {{ bos_token }}
+ {%- set ns = namespace(prev_message_type=None) -%}
+ {#- Tool Declarations -#}
+ {%- set loop_messages = messages -%}
+ {%- if tools or messages[0]['role'] == 'system' or messages[0]['role'] == 'developer' -%}
+ {{- '<start_of_turn>developer\n' -}}
+ {%- if messages[0]['role'] == 'system' or messages[0]['role'] == 'developer' -%}
+ {%- if messages[0]['content'] is string -%}
+ {{- messages[0]['content'] | trim -}}
+ {%- elif messages[0]['content'] is sequence -%}
+ {%- for item in messages[0]['content'] -%}
+ {%- if item['type'] == 'text' -%}
+ {{- item['text'] | trim -}}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- endif -%}
+ {%- set loop_messages = messages[1:] -%}
+ {%- else -%}
+ {{- 'You are a model that can do function calling with the following functions' -}}
+ {%- set loop_messages = messages -%}
+ {%- endif -%}
+ {%- if tools -%}
+ {%- for tool in tools %}
+ {{- '<start_function_declaration>' -}}
+ {{- format_function_declaration(tool) | trim }}
+ {{- '<end_function_declaration>' -}}
+ {%- endfor %}
+ {%- endif -%}
+ {{- '<end_of_turn>\n' }}
+ {%- endif %}
+ {#- Loop through messages. -#}
+ {%- for message in loop_messages -%}
+ {%- if (message['role'] == 'assistant') -%}
+ {#- Rename "assistant" to "model". -#}
+ {%- set role = "model" -%}
+ {%- else -%}
+ {%- set role = message['role'] -%}
+ {%- endif -%}
+ {%- if role != 'tool' -%}
+ {%- if ns.prev_message_type != 'tool_response' -%}
+ {{- '<start_of_turn>' + role + '\n' }}
+ {%- endif -%}
+ {%- set ns.prev_message_type = None -%}
+ {%- if 'content' in message and message['content'] is not none -%}
+ {%- if message['content'] is string -%}
+ {{ message['content'] | trim }}
+ {%- elif message['content'] is sequence -%}
+ {%- for item in message['content'] -%}
+ {%- if item['type'] == 'image' -%}
+ {{ '<start_of_image>' }}
+ {%- elif item['type'] == 'text' -%}
+ {{ item['text'] | trim }}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- else -%}
+ {{ raise_exception("Invalid content type in user/assistant message") }}
+ {%- endif -%}
+ {%- set ns.prev_message_type = 'content' -%}
+ {%- endif -%}
+ {%- if 'tool_calls' in message and message['tool_calls'] and message['tool_calls'] is iterable -%}
+ {#- Tool Calls -#}
+ {%- for tool_call in message['tool_calls'] -%}
+ {% set function = tool_call['function'] %}
+ {{- '<start_function_call>call:' + function['name'] + '{' -}}
+ {%- if 'arguments' in function -%}
+ {%- if function['arguments'] is mapping -%}
+ {%- set ns = namespace(found_first=false) -%}
+ {%- for key, value in function['arguments'] | dictsort -%}
+ {%- if ns.found_first %},{% endif -%}
+ {%- set ns.found_first = true -%}
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
+ {%- endfor -%}
+ {%- elif function['arguments'] is string -%}
+ {# This handles string-JSON, just in case #}
+ {{ function['arguments'] }}
+ {%- endif %}
+ {%- endif -%}
+ {{- '}<end_function_call>' -}}
+ {%- endfor -%}
+ {%- if loop.last -%}
+ {{ '<start_function_response>' }}
+ {%- endif -%}
+ {%- set ns.prev_message_type = 'tool_call' -%}
+ {%- endif -%}
+ {%- else -%}
+ {#- Tool Responses -#}
+ {%- if 'content' in message and message['content'] -%}
+ {%- if message['content'] is mapping -%}
+ {%- if 'name' in message['content'] and 'response' in message['content'] -%}
+ {{ '<start_function_response>response:' + message['content']['name'] | trim + '{' }}
+ {%- set response_ns = namespace(found_first=false) -%}
+ {%- for key, value in message['content']['response'] | dictsort -%}
+ {%- if response_ns.found_first %},{% endif -%}
+ {%- set response_ns.found_first = true -%}
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
+ {%- endfor -%}
+ {{- '}<end_function_response>' -}}
+ {%- elif 'name' in message -%}
+ {{ '<start_function_response>response:' + message['name'] | trim + '{' }}
+ {%- set response_ns = namespace(found_first=false) -%}
+ {%- for key, value in message['content'] | dictsort -%}
+ {%- if response_ns.found_first %},{% endif -%}
+ {%- set response_ns.found_first = true -%}
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
+ {%- endfor -%}
+ {{- '}<end_function_response>' -}}
+ {%- else -%}
+ {{ raise_exception("Invalid tool response mapping: must contain 'name' and 'response' keys, or 'name' must be in the message.") }}
+ {%- endif -%}
+ {%- elif message['content'] is string -%}
+ {%- if 'name' in message -%}
+ {{ '<start_function_response>response:' + message['name'] | trim + '{value:' + format_argument(message['content'], escape_keys=False) + '}<end_function_response>' }}
+ {%- else -%}
+ {{ raise_exception("Invalid tool response: 'name' must be provided.") }}
+ {%- endif -%}
+ {%- elif message['content'] is sequence -%}
+ {%- for item in message['content'] -%}
+ {%- if item is mapping -%}
+ {%- if 'name' in item and 'response' in item -%}
+ {{ '<start_function_response>response:' + item['name'] | trim + '{' }}
+ {%- set response_ns = namespace(found_first=false) -%}
+ {%- for key, value in item['response'] | dictsort -%}
+ {%- if response_ns.found_first %},{% endif -%}
+ {%- set response_ns.found_first = true -%}
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
+ {%- endfor -%}
+ {{- '}<end_function_response>' -}}
+ {%- elif 'name' in message -%}
+ {{ '<start_function_response>response:' + message['name'] | trim + '{' }}
+ {%- set response_ns = namespace(found_first=false) -%}
+ {%- for key, value in item | dictsort -%}
+ {%- if response_ns.found_first %},{% endif -%}
+ {%- set response_ns.found_first = true -%}
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
+ {%- endfor -%}
+ {{- '}<end_function_response>' -}}
+ {%- else -%}
+ {{ raise_exception("Invalid tool response mapping: must contain 'name' and 'response' keys, or 'name' must be in the message.") }}
+ {%- endif -%}
+ {%- else -%}
+ {{ raise_exception("Invalid tool response message: multiple responses must all be mappings") }}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- else -%}
+ {{ raise_exception("Invalid content type in tool message: must be mapping, sequence of mappings, or string.") }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- set ns.prev_message_type = 'tool_response' -%}
+ {%- endif -%}
+ {%- if ns.prev_message_type not in ['tool_call', 'tool_response'] -%}
+ {{ '<end_of_turn>\n' }}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- if add_generation_prompt -%}
+ {%- if ns.prev_message_type != 'tool_response' -%}
+ {{- '<start_of_turn>model\n' -}}
+ {%- endif -%}
+ {%- endif -%}
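The template renders developer/system turns, rewrites the assistant role to `model`, and serializes tool declarations, calls, and responses between `<start_function_declaration>`, `<start_function_call>`, and `<start_function_response>` markers. A hedged usage sketch with `tokenizer.apply_chat_template`; the repo id and the weather tool are made up for illustration:

```python
# Sketch: render a tool-calling prompt through the chat template above.
# The repo id and the get_weather tool are illustrative, not part of this commit.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("thinkthink-dev/<repo-name>")  # hypothetical

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string", "description": "City name"}},
            "required": ["city"],
        },
    },
}]
messages = [{"role": "user", "content": "What's the weather in Tokyo?"}]

prompt = tok.apply_chat_template(
    messages, tools=tools, tokenize=False, add_generation_prompt=True
)
print(prompt)  # developer turn with <start_function_declaration>..., then <start_of_turn>model
```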
checkpoint-100/README.md ADDED
Identical to the README.md added at the repository root in this commit (the same 207-line auto-generated PEFT model card).
checkpoint-100/adapter_config.json ADDED
Identical to the adapter_config.json added at the repository root (the same 42-line LoRA configuration).
checkpoint-100/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b41ff92fff37b5682d4b5f3367e2231e5fd6183251401b9206874b535cd02de7
+ size 60785144
checkpoint-100/added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "<end_of_image>": 262145,
+ "<image_soft_token>": 262144
+ }
checkpoint-100/chat_template.jinja ADDED
Identical to the chat_template.jinja added at the repository root (the same 282-line function-calling template).
checkpoint-100/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:356da361c0a6d635b5d9c3aa1b51d9790c3a346c7730a5d0ca962946d04ea8ff
+ size 121708939
checkpoint-100/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb03eac9b58e52273c3aa5727841719fd6c82afc9062bbaf8eb6f842de779278
+ size 14391
checkpoint-100/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5862e5e0650ed902623f865842c02c99e7054853606c904f823dfbad3f1915b
+ size 1465
checkpoint-100/special_tokens_map.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "boi_token": "<start_of_image>",
+ "bos_token": {
+ "content": "<bos>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eoi_token": "<end_of_image>",
+ "eos_token": {
+ "content": "<end_of_turn>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "image_token": "<image_soft_token>",
+ "pad_token": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sfr_token": "<start_function_response>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
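Beyond the standard bos/eos/pad/unk entries, the map registers the image delimiters and `<start_function_response>` as named special tokens, with `<end_of_turn>` acting as the end-of-sequence token. A short sketch showing how these fields surface on a loaded tokenizer and where `<end_of_turn>` matters during generation (repo id again a placeholder):

```python
# Sketch: the special-token fields above as seen from a loaded tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("thinkthink-dev/<repo-name>")  # hypothetical
print(tok.bos_token, tok.eos_token, tok.pad_token)  # <bos> <end_of_turn> <pad>

# When generating, stop on <end_of_turn> so the model does not run past its turn, e.g.:
# model.generate(**inputs, eos_token_id=tok.eos_token_id, pad_token_id=tok.pad_token_id)
```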
checkpoint-100/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6b09a0b4a803ad453063ca4bb49a784540e8120004e2450e025df2b27d41fb2
+ size 33384899
checkpoint-100/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa009fcbc3589a9904d30d04834094fea4653c2ac6d2de2cd1262d4f7a50ceb3
+ size 4689144
checkpoint-100/tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-100/trainer_state.json ADDED
@@ -0,0 +1,174 @@
+ {
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.08737439930100481,
+ "eval_steps": 500,
+ "global_step": 100,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {"epoch": 0.00436871996505024, "grad_norm": 9.839941024780273, "learning_rate": 8e-05, "loss": 2.5246, "step": 5},
+ {"epoch": 0.00873743993010048, "grad_norm": 13.773455619812012, "learning_rate": 0.00018, "loss": 1.1343, "step": 10},
+ {"epoch": 0.01310615989515072, "grad_norm": 5.6580424308776855, "learning_rate": 0.0001999997582552296, "loss": 0.7712, "step": 15},
+ {"epoch": 0.01747487986020096, "grad_norm": 5.294467926025391, "learning_rate": 0.0001999987761691029, "loss": 0.73, "step": 20},
+ {"epoch": 0.021843599825251202, "grad_norm": 2.8633503913879395, "learning_rate": 0.00019999703863998527, "loss": 0.7289, "step": 25},
+ {"epoch": 0.02621231979030144, "grad_norm": 3.2836177349090576, "learning_rate": 0.00019999454568100293, "loss": 0.4686, "step": 30},
+ {"epoch": 0.03058103975535168, "grad_norm": 4.878258228302002, "learning_rate": 0.00019999129731098898, "loss": 0.6629, "step": 35},
+ {"epoch": 0.03494975972040192, "grad_norm": 2.899914026260376, "learning_rate": 0.00019998729355448326, "loss": 0.6038, "step": 40},
+ {"epoch": 0.039318479685452164, "grad_norm": 3.289844274520874, "learning_rate": 0.00019998253444173235, "loss": 0.4573, "step": 45},
+ {"epoch": 0.043687199650502405, "grad_norm": 2.957254648208618, "learning_rate": 0.00019997702000868896, "loss": 0.594, "step": 50},
+ {"epoch": 0.048055919615552646, "grad_norm": 3.171276807785034, "learning_rate": 0.00019997075029701207, "loss": 0.5719, "step": 55},
+ {"epoch": 0.05242463958060288, "grad_norm": 2.55605149269104, "learning_rate": 0.0001999637253540663, "loss": 0.5971, "step": 60},
+ {"epoch": 0.05679335954565312, "grad_norm": 2.127289295196533, "learning_rate": 0.00019995594523292178, "loss": 0.5712, "step": 65},
+ {"epoch": 0.06116207951070336, "grad_norm": 3.3928685188293457, "learning_rate": 0.00019994740999235359, "loss": 0.5712, "step": 70},
+ {"epoch": 0.0655307994757536, "grad_norm": 2.6700279712677, "learning_rate": 0.00019993811969684142, "loss": 0.427, "step": 75},
+ {"epoch": 0.06989951944080385, "grad_norm": 2.6936633586883545, "learning_rate": 0.00019992807441656898, "loss": 0.5321, "step": 80},
+ {"epoch": 0.07426823940585409, "grad_norm": 3.9897687435150146, "learning_rate": 0.00019991727422742362, "loss": 0.6025, "step": 85},
+ {"epoch": 0.07863695937090433, "grad_norm": 2.3496663570404053, "learning_rate": 0.00019990571921099553, "loss": 0.5975, "step": 90},
+ {"epoch": 0.08300567933595457, "grad_norm": 3.3796467781066895, "learning_rate": 0.0001998934094545774, "loss": 0.5255, "step": 95},
+ {"epoch": 0.08737439930100481, "grad_norm": 3.1103007793426514, "learning_rate": 0.00019988034505116352, "loss": 0.4946, "step": 100}
+ ],
+ "logging_steps": 5,
+ "max_steps": 5725,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 5,
+ "save_steps": 100,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 48194282348544.0,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+ }
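trainer_state.json is the Trainer's bookkeeping at step 100 of a planned 5725 (5 epochs, logging every 5 steps, a checkpoint every 100): the loss falls from about 2.52 to about 0.49 over the first 100 steps while the learning rate warms up to roughly 2e-4 and begins a slow decay. A small sketch that pulls the loss curve back out of the file:

```python
# Sketch: read the logged loss curve from a checkpoint's trainer_state.json.
import json

with open("checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

entries = [e for e in state["log_history"] if "loss" in e]
print(f"global_step={state['global_step']} of max_steps={state['max_steps']}")
for e in entries:
    print(f"step {e['step']:>4}: loss {e['loss']:.4f}  lr {e['learning_rate']:.2e}")
```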
checkpoint-100/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:629a35960f565450f5c9e65334fb9c14eb136182b8af4bc22c885bdde32de5f3
+ size 5777
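training_args.bin is a pickled `TrainingArguments` object rather than a text file, which is why only an LFS pointer appears in the diff. It can be inspected after download; this is a hedged sketch, since unpickling needs `torch` plus a `transformers` version compatible with the one used for training:

```python
# Sketch: inspect the serialized TrainingArguments from a checkpoint.
import torch

args = torch.load("checkpoint-100/training_args.bin", weights_only=False)
print(type(args).__name__)  # typically TrainingArguments
print(args.per_device_train_batch_size, args.learning_rate, args.num_train_epochs)
```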
checkpoint-1000/README.md ADDED
Identical to the README.md added at the repository root (the same 207-line auto-generated PEFT model card); the rendered diff is truncated at this point.
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.17.1
checkpoint-1000/adapter_config.json ADDED
@@ -0,0 +1,42 @@
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "unsloth/functiongemma-270m-it",
5
+ "bias": "none",
6
+ "corda_config": null,
7
+ "eva_config": null,
8
+ "exclude_modules": null,
9
+ "fan_in_fan_out": false,
10
+ "inference_mode": true,
11
+ "init_lora_weights": true,
12
+ "layer_replication": null,
13
+ "layers_pattern": null,
14
+ "layers_to_transform": null,
15
+ "loftq_config": {},
16
+ "lora_alpha": 128,
17
+ "lora_bias": false,
18
+ "lora_dropout": 0.05,
19
+ "megatron_config": null,
20
+ "megatron_core": "megatron.core",
21
+ "modules_to_save": null,
22
+ "peft_type": "LORA",
23
+ "qalora_group_size": 16,
24
+ "r": 64,
25
+ "rank_pattern": {},
26
+ "revision": null,
27
+ "target_modules": [
28
+ "gate_proj",
29
+ "down_proj",
30
+ "o_proj",
31
+ "k_proj",
32
+ "q_proj",
33
+ "v_proj",
34
+ "up_proj"
35
+ ],
36
+ "target_parameters": null,
37
+ "task_type": "CAUSAL_LM",
38
+ "trainable_token_indices": null,
39
+ "use_dora": false,
40
+ "use_qalora": false,
41
+ "use_rslora": false
42
+ }
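The adapter config above describes a LoRA adapter (r=64, lora_alpha=128, dropout 0.05) over the attention and MLP projections of `unsloth/functiongemma-270m-it`, saved with PEFT. A minimal loading sketch under those assumptions follows; the adapter path is a placeholder for this repo or a local checkpoint directory, and merging at the end is optional.

```python
# Sketch: attach the LoRA adapter described by adapter_config.json to its base model.
# "path/to/adapter" is a placeholder (e.g. a local checkpoint-1000 directory).
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "unsloth/functiongemma-270m-it"  # taken from base_model_name_or_path
tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id)

model = PeftModel.from_pretrained(base_model, "path/to/adapter")
model = model.merge_and_unload()  # optional: fold the LoRA deltas into the base weights
```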
checkpoint-1000/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:133ec76379027ec222f56f4cc1b04ae3342877261ab6c165c6b4fd334e8022bb
3
+ size 60785144
checkpoint-1000/added_tokens.json ADDED
@@ -0,0 +1,4 @@
1
+ {
2
+ "<end_of_image>": 262145,
3
+ "<image_soft_token>": 262144
4
+ }
checkpoint-1000/chat_template.jinja ADDED
@@ -0,0 +1,282 @@
1
+ {%- macro format_parameters(properties, required) -%}
2
+ {%- set standard_keys = ['description', 'type', 'properties', 'required', 'nullable'] -%}
3
+ {%- set ns = namespace(found_first=false) -%}
4
+ {%- for key, value in properties | dictsort -%}
5
+ {%- if key not in standard_keys -%}
6
+ {%- if ns.found_first %},{% endif -%}
7
+ {%- set ns.found_first = true -%}
8
+ {{- key }}:{description:<escape>{{ value['description'] }}<escape>
9
+ {%- if value['type'] | upper == 'STRING' -%}
10
+ {%- if value['enum'] -%}
11
+ ,enum:{{ format_argument(value['enum']) }}
12
+ {%- endif -%}
13
+ {%- elif value['type'] | upper == 'OBJECT' -%}
14
+ ,properties:{
15
+ {%- if value['properties'] is defined and value['properties'] is mapping -%}
16
+ {{- format_parameters(value['properties'], value['required'] | default([])) -}}
17
+ {%- elif value is mapping -%}
18
+ {{- format_parameters(value, value['required'] | default([])) -}}
19
+ {%- endif -%}
20
+ }
21
+ {%- if value['required'] -%}
22
+ ,required:[
23
+ {%- for item in value['required'] | default([]) -%}
24
+ <escape>{{- item -}}<escape>
25
+ {%- if not loop.last %},{% endif -%}
26
+ {%- endfor -%}
27
+ ]
28
+ {%- endif -%}
29
+ {%- elif value['type'] | upper == 'ARRAY' -%}
30
+ {%- if value['items'] is mapping and value['items'] -%}
31
+ ,items:{
32
+ {%- set ns_items = namespace(found_first=false) -%}
33
+ {%- for item_key, item_value in value['items'] | dictsort -%}
34
+ {%- if item_value is not none -%}
35
+ {%- if ns_items.found_first %},{% endif -%}
36
+ {%- set ns_items.found_first = true -%}
37
+ {%- if item_key == 'properties' -%}
38
+ properties:{
39
+ {%- if item_value is mapping -%}
40
+ {{- format_parameters(item_value, value['items']['required'] | default([])) -}}
41
+ {%- endif -%}
42
+ }
43
+ {%- elif item_key == 'required' -%}
44
+ required:[
45
+ {%- for req_item in item_value -%}
46
+ <escape>{{- req_item -}}<escape>
47
+ {%- if not loop.last %},{% endif -%}
48
+ {%- endfor -%}
49
+ ]
50
+ {%- elif item_key == 'type' -%}
51
+ {%- if item_value is string -%}
52
+ type:{{ format_argument(item_value | upper) }}
53
+ {%- else -%}
54
+ type:{{ format_argument(item_value | map('upper') | list) }}
55
+ {%- endif -%}
56
+ {%- else -%}
57
+ {{ item_key }}:{{ format_argument(item_value) }}
58
+ {%- endif -%}
59
+ {%- endif -%}
60
+ {%- endfor -%}
61
+ }
62
+ {%- endif -%}
63
+ {%- endif -%}
64
+ ,type:<escape>{{ value['type'] | upper }}<escape>}
65
+ {%- endif -%}
66
+ {%- endfor -%}
67
+ {%- endmacro -%}
68
+ {% macro format_function_declaration(tool_data) -%}
69
+ declaration:{{- tool_data['function']['name'] -}}
70
+ {description:<escape>{{- tool_data['function']['description'] -}}<escape>
71
+ {%- set params = tool_data['function']['parameters'] -%}
72
+ {%- if params -%}
73
+ ,parameters:{
74
+ {%- if params['properties'] -%}
75
+ properties:{ {{- format_parameters(params['properties'], params['required']) -}} },
76
+ {%- endif -%}
77
+ {%- if params['required'] -%}
78
+ required:[
79
+ {%- for item in params['required'] -%}
80
+ <escape>{{- item -}}<escape>
81
+ {{- ',' if not loop.last -}}
82
+ {%- endfor -%}
83
+ ],
84
+ {%- endif -%}
85
+ {%- if params['type'] -%}
86
+ type:<escape>{{- params['type'] | upper -}}<escape>}
87
+ {%- endif -%}
88
+ {%- endif -%}
89
+ }
90
+ {%- endmacro -%}
91
+ {% macro format_argument(argument, escape_keys=True) -%}
92
+ {%- if argument is string -%}
93
+ {{- '<escape>' + argument + '<escape>' -}}
94
+ {%- elif argument is boolean -%}
95
+ {%- if argument -%}
96
+ {{- 'true' -}}
97
+ {%- else -%}
98
+ {{- 'false' -}}
99
+ {%- endif -%}
100
+ {%- elif argument is mapping -%}
101
+ {{- '{' -}}
102
+ {%- set ns = namespace(found_first=false) -%}
103
+ {%- for key, value in argument | dictsort -%}
104
+ {%- if ns.found_first %},{% endif -%}
105
+ {%- set ns.found_first = true -%}
106
+ {%- if escape_keys -%}
107
+ {{- '<escape>' + key + '<escape>' -}}
108
+ {%- else -%}
109
+ {{- key -}}
110
+ {%- endif -%}
111
+ :{{- format_argument(value, escape_keys=escape_keys) -}}
112
+ {%- endfor -%}
113
+ {{- '}' -}}
114
+ {%- elif argument is sequence -%}
115
+ {{- '[' -}}
116
+ {%- for item in argument -%}
117
+ {{- format_argument(item, escape_keys=escape_keys) -}}
118
+ {%- if not loop.last %},{% endif -%}
119
+ {%- endfor -%}
120
+ {{- ']' -}}
121
+ {%- else -%}
122
+ {{- argument -}}
123
+ {%- endif -%}
124
+ {%- endmacro -%}
125
+ {{ bos_token }}
126
+ {%- set ns = namespace(prev_message_type=None) -%}
127
+ {#- Tool Declarations -#}
128
+ {%- set loop_messages = messages -%}
129
+ {%- if tools or messages[0]['role'] == 'system' or messages[0]['role'] == 'developer' -%}
130
+ {{- '<start_of_turn>developer\n' -}}
131
+ {%- if messages[0]['role'] == 'system' or messages[0]['role'] == 'developer' -%}
132
+ {%- if messages[0]['content'] is string -%}
133
+ {{- messages[0]['content'] | trim -}}
134
+ {%- elif messages[0]['content'] is sequence -%}
135
+ {%- for item in messages[0]['content'] -%}
136
+ {%- if item['type'] == 'text' -%}
137
+ {{- item['text'] | trim -}}
138
+ {%- endif -%}
139
+ {%- endfor -%}
140
+ {%- endif -%}
141
+ {%- set loop_messages = messages[1:] -%}
142
+ {%- else -%}
143
+ {{- 'You are a model that can do function calling with the following functions' -}}
144
+ {%- set loop_messages = messages -%}
145
+ {%- endif -%}
146
+ {%- if tools -%}
147
+ {%- for tool in tools %}
148
+ {{- '<start_function_declaration>' -}}
149
+ {{- format_function_declaration(tool) | trim }}
150
+ {{- '<end_function_declaration>' -}}
151
+ {%- endfor %}
152
+ {%- endif -%}
153
+ {{- '<end_of_turn>\n' }}
154
+ {%- endif %}
155
+ {#- Loop through messages. -#}
156
+ {%- for message in loop_messages -%}
157
+ {%- if (message['role'] == 'assistant') -%}
158
+ {#- Rename "assistant" to "model". -#}
159
+ {%- set role = "model" -%}
160
+ {%- else -%}
161
+ {%- set role = message['role'] -%}
162
+ {%- endif -%}
163
+ {%- if role != 'tool' -%}
164
+ {%- if ns.prev_message_type != 'tool_response' -%}
165
+ {{- '<start_of_turn>' + role + '\n' }}
166
+ {%- endif -%}
167
+ {%- set ns.prev_message_type = None -%}
168
+ {%- if 'content' in message and message['content'] is not none -%}
169
+ {%- if message['content'] is string -%}
170
+ {{ message['content'] | trim }}
171
+ {%- elif message['content'] is sequence -%}
172
+ {%- for item in message['content'] -%}
173
+ {%- if item['type'] == 'image' -%}
174
+ {{ '<start_of_image>' }}
175
+ {%- elif item['type'] == 'text' -%}
176
+ {{ item['text'] | trim }}
177
+ {%- endif -%}
178
+ {%- endfor -%}
179
+ {%- else -%}
180
+ {{ raise_exception("Invalid content type in user/assistant message") }}
181
+ {%- endif -%}
182
+ {%- set ns.prev_message_type = 'content' -%}
183
+ {%- endif -%}
184
+ {%- if 'tool_calls' in message and message['tool_calls'] and message['tool_calls'] is iterable -%}
185
+ {#- Tool Calls -#}
186
+ {%- for tool_call in message['tool_calls'] -%}
187
+ {% set function = tool_call['function'] %}
188
+ {{- '<start_function_call>call:' + function['name'] + '{' -}}
189
+ {%- if 'arguments' in function -%}
190
+ {%- if function['arguments'] is mapping -%}
191
+ {%- set ns = namespace(found_first=false) -%}
192
+ {%- for key, value in function['arguments'] | dictsort -%}
193
+ {%- if ns.found_first %},{% endif -%}
194
+ {%- set ns.found_first = true -%}
195
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
196
+ {%- endfor -%}
197
+ {%- elif function['arguments'] is string -%}
198
+ {# This handles string-JSON, just in case #}
199
+ {{ function['arguments'] }}
200
+ {%- endif %}
201
+ {%- endif -%}
202
+ {{- '}<end_function_call>' -}}
203
+ {%- endfor -%}
204
+ {%- if loop.last -%}
205
+ {{ '<start_function_response>' }}
206
+ {%- endif -%}
207
+ {%- set ns.prev_message_type = 'tool_call' -%}
208
+ {%- endif -%}
209
+ {%- else -%}
210
+ {#- Tool Responses -#}
211
+ {%- if 'content' in message and message['content'] -%}
212
+ {%- if message['content'] is mapping -%}
213
+ {%- if 'name' in message['content'] and 'response' in message['content'] -%}
214
+ {{ '<start_function_response>response:' + message['content']['name'] | trim + '{' }}
215
+ {%- set response_ns = namespace(found_first=false) -%}
216
+ {%- for key, value in message['content']['response'] | dictsort -%}
217
+ {%- if response_ns.found_first %},{% endif -%}
218
+ {%- set response_ns.found_first = true -%}
219
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
220
+ {%- endfor -%}
221
+ {{- '}<end_function_response>' -}}
222
+ {%- elif 'name' in message -%}
223
+ {{ '<start_function_response>response:' + message['name'] | trim + '{' }}
224
+ {%- set response_ns = namespace(found_first=false) -%}
225
+ {%- for key, value in message['content'] | dictsort -%}
226
+ {%- if response_ns.found_first %},{% endif -%}
227
+ {%- set response_ns.found_first = true -%}
228
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
229
+ {%- endfor -%}
230
+ {{- '}<end_function_response>' -}}
231
+ {%- else -%}
232
+ {{ raise_exception("Invalid tool response mapping: must contain 'name' and 'response' keys, or 'name' must be in the message.") }}
233
+ {%- endif -%}
234
+ {%- elif message['content'] is string -%}
235
+ {%- if 'name' in message -%}
236
+ {{ '<start_function_response>response:' + message['name'] | trim + '{value:' + format_argument(message['content'], escape_keys=False) + '}<end_function_response>' }}
237
+ {%- else -%}
238
+ {{ raise_exception("Invalid tool response: 'name' must be provided.") }}
239
+ {%- endif -%}
240
+ {%- elif message['content'] is sequence -%}
241
+ {%- for item in message['content'] -%}
242
+ {%- if item is mapping -%}
243
+ {%- if 'name' in item and 'response' in item -%}
244
+ {{ '<start_function_response>response:' + item['name'] | trim + '{' }}
245
+ {%- set response_ns = namespace(found_first=false) -%}
246
+ {%- for key, value in item['response'] | dictsort -%}
247
+ {%- if response_ns.found_first %},{% endif -%}
248
+ {%- set response_ns.found_first = true -%}
249
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
250
+ {%- endfor -%}
251
+ {{- '}<end_function_response>' -}}
252
+ {%- elif 'name' in message -%}
253
+ {{ '<start_function_response>response:' + message['name'] | trim + '{' }}
254
+ {%- set response_ns = namespace(found_first=false) -%}
255
+ {%- for key, value in item | dictsort -%}
256
+ {%- if response_ns.found_first %},{% endif -%}
257
+ {%- set response_ns.found_first = true -%}
258
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
259
+ {%- endfor -%}
260
+ {{- '}<end_function_response>' -}}
261
+ {%- else -%}
262
+ {{ raise_exception("Invalid tool response mapping: must contain 'name' and 'response' keys, or 'name' must be in the message.") }}
263
+ {%- endif -%}
264
+ {%- else -%}
265
+ {{ raise_exception("Invalid tool response message: multiple responses must all be mappings") }}
266
+ {%- endif -%}
267
+ {%- endfor -%}
268
+ {%- else -%}
269
+ {{ raise_exception("Invalid content type in tool message: must be mapping, sequence of mappings, or string.") }}
270
+ {%- endif -%}
271
+ {%- endif -%}
272
+ {%- set ns.prev_message_type = 'tool_response' -%}
273
+ {%- endif -%}
274
+ {%- if ns.prev_message_type not in ['tool_call', 'tool_response'] -%}
275
+ {{ '<end_of_turn>\n' }}
276
+ {%- endif -%}
277
+ {%- endfor -%}
278
+ {%- if add_generation_prompt -%}
279
+ {%- if ns.prev_message_type != 'tool_response' -%}
280
+ {{- '<start_of_turn>model\n' -}}
281
+ {%- endif -%}
282
+ {%- endif -%}
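The Jinja template above serializes tool declarations into a developer turn (`<start_function_declaration>…<end_function_declaration>`), renames the assistant role to `model`, and emits `<start_function_call>` / `<start_function_response>` markers for tool use. A minimal rendering sketch is below; the tokenizer path and the `get_weather` tool are illustrative assumptions, not part of the commit.

```python
# Sketch: render a function-calling prompt with this chat template via transformers.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder path

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # hypothetical tool for illustration
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string", "description": "City name"}},
            "required": ["city"],
        },
    },
}]

messages = [{"role": "user", "content": "What's the weather in Paris?"}]

prompt = tokenizer.apply_chat_template(
    messages, tools=tools, tokenize=False, add_generation_prompt=True
)
print(prompt)  # developer turn with the declaration, then "<start_of_turn>model"
```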
checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc305b4ad5727d63269f4d0392a3c827627bff7f82346016ac21b101c20d0e6f
3
+ size 121708939
checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eb03eac9b58e52273c3aa5727841719fd6c82afc9062bbaf8eb6f842de779278
3
+ size 14391
checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:67d07d0c8b6dccd1d2b3163f43a15635d0989081116d1ae61ad5b1d2af1ac480
3
+ size 1465
checkpoint-1000/special_tokens_map.json ADDED
@@ -0,0 +1,34 @@
1
+ {
2
+ "boi_token": "<start_of_image>",
3
+ "bos_token": {
4
+ "content": "<bos>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false
9
+ },
10
+ "eoi_token": "<end_of_image>",
11
+ "eos_token": {
12
+ "content": "<end_of_turn>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false
17
+ },
18
+ "image_token": "<image_soft_token>",
19
+ "pad_token": {
20
+ "content": "<pad>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false
25
+ },
26
+ "sfr_token": "<start_function_response>",
27
+ "unk_token": {
28
+ "content": "<unk>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false
33
+ }
34
+ }
checkpoint-1000/tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b6b09a0b4a803ad453063ca4bb49a784540e8120004e2450e025df2b27d41fb2
3
+ size 33384899
checkpoint-1000/tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa009fcbc3589a9904d30d04834094fea4653c2ac6d2de2cd1262d4f7a50ceb3
3
+ size 4689144
checkpoint-1000/tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,1434 @@
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 0.873743993010048,
6
+ "eval_steps": 500,
7
+ "global_step": 1000,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.00436871996505024,
14
+ "grad_norm": 9.839941024780273,
15
+ "learning_rate": 8e-05,
16
+ "loss": 2.5246,
17
+ "step": 5
18
+ },
19
+ {
20
+ "epoch": 0.00873743993010048,
21
+ "grad_norm": 13.773455619812012,
22
+ "learning_rate": 0.00018,
23
+ "loss": 1.1343,
24
+ "step": 10
25
+ },
26
+ {
27
+ "epoch": 0.01310615989515072,
28
+ "grad_norm": 5.6580424308776855,
29
+ "learning_rate": 0.0001999997582552296,
30
+ "loss": 0.7712,
31
+ "step": 15
32
+ },
33
+ {
34
+ "epoch": 0.01747487986020096,
35
+ "grad_norm": 5.294467926025391,
36
+ "learning_rate": 0.0001999987761691029,
37
+ "loss": 0.73,
38
+ "step": 20
39
+ },
40
+ {
41
+ "epoch": 0.021843599825251202,
42
+ "grad_norm": 2.8633503913879395,
43
+ "learning_rate": 0.00019999703863998527,
44
+ "loss": 0.7289,
45
+ "step": 25
46
+ },
47
+ {
48
+ "epoch": 0.02621231979030144,
49
+ "grad_norm": 3.2836177349090576,
50
+ "learning_rate": 0.00019999454568100293,
51
+ "loss": 0.4686,
52
+ "step": 30
53
+ },
54
+ {
55
+ "epoch": 0.03058103975535168,
56
+ "grad_norm": 4.878258228302002,
57
+ "learning_rate": 0.00019999129731098898,
58
+ "loss": 0.6629,
59
+ "step": 35
60
+ },
61
+ {
62
+ "epoch": 0.03494975972040192,
63
+ "grad_norm": 2.899914026260376,
64
+ "learning_rate": 0.00019998729355448326,
65
+ "loss": 0.6038,
66
+ "step": 40
67
+ },
68
+ {
69
+ "epoch": 0.039318479685452164,
70
+ "grad_norm": 3.289844274520874,
71
+ "learning_rate": 0.00019998253444173235,
72
+ "loss": 0.4573,
73
+ "step": 45
74
+ },
75
+ {
76
+ "epoch": 0.043687199650502405,
77
+ "grad_norm": 2.957254648208618,
78
+ "learning_rate": 0.00019997702000868896,
79
+ "loss": 0.594,
80
+ "step": 50
81
+ },
82
+ {
83
+ "epoch": 0.048055919615552646,
84
+ "grad_norm": 3.171276807785034,
85
+ "learning_rate": 0.00019997075029701207,
86
+ "loss": 0.5719,
87
+ "step": 55
88
+ },
89
+ {
90
+ "epoch": 0.05242463958060288,
91
+ "grad_norm": 2.55605149269104,
92
+ "learning_rate": 0.0001999637253540663,
93
+ "loss": 0.5971,
94
+ "step": 60
95
+ },
96
+ {
97
+ "epoch": 0.05679335954565312,
98
+ "grad_norm": 2.127289295196533,
99
+ "learning_rate": 0.00019995594523292178,
100
+ "loss": 0.5712,
101
+ "step": 65
102
+ },
103
+ {
104
+ "epoch": 0.06116207951070336,
105
+ "grad_norm": 3.3928685188293457,
106
+ "learning_rate": 0.00019994740999235359,
107
+ "loss": 0.5712,
108
+ "step": 70
109
+ },
110
+ {
111
+ "epoch": 0.0655307994757536,
112
+ "grad_norm": 2.6700279712677,
113
+ "learning_rate": 0.00019993811969684142,
114
+ "loss": 0.427,
115
+ "step": 75
116
+ },
117
+ {
118
+ "epoch": 0.06989951944080385,
119
+ "grad_norm": 2.6936633586883545,
120
+ "learning_rate": 0.00019992807441656898,
121
+ "loss": 0.5321,
122
+ "step": 80
123
+ },
124
+ {
125
+ "epoch": 0.07426823940585409,
126
+ "grad_norm": 3.9897687435150146,
127
+ "learning_rate": 0.00019991727422742362,
128
+ "loss": 0.6025,
129
+ "step": 85
130
+ },
131
+ {
132
+ "epoch": 0.07863695937090433,
133
+ "grad_norm": 2.3496663570404053,
134
+ "learning_rate": 0.00019990571921099553,
135
+ "loss": 0.5975,
136
+ "step": 90
137
+ },
138
+ {
139
+ "epoch": 0.08300567933595457,
140
+ "grad_norm": 3.3796467781066895,
141
+ "learning_rate": 0.0001998934094545774,
142
+ "loss": 0.5255,
143
+ "step": 95
144
+ },
145
+ {
146
+ "epoch": 0.08737439930100481,
147
+ "grad_norm": 3.1103007793426514,
148
+ "learning_rate": 0.00019988034505116352,
149
+ "loss": 0.4946,
150
+ "step": 100
151
+ },
152
+ {
153
+ "epoch": 0.09174311926605505,
154
+ "grad_norm": 2.002304792404175,
155
+ "learning_rate": 0.00019986652609944926,
156
+ "loss": 0.425,
157
+ "step": 105
158
+ },
159
+ {
160
+ "epoch": 0.09611183923110529,
161
+ "grad_norm": 1.7572168111801147,
162
+ "learning_rate": 0.00019985195270383018,
163
+ "loss": 0.6073,
164
+ "step": 110
165
+ },
166
+ {
167
+ "epoch": 0.10048055919615553,
168
+ "grad_norm": 2.745215654373169,
169
+ "learning_rate": 0.00019983662497440133,
170
+ "loss": 0.586,
171
+ "step": 115
172
+ },
173
+ {
174
+ "epoch": 0.10484927916120576,
175
+ "grad_norm": 1.8170915842056274,
176
+ "learning_rate": 0.0001998205430269564,
177
+ "loss": 0.5255,
178
+ "step": 120
179
+ },
180
+ {
181
+ "epoch": 0.109217999126256,
182
+ "grad_norm": 1.4944056272506714,
183
+ "learning_rate": 0.00019980370698298677,
184
+ "loss": 0.4219,
185
+ "step": 125
186
+ },
187
+ {
188
+ "epoch": 0.11358671909130624,
189
+ "grad_norm": 1.6616989374160767,
190
+ "learning_rate": 0.00019978611696968074,
191
+ "loss": 0.4231,
192
+ "step": 130
193
+ },
194
+ {
195
+ "epoch": 0.11795543905635648,
196
+ "grad_norm": 2.0523645877838135,
197
+ "learning_rate": 0.00019976777311992247,
198
+ "loss": 0.5298,
199
+ "step": 135
200
+ },
201
+ {
202
+ "epoch": 0.12232415902140673,
203
+ "grad_norm": 2.065765619277954,
204
+ "learning_rate": 0.00019974867557229098,
205
+ "loss": 0.5228,
206
+ "step": 140
207
+ },
208
+ {
209
+ "epoch": 0.12669287898645698,
210
+ "grad_norm": 1.7283438444137573,
211
+ "learning_rate": 0.00019972882447105912,
212
+ "loss": 0.3452,
213
+ "step": 145
214
+ },
215
+ {
216
+ "epoch": 0.1310615989515072,
217
+ "grad_norm": 2.655750274658203,
218
+ "learning_rate": 0.00019970821996619244,
219
+ "loss": 0.508,
220
+ "step": 150
221
+ },
222
+ {
223
+ "epoch": 0.13543031891655744,
224
+ "grad_norm": 2.67799973487854,
225
+ "learning_rate": 0.0001996868622133482,
226
+ "loss": 0.4359,
227
+ "step": 155
228
+ },
229
+ {
230
+ "epoch": 0.1397990388816077,
231
+ "grad_norm": 1.6298809051513672,
232
+ "learning_rate": 0.00019966475137387396,
233
+ "loss": 0.5447,
234
+ "step": 160
235
+ },
236
+ {
237
+ "epoch": 0.14416775884665792,
238
+ "grad_norm": 1.4772286415100098,
239
+ "learning_rate": 0.00019964188761480657,
240
+ "loss": 0.4105,
241
+ "step": 165
242
+ },
243
+ {
244
+ "epoch": 0.14853647881170817,
245
+ "grad_norm": 2.2986271381378174,
246
+ "learning_rate": 0.00019961827110887083,
247
+ "loss": 0.603,
248
+ "step": 170
249
+ },
250
+ {
251
+ "epoch": 0.1529051987767584,
252
+ "grad_norm": 2.8261911869049072,
253
+ "learning_rate": 0.00019959390203447817,
254
+ "loss": 0.4649,
255
+ "step": 175
256
+ },
257
+ {
258
+ "epoch": 0.15727391874180865,
259
+ "grad_norm": 1.7771011590957642,
260
+ "learning_rate": 0.00019956878057572524,
261
+ "loss": 0.4394,
262
+ "step": 180
263
+ },
264
+ {
265
+ "epoch": 0.16164263870685888,
266
+ "grad_norm": 1.7315421104431152,
267
+ "learning_rate": 0.00019954290692239274,
268
+ "loss": 0.5289,
269
+ "step": 185
270
+ },
271
+ {
272
+ "epoch": 0.16601135867190914,
273
+ "grad_norm": 1.6124423742294312,
274
+ "learning_rate": 0.00019951628126994373,
275
+ "loss": 0.4173,
276
+ "step": 190
277
+ },
278
+ {
279
+ "epoch": 0.17038007863695936,
280
+ "grad_norm": 1.792577862739563,
281
+ "learning_rate": 0.00019948890381952232,
282
+ "loss": 0.4331,
283
+ "step": 195
284
+ },
285
+ {
286
+ "epoch": 0.17474879860200962,
287
+ "grad_norm": 1.9038774967193604,
288
+ "learning_rate": 0.000199460774777952,
289
+ "loss": 0.4247,
290
+ "step": 200
291
+ },
292
+ {
293
+ "epoch": 0.17911751856705985,
294
+ "grad_norm": 2.457122802734375,
295
+ "learning_rate": 0.00019943189435773432,
296
+ "loss": 0.4519,
297
+ "step": 205
298
+ },
299
+ {
300
+ "epoch": 0.1834862385321101,
301
+ "grad_norm": 1.97683584690094,
302
+ "learning_rate": 0.00019940226277704706,
303
+ "loss": 0.4761,
304
+ "step": 210
305
+ },
306
+ {
307
+ "epoch": 0.18785495849716033,
308
+ "grad_norm": 2.1646862030029297,
309
+ "learning_rate": 0.0001993718802597426,
310
+ "loss": 0.5294,
311
+ "step": 215
312
+ },
313
+ {
314
+ "epoch": 0.19222367846221058,
315
+ "grad_norm": 1.565412998199463,
316
+ "learning_rate": 0.00019934074703534637,
317
+ "loss": 0.3999,
318
+ "step": 220
319
+ },
320
+ {
321
+ "epoch": 0.1965923984272608,
322
+ "grad_norm": 2.4315876960754395,
323
+ "learning_rate": 0.00019930886333905504,
324
+ "loss": 0.378,
325
+ "step": 225
326
+ },
327
+ {
328
+ "epoch": 0.20096111839231107,
329
+ "grad_norm": 2.7567529678344727,
330
+ "learning_rate": 0.00019927622941173467,
331
+ "loss": 0.5075,
332
+ "step": 230
333
+ },
334
+ {
335
+ "epoch": 0.2053298383573613,
336
+ "grad_norm": 1.8640387058258057,
337
+ "learning_rate": 0.00019924284549991902,
338
+ "loss": 0.4749,
339
+ "step": 235
340
+ },
341
+ {
342
+ "epoch": 0.20969855832241152,
343
+ "grad_norm": 2.090924024581909,
344
+ "learning_rate": 0.00019920871185580757,
345
+ "loss": 0.4353,
346
+ "step": 240
347
+ },
348
+ {
349
+ "epoch": 0.21406727828746178,
350
+ "grad_norm": 1.9691081047058105,
351
+ "learning_rate": 0.00019917382873726376,
352
+ "loss": 0.4051,
353
+ "step": 245
354
+ },
355
+ {
356
+ "epoch": 0.218435998252512,
357
+ "grad_norm": 1.8130213022232056,
358
+ "learning_rate": 0.0001991381964078128,
359
+ "loss": 0.526,
360
+ "step": 250
361
+ },
362
+ {
363
+ "epoch": 0.22280471821756226,
364
+ "grad_norm": 2.078805923461914,
365
+ "learning_rate": 0.00019910181513664,
366
+ "loss": 0.5654,
367
+ "step": 255
368
+ },
369
+ {
370
+ "epoch": 0.22717343818261249,
371
+ "grad_norm": 2.0686287879943848,
372
+ "learning_rate": 0.0001990646851985884,
373
+ "loss": 0.43,
374
+ "step": 260
375
+ },
376
+ {
377
+ "epoch": 0.23154215814766274,
378
+ "grad_norm": 1.475821614265442,
379
+ "learning_rate": 0.00019902680687415705,
380
+ "loss": 0.355,
381
+ "step": 265
382
+ },
383
+ {
384
+ "epoch": 0.23591087811271297,
385
+ "grad_norm": 1.901236891746521,
386
+ "learning_rate": 0.0001989881804494985,
387
+ "loss": 0.4522,
388
+ "step": 270
389
+ },
390
+ {
391
+ "epoch": 0.24027959807776322,
392
+ "grad_norm": 1.2583553791046143,
393
+ "learning_rate": 0.00019894880621641704,
394
+ "loss": 0.3869,
395
+ "step": 275
396
+ },
397
+ {
398
+ "epoch": 0.24464831804281345,
399
+ "grad_norm": 1.712336540222168,
400
+ "learning_rate": 0.00019890868447236613,
401
+ "loss": 0.454,
402
+ "step": 280
403
+ },
404
+ {
405
+ "epoch": 0.2490170380078637,
406
+ "grad_norm": 2.3967206478118896,
407
+ "learning_rate": 0.00019886781552044634,
408
+ "loss": 0.4074,
409
+ "step": 285
410
+ },
411
+ {
412
+ "epoch": 0.25338575797291396,
413
+ "grad_norm": 2.0578925609588623,
414
+ "learning_rate": 0.0001988261996694032,
415
+ "loss": 0.4268,
416
+ "step": 290
417
+ },
418
+ {
419
+ "epoch": 0.2577544779379642,
420
+ "grad_norm": 1.7411088943481445,
421
+ "learning_rate": 0.0001987838372336245,
422
+ "loss": 0.334,
423
+ "step": 295
424
+ },
425
+ {
426
+ "epoch": 0.2621231979030144,
427
+ "grad_norm": 1.8145533800125122,
428
+ "learning_rate": 0.0001987407285331382,
429
+ "loss": 0.4019,
430
+ "step": 300
431
+ },
432
+ {
433
+ "epoch": 0.26649191786806464,
434
+ "grad_norm": 1.3501653671264648,
435
+ "learning_rate": 0.00019869687389361,
436
+ "loss": 0.32,
437
+ "step": 305
438
+ },
439
+ {
440
+ "epoch": 0.27086063783311487,
441
+ "grad_norm": 1.208422303199768,
442
+ "learning_rate": 0.00019865227364634073,
443
+ "loss": 0.4548,
444
+ "step": 310
445
+ },
446
+ {
447
+ "epoch": 0.27522935779816515,
448
+ "grad_norm": 1.521690011024475,
449
+ "learning_rate": 0.00019860692812826396,
450
+ "loss": 0.3572,
451
+ "step": 315
452
+ },
453
+ {
454
+ "epoch": 0.2795980777632154,
455
+ "grad_norm": 2.2849714756011963,
456
+ "learning_rate": 0.0001985608376819434,
457
+ "loss": 0.4555,
458
+ "step": 320
459
+ },
460
+ {
461
+ "epoch": 0.2839667977282656,
462
+ "grad_norm": 2.7733798027038574,
463
+ "learning_rate": 0.00019851400265557037,
464
+ "loss": 0.4726,
465
+ "step": 325
466
+ },
467
+ {
468
+ "epoch": 0.28833551769331583,
469
+ "grad_norm": 1.973522424697876,
470
+ "learning_rate": 0.00019846642340296114,
471
+ "loss": 0.4585,
472
+ "step": 330
473
+ },
474
+ {
475
+ "epoch": 0.2927042376583661,
476
+ "grad_norm": 1.7133642435073853,
477
+ "learning_rate": 0.0001984181002835542,
478
+ "loss": 0.4679,
479
+ "step": 335
480
+ },
481
+ {
482
+ "epoch": 0.29707295762341634,
483
+ "grad_norm": 2.8383235931396484,
484
+ "learning_rate": 0.00019836903366240768,
485
+ "loss": 0.4119,
486
+ "step": 340
487
+ },
488
+ {
489
+ "epoch": 0.30144167758846657,
490
+ "grad_norm": 2.798276901245117,
491
+ "learning_rate": 0.00019831922391019645,
492
+ "loss": 0.3665,
493
+ "step": 345
494
+ },
495
+ {
496
+ "epoch": 0.3058103975535168,
497
+ "grad_norm": 2.171276569366455,
498
+ "learning_rate": 0.00019826867140320938,
499
+ "loss": 0.5691,
500
+ "step": 350
501
+ },
502
+ {
503
+ "epoch": 0.3101791175185671,
504
+ "grad_norm": 2.0866177082061768,
505
+ "learning_rate": 0.00019821737652334653,
506
+ "loss": 0.4074,
507
+ "step": 355
508
+ },
509
+ {
510
+ "epoch": 0.3145478374836173,
511
+ "grad_norm": 1.3713918924331665,
512
+ "learning_rate": 0.0001981653396581162,
513
+ "loss": 0.3379,
514
+ "step": 360
515
+ },
516
+ {
517
+ "epoch": 0.31891655744866754,
518
+ "grad_norm": 1.6086684465408325,
519
+ "learning_rate": 0.0001981125612006321,
520
+ "loss": 0.3563,
521
+ "step": 365
522
+ },
523
+ {
524
+ "epoch": 0.32328527741371776,
525
+ "grad_norm": 2.655686378479004,
526
+ "learning_rate": 0.0001980590415496102,
527
+ "loss": 0.3988,
528
+ "step": 370
529
+ },
530
+ {
531
+ "epoch": 0.32765399737876805,
532
+ "grad_norm": 1.5271559953689575,
533
+ "learning_rate": 0.00019800478110936596,
534
+ "loss": 0.5784,
535
+ "step": 375
536
+ },
537
+ {
538
+ "epoch": 0.3320227173438183,
539
+ "grad_norm": 1.3043195009231567,
540
+ "learning_rate": 0.00019794978028981106,
541
+ "loss": 0.2637,
542
+ "step": 380
543
+ },
544
+ {
545
+ "epoch": 0.3363914373088685,
546
+ "grad_norm": 2.539109706878662,
547
+ "learning_rate": 0.0001978940395064504,
548
+ "loss": 0.4658,
549
+ "step": 385
550
+ },
551
+ {
552
+ "epoch": 0.34076015727391873,
553
+ "grad_norm": 1.7521268129348755,
554
+ "learning_rate": 0.00019783755918037903,
555
+ "loss": 0.4253,
556
+ "step": 390
557
+ },
558
+ {
559
+ "epoch": 0.34512887723896896,
560
+ "grad_norm": 1.5679692029953003,
561
+ "learning_rate": 0.00019778033973827882,
562
+ "loss": 0.4528,
563
+ "step": 395
564
+ },
565
+ {
566
+ "epoch": 0.34949759720401924,
567
+ "grad_norm": 1.670640468597412,
568
+ "learning_rate": 0.00019772238161241528,
569
+ "loss": 0.3724,
570
+ "step": 400
571
+ },
572
+ {
573
+ "epoch": 0.35386631716906947,
574
+ "grad_norm": 1.520856261253357,
575
+ "learning_rate": 0.00019766368524063438,
576
+ "loss": 0.4141,
577
+ "step": 405
578
+ },
579
+ {
580
+ "epoch": 0.3582350371341197,
581
+ "grad_norm": 1.0802158117294312,
582
+ "learning_rate": 0.00019760425106635926,
583
+ "loss": 0.3268,
584
+ "step": 410
585
+ },
586
+ {
587
+ "epoch": 0.3626037570991699,
588
+ "grad_norm": 1.7306379079818726,
589
+ "learning_rate": 0.0001975440795385866,
590
+ "loss": 0.3654,
591
+ "step": 415
592
+ },
593
+ {
594
+ "epoch": 0.3669724770642202,
595
+ "grad_norm": 1.5037274360656738,
596
+ "learning_rate": 0.0001974831711118836,
597
+ "loss": 0.4285,
598
+ "step": 420
599
+ },
600
+ {
601
+ "epoch": 0.37134119702927043,
602
+ "grad_norm": 1.4654844999313354,
603
+ "learning_rate": 0.00019742152624638437,
604
+ "loss": 0.2548,
605
+ "step": 425
606
+ },
607
+ {
608
+ "epoch": 0.37570991699432066,
609
+ "grad_norm": 2.6770753860473633,
610
+ "learning_rate": 0.00019735914540778638,
611
+ "loss": 0.4238,
612
+ "step": 430
613
+ },
614
+ {
615
+ "epoch": 0.3800786369593709,
616
+ "grad_norm": 1.1864055395126343,
617
+ "learning_rate": 0.00019729602906734704,
618
+ "loss": 0.3959,
619
+ "step": 435
620
+ },
621
+ {
622
+ "epoch": 0.38444735692442117,
623
+ "grad_norm": 1.904876708984375,
624
+ "learning_rate": 0.00019723217770188024,
625
+ "loss": 0.3603,
626
+ "step": 440
627
+ },
628
+ {
629
+ "epoch": 0.3888160768894714,
630
+ "grad_norm": 1.7086598873138428,
631
+ "learning_rate": 0.0001971675917937525,
632
+ "loss": 0.551,
633
+ "step": 445
634
+ },
635
+ {
636
+ "epoch": 0.3931847968545216,
637
+ "grad_norm": 1.4635995626449585,
638
+ "learning_rate": 0.00019710227183087947,
639
+ "loss": 0.3738,
640
+ "step": 450
641
+ },
642
+ {
643
+ "epoch": 0.39755351681957185,
644
+ "grad_norm": 1.6047295331954956,
645
+ "learning_rate": 0.00019703621830672238,
646
+ "loss": 0.475,
647
+ "step": 455
648
+ },
649
+ {
650
+ "epoch": 0.40192223678462213,
651
+ "grad_norm": 1.4741933345794678,
652
+ "learning_rate": 0.00019696943172028394,
653
+ "loss": 0.4021,
654
+ "step": 460
655
+ },
656
+ {
657
+ "epoch": 0.40629095674967236,
658
+ "grad_norm": 2.8138020038604736,
659
+ "learning_rate": 0.00019690191257610497,
660
+ "loss": 0.3665,
661
+ "step": 465
662
+ },
663
+ {
664
+ "epoch": 0.4106596767147226,
665
+ "grad_norm": 1.6264874935150146,
666
+ "learning_rate": 0.00019683366138426034,
667
+ "loss": 0.3598,
668
+ "step": 470
669
+ },
670
+ {
671
+ "epoch": 0.4150283966797728,
672
+ "grad_norm": 1.6185061931610107,
673
+ "learning_rate": 0.00019676467866035525,
674
+ "loss": 0.5003,
675
+ "step": 475
676
+ },
677
+ {
678
+ "epoch": 0.41939711664482304,
679
+ "grad_norm": 1.8654040098190308,
680
+ "learning_rate": 0.00019669496492552113,
681
+ "loss": 0.397,
682
+ "step": 480
683
+ },
684
+ {
685
+ "epoch": 0.4237658366098733,
686
+ "grad_norm": 1.2525237798690796,
687
+ "learning_rate": 0.00019662452070641205,
688
+ "loss": 0.3235,
689
+ "step": 485
690
+ },
691
+ {
692
+ "epoch": 0.42813455657492355,
693
+ "grad_norm": 1.7755401134490967,
694
+ "learning_rate": 0.00019655334653520036,
695
+ "loss": 0.2978,
696
+ "step": 490
697
+ },
698
+ {
699
+ "epoch": 0.4325032765399738,
700
+ "grad_norm": 1.6025470495224,
701
+ "learning_rate": 0.00019648144294957297,
702
+ "loss": 0.4436,
703
+ "step": 495
704
+ },
705
+ {
706
+ "epoch": 0.436871996505024,
707
+ "grad_norm": 1.085461974143982,
708
+ "learning_rate": 0.00019640881049272713,
709
+ "loss": 0.22,
710
+ "step": 500
711
+ },
712
+ {
713
+ "epoch": 0.4412407164700743,
714
+ "grad_norm": 1.491818904876709,
715
+ "learning_rate": 0.00019633544971336636,
716
+ "loss": 0.2714,
717
+ "step": 505
718
+ },
719
+ {
720
+ "epoch": 0.4456094364351245,
721
+ "grad_norm": 0.9479840993881226,
722
+ "learning_rate": 0.0001962613611656963,
723
+ "loss": 0.3735,
724
+ "step": 510
725
+ },
726
+ {
727
+ "epoch": 0.44997815640017474,
728
+ "grad_norm": 3.0529448986053467,
729
+ "learning_rate": 0.0001961865454094205,
730
+ "loss": 0.4779,
731
+ "step": 515
732
+ },
733
+ {
734
+ "epoch": 0.45434687636522497,
735
+ "grad_norm": 2.831089973449707,
736
+ "learning_rate": 0.00019611100300973635,
737
+ "loss": 0.469,
738
+ "step": 520
739
+ },
740
+ {
741
+ "epoch": 0.45871559633027525,
742
+ "grad_norm": 2.1834311485290527,
743
+ "learning_rate": 0.00019603473453733052,
744
+ "loss": 0.4163,
745
+ "step": 525
746
+ },
747
+ {
748
+ "epoch": 0.4630843162953255,
749
+ "grad_norm": 1.3152204751968384,
750
+ "learning_rate": 0.00019595774056837493,
751
+ "loss": 0.3744,
752
+ "step": 530
753
+ },
754
+ {
755
+ "epoch": 0.4674530362603757,
756
+ "grad_norm": 1.4493387937545776,
757
+ "learning_rate": 0.00019588002168452223,
758
+ "loss": 0.3117,
759
+ "step": 535
760
+ },
761
+ {
762
+ "epoch": 0.47182175622542594,
763
+ "grad_norm": 1.1412076950073242,
764
+ "learning_rate": 0.00019580157847290147,
765
+ "loss": 0.3152,
766
+ "step": 540
767
+ },
768
+ {
769
+ "epoch": 0.47619047619047616,
770
+ "grad_norm": 1.5004645586013794,
771
+ "learning_rate": 0.00019572241152611365,
772
+ "loss": 0.3271,
773
+ "step": 545
774
+ },
775
+ {
776
+ "epoch": 0.48055919615552645,
777
+ "grad_norm": 2.3333992958068848,
778
+ "learning_rate": 0.0001956425214422272,
779
+ "loss": 0.3626,
780
+ "step": 550
781
+ },
782
+ {
783
+ "epoch": 0.4849279161205767,
784
+ "grad_norm": 1.5423107147216797,
785
+ "learning_rate": 0.0001955619088247736,
786
+ "loss": 0.4588,
787
+ "step": 555
788
+ },
789
+ {
790
+ "epoch": 0.4892966360856269,
791
+ "grad_norm": 3.008280038833618,
792
+ "learning_rate": 0.00019548057428274266,
793
+ "loss": 0.5275,
794
+ "step": 560
795
+ },
796
+ {
797
+ "epoch": 0.49366535605067713,
798
+ "grad_norm": 1.0968583822250366,
799
+ "learning_rate": 0.00019539851843057798,
800
+ "loss": 0.3233,
801
+ "step": 565
802
+ },
803
+ {
804
+ "epoch": 0.4980340760157274,
805
+ "grad_norm": 1.265228271484375,
806
+ "learning_rate": 0.00019531574188817234,
807
+ "loss": 0.2743,
808
+ "step": 570
809
+ },
810
+ {
811
+ "epoch": 0.5024027959807776,
812
+ "grad_norm": 1.9382916688919067,
813
+ "learning_rate": 0.000195232245280863,
814
+ "loss": 0.3189,
815
+ "step": 575
816
+ },
817
+ {
818
+ "epoch": 0.5067715159458279,
819
+ "grad_norm": 1.6710058450698853,
820
+ "learning_rate": 0.00019514802923942687,
821
+ "loss": 0.345,
822
+ "step": 580
823
+ },
824
+ {
825
+ "epoch": 0.5111402359108781,
826
+ "grad_norm": 1.8377633094787598,
827
+ "learning_rate": 0.000195063094400076,
828
+ "loss": 0.4441,
829
+ "step": 585
830
+ },
831
+ {
832
+ "epoch": 0.5155089558759284,
833
+ "grad_norm": 1.432173728942871,
834
+ "learning_rate": 0.0001949774414044525,
835
+ "loss": 0.3277,
836
+ "step": 590
837
+ },
838
+ {
839
+ "epoch": 0.5198776758409785,
840
+ "grad_norm": 1.096330165863037,
841
+ "learning_rate": 0.0001948910708996239,
842
+ "loss": 0.3821,
843
+ "step": 595
844
+ },
845
+ {
846
+ "epoch": 0.5242463958060288,
847
+ "grad_norm": 1.1951391696929932,
848
+ "learning_rate": 0.00019480398353807798,
849
+ "loss": 0.4303,
850
+ "step": 600
851
+ },
852
+ {
853
+ "epoch": 0.5286151157710791,
854
+ "grad_norm": 0.9764880537986755,
855
+ "learning_rate": 0.0001947161799777183,
856
+ "loss": 0.2693,
857
+ "step": 605
858
+ },
859
+ {
860
+ "epoch": 0.5329838357361293,
861
+ "grad_norm": 1.2566354274749756,
862
+ "learning_rate": 0.00019462766088185874,
863
+ "loss": 0.2851,
864
+ "step": 610
865
+ },
866
+ {
867
+ "epoch": 0.5373525557011796,
868
+ "grad_norm": 1.494903802871704,
869
+ "learning_rate": 0.0001945384269192188,
870
+ "loss": 0.36,
871
+ "step": 615
872
+ },
873
+ {
874
+ "epoch": 0.5417212756662297,
875
+ "grad_norm": 1.5508995056152344,
876
+ "learning_rate": 0.00019444847876391844,
877
+ "loss": 0.3682,
878
+ "step": 620
879
+ },
880
+ {
881
+ "epoch": 0.54608999563128,
882
+ "grad_norm": 2.227889060974121,
883
+ "learning_rate": 0.00019435781709547305,
884
+ "loss": 0.3889,
885
+ "step": 625
886
+ },
887
+ {
888
+ "epoch": 0.5504587155963303,
889
+ "grad_norm": 0.9221494197845459,
890
+ "learning_rate": 0.0001942664425987882,
891
+ "loss": 0.3375,
892
+ "step": 630
893
+ },
894
+ {
895
+ "epoch": 0.5548274355613805,
896
+ "grad_norm": 1.3386973142623901,
897
+ "learning_rate": 0.00019417435596415458,
898
+ "loss": 0.4833,
899
+ "step": 635
900
+ },
901
+ {
902
+ "epoch": 0.5591961555264308,
903
+ "grad_norm": 1.9686752557754517,
904
+ "learning_rate": 0.00019408155788724272,
905
+ "loss": 0.4739,
906
+ "step": 640
907
+ },
908
+ {
909
+ "epoch": 0.563564875491481,
910
+ "grad_norm": 2.3978073596954346,
911
+ "learning_rate": 0.00019398804906909777,
912
+ "loss": 0.4681,
913
+ "step": 645
914
+ },
915
+ {
916
+ "epoch": 0.5679335954565312,
917
+ "grad_norm": 1.536699652671814,
918
+ "learning_rate": 0.0001938938302161342,
919
+ "loss": 0.2684,
920
+ "step": 650
921
+ },
922
+ {
923
+ "epoch": 0.5723023154215815,
924
+ "grad_norm": 1.691787600517273,
925
+ "learning_rate": 0.00019379890204013043,
926
+ "loss": 0.3512,
927
+ "step": 655
928
+ },
929
+ {
930
+ "epoch": 0.5766710353866317,
931
+ "grad_norm": 1.7557870149612427,
932
+ "learning_rate": 0.0001937032652582235,
933
+ "loss": 0.3423,
934
+ "step": 660
935
+ },
936
+ {
937
+ "epoch": 0.581039755351682,
938
+ "grad_norm": 1.7950220108032227,
939
+ "learning_rate": 0.0001936069205929036,
940
+ "loss": 0.2831,
941
+ "step": 665
942
+ },
943
+ {
944
+ "epoch": 0.5854084753167322,
945
+ "grad_norm": 1.928232192993164,
946
+ "learning_rate": 0.00019350986877200867,
947
+ "loss": 0.323,
948
+ "step": 670
949
+ },
950
+ {
951
+ "epoch": 0.5897771952817824,
952
+ "grad_norm": 1.86429762840271,
953
+ "learning_rate": 0.00019341211052871887,
954
+ "loss": 0.4248,
955
+ "step": 675
956
+ },
957
+ {
958
+ "epoch": 0.5941459152468327,
959
+ "grad_norm": 2.022738456726074,
960
+ "learning_rate": 0.00019331364660155103,
961
+ "loss": 0.3411,
962
+ "step": 680
963
+ },
964
+ {
965
+ "epoch": 0.598514635211883,
966
+ "grad_norm": 1.2337995767593384,
967
+ "learning_rate": 0.00019321447773435306,
968
+ "loss": 0.2368,
969
+ "step": 685
970
+ },
971
+ {
972
+ "epoch": 0.6028833551769331,
973
+ "grad_norm": 2.015075445175171,
974
+ "learning_rate": 0.00019311460467629843,
975
+ "loss": 0.5116,
976
+ "step": 690
977
+ },
978
+ {
979
+ "epoch": 0.6072520751419834,
980
+ "grad_norm": 1.2344030141830444,
981
+ "learning_rate": 0.00019301402818188036,
982
+ "loss": 0.3313,
983
+ "step": 695
984
+ },
985
+ {
986
+ "epoch": 0.6116207951070336,
987
+ "grad_norm": 1.129764437675476,
988
+ "learning_rate": 0.00019291274901090625,
989
+ "loss": 0.408,
990
+ "step": 700
991
+ },
992
+ {
993
+ "epoch": 0.6159895150720839,
994
+ "grad_norm": 1.4350385665893555,
995
+ "learning_rate": 0.00019281076792849184,
996
+ "loss": 0.3729,
997
+ "step": 705
998
+ },
999
+ {
1000
+ "epoch": 0.6203582350371342,
1001
+ "grad_norm": 1.9586119651794434,
1002
+ "learning_rate": 0.00019270808570505553,
1003
+ "loss": 0.4315,
1004
+ "step": 710
1005
+ },
1006
+ {
1007
+ "epoch": 0.6247269550021843,
1008
+ "grad_norm": 1.0157238245010376,
1009
+ "learning_rate": 0.00019260470311631243,
1010
+ "loss": 0.2861,
1011
+ "step": 715
1012
+ },
1013
+ {
1014
+ "epoch": 0.6290956749672346,
1015
+ "grad_norm": 1.3841652870178223,
1016
+ "learning_rate": 0.00019250062094326864,
1017
+ "loss": 0.4037,
1018
+ "step": 720
1019
+ },
1020
+ {
1021
+ "epoch": 0.6334643949322848,
1022
+ "grad_norm": 1.848821997642517,
1023
+ "learning_rate": 0.00019239583997221525,
1024
+ "loss": 0.3665,
1025
+ "step": 725
1026
+ },
1027
+ {
1028
+ "epoch": 0.6378331148973351,
1029
+ "grad_norm": 0.9416481256484985,
1030
+ "learning_rate": 0.0001922903609947225,
1031
+ "loss": 0.339,
1032
+ "step": 730
1033
+ },
1034
+ {
1035
+ "epoch": 0.6422018348623854,
1036
+ "grad_norm": 1.0696804523468018,
1037
+ "learning_rate": 0.0001921841848076336,
1038
+ "loss": 0.2783,
1039
+ "step": 735
1040
+ },
1041
+ {
1042
+ "epoch": 0.6465705548274355,
1043
+ "grad_norm": 1.9199622869491577,
1044
+ "learning_rate": 0.00019207731221305903,
1045
+ "loss": 0.2904,
1046
+ "step": 740
1047
+ },
1048
+ {
1049
+ "epoch": 0.6509392747924858,
1050
+ "grad_norm": 1.347430944442749,
1051
+ "learning_rate": 0.00019196974401837008,
1052
+ "loss": 0.2719,
1053
+ "step": 745
1054
+ },
1055
+ {
1056
+ "epoch": 0.6553079947575361,
1057
+ "grad_norm": 0.9743670225143433,
1058
+ "learning_rate": 0.0001918614810361932,
1059
+ "loss": 0.2748,
1060
+ "step": 750
1061
+ },
1062
+ {
1063
+ "epoch": 0.6596767147225863,
1064
+ "grad_norm": 1.4043099880218506,
1065
+ "learning_rate": 0.00019175252408440343,
1066
+ "loss": 0.3285,
1067
+ "step": 755
1068
+ },
1069
+ {
1070
+ "epoch": 0.6640454346876365,
1071
+ "grad_norm": 2.9343338012695312,
1072
+ "learning_rate": 0.0001916428739861185,
1073
+ "loss": 0.4962,
1074
+ "step": 760
1075
+ },
1076
+ {
1077
+ "epoch": 0.6684141546526867,
1078
+ "grad_norm": 2.3201515674591064,
1079
+ "learning_rate": 0.0001915325315696926,
1080
+ "loss": 0.3243,
1081
+ "step": 765
1082
+ },
1083
+ {
1084
+ "epoch": 0.672782874617737,
1085
+ "grad_norm": 1.675564169883728,
1086
+ "learning_rate": 0.00019142149766870992,
1087
+ "loss": 0.4596,
1088
+ "step": 770
1089
+ },
1090
+ {
1091
+ "epoch": 0.6771515945827873,
1092
+ "grad_norm": 1.664604663848877,
1093
+ "learning_rate": 0.00019130977312197854,
1094
+ "loss": 0.3024,
1095
+ "step": 775
1096
+ },
1097
+ {
1098
+ "epoch": 0.6815203145478375,
1099
+ "grad_norm": 1.8358148336410522,
1100
+ "learning_rate": 0.00019119735877352412,
1101
+ "loss": 0.3862,
1102
+ "step": 780
1103
+ },
1104
+ {
1105
+ "epoch": 0.6858890345128877,
1106
+ "grad_norm": 1.3632128238677979,
1107
+ "learning_rate": 0.00019108425547258328,
1108
+ "loss": 0.2374,
1109
+ "step": 785
1110
+ },
1111
+ {
1112
+ "epoch": 0.6902577544779379,
1113
+ "grad_norm": 2.0279934406280518,
1114
+ "learning_rate": 0.0001909704640735975,
1115
+ "loss": 0.4392,
1116
+ "step": 790
1117
+ },
1118
+ {
1119
+ "epoch": 0.6946264744429882,
1120
+ "grad_norm": 1.2824902534484863,
1121
+ "learning_rate": 0.0001908559854362064,
1122
+ "loss": 0.2782,
1123
+ "step": 795
1124
+ },
1125
+ {
1126
+ "epoch": 0.6989951944080385,
1127
+ "grad_norm": 1.3477047681808472,
1128
+ "learning_rate": 0.00019074082042524145,
1129
+ "loss": 0.3631,
1130
+ "step": 800
1131
+ },
1132
+ {
1133
+ "epoch": 0.7033639143730887,
1134
+ "grad_norm": 1.8478046655654907,
1135
+ "learning_rate": 0.00019062496991071928,
1136
+ "loss": 0.3788,
1137
+ "step": 805
1138
+ },
1139
+ {
1140
+ "epoch": 0.7077326343381389,
1141
+ "grad_norm": 1.470382571220398,
1142
+ "learning_rate": 0.0001905084347678352,
1143
+ "loss": 0.3825,
1144
+ "step": 810
1145
+ },
1146
+ {
1147
+ "epoch": 0.7121013543031892,
1148
+ "grad_norm": 2.4951813220977783,
1149
+ "learning_rate": 0.00019039121587695652,
1150
+ "loss": 0.3359,
1151
+ "step": 815
1152
+ },
1153
+ {
1154
+ "epoch": 0.7164700742682394,
1155
+ "grad_norm": 2.3441359996795654,
1156
+ "learning_rate": 0.000190273314123616,
1157
+ "loss": 0.32,
1158
+ "step": 820
1159
+ },
1160
+ {
1161
+ "epoch": 0.7208387942332897,
1162
+ "grad_norm": 2.372884750366211,
1163
+ "learning_rate": 0.00019015473039850513,
1164
+ "loss": 0.3651,
1165
+ "step": 825
1166
+ },
1167
+ {
1168
+ "epoch": 0.7252075141983398,
1169
+ "grad_norm": 2.4474101066589355,
1170
+ "learning_rate": 0.0001900354655974672,
1171
+ "loss": 0.4401,
1172
+ "step": 830
1173
+ },
1174
+ {
1175
+ "epoch": 0.7295762341633901,
1176
+ "grad_norm": 1.4031054973602295,
1177
+ "learning_rate": 0.0001899155206214909,
1178
+ "loss": 0.308,
1179
+ "step": 835
1180
+ },
1181
+ {
1182
+ "epoch": 0.7339449541284404,
1183
+ "grad_norm": 1.6008141040802002,
1184
+ "learning_rate": 0.00018979489637670322,
1185
+ "loss": 0.2937,
1186
+ "step": 840
1187
+ },
1188
+ {
1189
+ "epoch": 0.7383136740934906,
1190
+ "grad_norm": 0.9202178120613098,
1191
+ "learning_rate": 0.0001896735937743627,
1192
+ "loss": 0.3157,
1193
+ "step": 845
1194
+ },
1195
+ {
1196
+ "epoch": 0.7426823940585409,
1197
+ "grad_norm": 1.024746298789978,
1198
+ "learning_rate": 0.00018955161373085253,
1199
+ "loss": 0.2934,
1200
+ "step": 850
1201
+ },
1202
+ {
1203
+ "epoch": 0.747051114023591,
1204
+ "grad_norm": 1.1573566198349,
1205
+ "learning_rate": 0.00018942895716767374,
1206
+ "loss": 0.3617,
1207
+ "step": 855
1208
+ },
1209
+ {
1210
+ "epoch": 0.7514198339886413,
1211
+ "grad_norm": 1.227409839630127,
1212
+ "learning_rate": 0.00018930562501143805,
1213
+ "loss": 0.3581,
1214
+ "step": 860
1215
+ },
1216
+ {
1217
+ "epoch": 0.7557885539536916,
1218
+ "grad_norm": 1.5460100173950195,
1219
+ "learning_rate": 0.00018918161819386095,
1220
+ "loss": 0.3393,
1221
+ "step": 865
1222
+ },
1223
+ {
1224
+ "epoch": 0.7601572739187418,
1225
+ "grad_norm": 1.688852310180664,
1226
+ "learning_rate": 0.0001890569376517548,
1227
+ "loss": 0.4389,
1228
+ "step": 870
1229
+ },
1230
+ {
1231
+ "epoch": 0.764525993883792,
1232
+ "grad_norm": 1.5271598100662231,
1233
+ "learning_rate": 0.00018893158432702149,
1234
+ "loss": 0.2915,
1235
+ "step": 875
1236
+ },
1237
+ {
1238
+ "epoch": 0.7688947138488423,
1239
+ "grad_norm": 1.695788860321045,
1240
+ "learning_rate": 0.00018880555916664555,
1241
+ "loss": 0.4026,
1242
+ "step": 880
1243
+ },
1244
+ {
1245
+ "epoch": 0.7732634338138925,
1246
+ "grad_norm": 1.6879792213439941,
1247
+ "learning_rate": 0.00018867886312268683,
1248
+ "loss": 0.2857,
1249
+ "step": 885
1250
+ },
1251
+ {
1252
+ "epoch": 0.7776321537789428,
1253
+ "grad_norm": 2.0718719959259033,
1254
+ "learning_rate": 0.00018855149715227344,
1255
+ "loss": 0.4236,
1256
+ "step": 890
1257
+ },
1258
+ {
1259
+ "epoch": 0.782000873743993,
1260
+ "grad_norm": 1.5112775564193726,
1261
+ "learning_rate": 0.00018842346221759448,
1262
+ "loss": 0.325,
1263
+ "step": 895
1264
+ },
1265
+ {
1266
+ "epoch": 0.7863695937090432,
1267
+ "grad_norm": 1.2844749689102173,
1268
+ "learning_rate": 0.00018829475928589271,
1269
+ "loss": 0.3782,
1270
+ "step": 900
1271
+ },
1272
+ {
1273
+ "epoch": 0.7907383136740935,
1274
+ "grad_norm": 2.150299072265625,
1275
+ "learning_rate": 0.00018816538932945728,
1276
+ "loss": 0.3726,
1277
+ "step": 905
1278
+ },
1279
+ {
1280
+ "epoch": 0.7951070336391437,
1281
+ "grad_norm": 1.7050650119781494,
1282
+ "learning_rate": 0.00018803535332561646,
1283
+ "loss": 0.3824,
1284
+ "step": 910
1285
+ },
1286
+ {
1287
+ "epoch": 0.799475753604194,
1288
+ "grad_norm": 1.8164982795715332,
1289
+ "learning_rate": 0.00018790465225673012,
1290
+ "loss": 0.3664,
1291
+ "step": 915
1292
+ },
1293
+ {
1294
+ "epoch": 0.8038444735692443,
1295
+ "grad_norm": 1.1102941036224365,
1296
+ "learning_rate": 0.00018777328711018244,
1297
+ "loss": 0.3166,
1298
+ "step": 920
1299
+ },
1300
+ {
1301
+ "epoch": 0.8082131935342944,
1302
+ "grad_norm": 1.4220764636993408,
1303
+ "learning_rate": 0.0001876412588783743,
1304
+ "loss": 0.3049,
1305
+ "step": 925
1306
+ },
1307
+ {
1308
+ "epoch": 0.8125819134993447,
1309
+ "grad_norm": 2.11336088180542,
1310
+ "learning_rate": 0.000187508568558716,
1311
+ "loss": 0.3076,
1312
+ "step": 930
1313
+ },
1314
+ {
1315
+ "epoch": 0.8169506334643949,
1316
+ "grad_norm": 1.9948710203170776,
1317
+ "learning_rate": 0.00018737521715361948,
1318
+ "loss": 0.3846,
1319
+ "step": 935
1320
+ },
1321
+ {
1322
+ "epoch": 0.8213193534294452,
1323
+ "grad_norm": 1.8913676738739014,
1324
+ "learning_rate": 0.00018724120567049094,
1325
+ "loss": 0.4296,
1326
+ "step": 940
1327
+ },
1328
+ {
1329
+ "epoch": 0.8256880733944955,
1330
+ "grad_norm": 1.3633447885513306,
1331
+ "learning_rate": 0.0001871065351217231,
1332
+ "loss": 0.3569,
1333
+ "step": 945
1334
+ },
1335
+ {
1336
+ "epoch": 0.8300567933595456,
1337
+ "grad_norm": 1.4957417249679565,
1338
+ "learning_rate": 0.00018697120652468762,
1339
+ "loss": 0.3085,
1340
+ "step": 950
1341
+ },
1342
+ {
1343
+ "epoch": 0.8344255133245959,
1344
+ "grad_norm": 2.076399803161621,
1345
+ "learning_rate": 0.0001868352209017275,
1346
+ "loss": 0.3331,
1347
+ "step": 955
1348
+ },
1349
+ {
1350
+ "epoch": 0.8387942332896461,
1351
+ "grad_norm": 1.1817855834960938,
1352
+ "learning_rate": 0.00018669857928014906,
1353
+ "loss": 0.3414,
1354
+ "step": 960
1355
+ },
1356
+ {
1357
+ "epoch": 0.8431629532546964,
1358
+ "grad_norm": 1.4255414009094238,
1359
+ "learning_rate": 0.00018656128269221454,
1360
+ "loss": 0.2782,
1361
+ "step": 965
1362
+ },
1363
+ {
1364
+ "epoch": 0.8475316732197467,
1365
+ "grad_norm": 1.326687216758728,
1366
+ "learning_rate": 0.0001864233321751341,
1367
+ "loss": 0.2998,
1368
+ "step": 970
1369
+ },
1370
+ {
1371
+ "epoch": 0.8519003931847968,
1372
+ "grad_norm": 2.222280263900757,
1373
+ "learning_rate": 0.00018628472877105793,
1374
+ "loss": 0.3348,
1375
+ "step": 975
1376
+ },
1377
+ {
1378
+ "epoch": 0.8562691131498471,
1379
+ "grad_norm": 1.518401026725769,
1380
+ "learning_rate": 0.00018614547352706863,
1381
+ "loss": 0.3816,
1382
+ "step": 980
1383
+ },
1384
+ {
1385
+ "epoch": 0.8606378331148974,
1386
+ "grad_norm": 1.1030207872390747,
1387
+ "learning_rate": 0.00018600556749517305,
1388
+ "loss": 0.3222,
1389
+ "step": 985
1390
+ },
1391
+ {
1392
+ "epoch": 0.8650065530799476,
1393
+ "grad_norm": 2.406994104385376,
1394
+ "learning_rate": 0.00018586501173229437,
1395
+ "loss": 0.3754,
1396
+ "step": 990
1397
+ },
1398
+ {
1399
+ "epoch": 0.8693752730449978,
1400
+ "grad_norm": 1.2401646375656128,
1401
+ "learning_rate": 0.00018572380730026434,
1402
+ "loss": 0.4402,
1403
+ "step": 995
1404
+ },
1405
+ {
1406
+ "epoch": 0.873743993010048,
1407
+ "grad_norm": 2.0233402252197266,
1408
+ "learning_rate": 0.0001855819552658149,
1409
+ "loss": 0.3323,
1410
+ "step": 1000
1411
+ }
1412
+ ],
1413
+ "logging_steps": 5,
1414
+ "max_steps": 5725,
1415
+ "num_input_tokens_seen": 0,
1416
+ "num_train_epochs": 5,
1417
+ "save_steps": 100,
1418
+ "stateful_callbacks": {
1419
+ "TrainerControl": {
1420
+ "args": {
1421
+ "should_epoch_stop": false,
1422
+ "should_evaluate": false,
1423
+ "should_log": false,
1424
+ "should_save": true,
1425
+ "should_training_stop": false
1426
+ },
1427
+ "attributes": {}
1428
+ }
1429
+ },
1430
+ "total_flos": 485735378608128.0,
1431
+ "train_batch_size": 4,
1432
+ "trial_name": null,
1433
+ "trial_params": null
1434
+ }
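The trainer state above is plain JSON, so its logged metrics can be inspected directly. Below is an illustrative sketch (not a file in this commit) that reads a checkpoint's trainer_state.json and prints the recorded step, loss, and learning-rate values; the relative path is an assumption based on the directory layout in this upload.

```python
# Illustrative sketch only -- not part of this repository.
# Assumes the checkpoint layout shown above (checkpoint-1000/trainer_state.json).
import json

with open("checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)

print("global_step:", state["global_step"], "of", state["max_steps"])

# Each log_history entry carries epoch, grad_norm, learning_rate, loss, and step.
for entry in state["log_history"]:
    print(f"step {entry['step']:>5}  loss {entry['loss']:.4f}  lr {entry['learning_rate']:.2e}")
```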
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:629a35960f565450f5c9e65334fb9c14eb136182b8af4bc22c885bdde32de5f3
+ size 5777
checkpoint-1100/README.md ADDED
@@ -0,0 +1,207 @@
1
+ ---
2
+ base_model: unsloth/functiongemma-270m-it
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:unsloth/functiongemma-270m-it
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.17.1
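The quick-start section of this model card is still a placeholder. A minimal sketch of how one of these LoRA checkpoints could be loaded on top of the base model named in the card is shown below; the local checkpoint path, prompt, and generation settings are assumptions for illustration, not content from this commit.

```python
# Minimal sketch, not part of this repository: load the LoRA adapter from one of the
# uploaded checkpoint directories onto the base model declared in adapter_config.json.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "unsloth/functiongemma-270m-it"   # base model from the card / adapter_config.json
adapter_path = "checkpoint-1100"            # assumed local path to an uploaded checkpoint

tokenizer = AutoTokenizer.from_pretrained(adapter_path)
base_model = AutoModelForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base_model, adapter_path)

inputs = tokenizer.apply_chat_template(
    [{"role": "user", "content": "What's the weather in Paris?"}],
    add_generation_prompt=True,
    return_tensors="pt",
)
output = model.generate(inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=False))
```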
checkpoint-1100/adapter_config.json ADDED
@@ -0,0 +1,42 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "unsloth/functiongemma-270m-it",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "qalora_group_size": 16,
+ "r": 64,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "down_proj",
+ "o_proj",
+ "k_proj",
+ "q_proj",
+ "v_proj",
+ "up_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+ }
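For readers reconstructing the training setup, the adapter settings stored above correspond roughly to the peft `LoraConfig` sketched below. This is inferred from the JSON; the original training script is not included in this commit.

```python
# Sketch of a peft LoraConfig matching the adapter_config.json above (inferred, not original code).
from peft import LoraConfig

lora_config = LoraConfig(
    r=64,                     # LoRA rank
    lora_alpha=128,           # scaling factor
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
)
```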
checkpoint-1100/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfa7e5dcebe3d84bc2f13e835e8c7c167e2bd913f8acb028dace29615ac4a2ab
+ size 60785144
checkpoint-1100/added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "<end_of_image>": 262145,
+ "<image_soft_token>": 262144
+ }
checkpoint-1100/chat_template.jinja ADDED
@@ -0,0 +1,282 @@
1
+ {%- macro format_parameters(properties, required) -%}
2
+ {%- set standard_keys = ['description', 'type', 'properties', 'required', 'nullable'] -%}
3
+ {%- set ns = namespace(found_first=false) -%}
4
+ {%- for key, value in properties | dictsort -%}
5
+ {%- if key not in standard_keys -%}
6
+ {%- if ns.found_first %},{% endif -%}
7
+ {%- set ns.found_first = true -%}
8
+ {{- key }}:{description:<escape>{{ value['description'] }}<escape>
9
+ {%- if value['type'] | upper == 'STRING' -%}
10
+ {%- if value['enum'] -%}
11
+ ,enum:{{ format_argument(value['enum']) }}
12
+ {%- endif -%}
13
+ {%- elif value['type'] | upper == 'OBJECT' -%}
14
+ ,properties:{
15
+ {%- if value['properties'] is defined and value['properties'] is mapping -%}
16
+ {{- format_parameters(value['properties'], value['required'] | default([])) -}}
17
+ {%- elif value is mapping -%}
18
+ {{- format_parameters(value, value['required'] | default([])) -}}
19
+ {%- endif -%}
20
+ }
21
+ {%- if value['required'] -%}
22
+ ,required:[
23
+ {%- for item in value['required'] | default([]) -%}
24
+ <escape>{{- item -}}<escape>
25
+ {%- if not loop.last %},{% endif -%}
26
+ {%- endfor -%}
27
+ ]
28
+ {%- endif -%}
29
+ {%- elif value['type'] | upper == 'ARRAY' -%}
30
+ {%- if value['items'] is mapping and value['items'] -%}
31
+ ,items:{
32
+ {%- set ns_items = namespace(found_first=false) -%}
33
+ {%- for item_key, item_value in value['items'] | dictsort -%}
34
+ {%- if item_value is not none -%}
35
+ {%- if ns_items.found_first %},{% endif -%}
36
+ {%- set ns_items.found_first = true -%}
37
+ {%- if item_key == 'properties' -%}
38
+ properties:{
39
+ {%- if item_value is mapping -%}
40
+ {{- format_parameters(item_value, value['items']['required'] | default([])) -}}
41
+ {%- endif -%}
42
+ }
43
+ {%- elif item_key == 'required' -%}
44
+ required:[
45
+ {%- for req_item in item_value -%}
46
+ <escape>{{- req_item -}}<escape>
47
+ {%- if not loop.last %},{% endif -%}
48
+ {%- endfor -%}
49
+ ]
50
+ {%- elif item_key == 'type' -%}
51
+ {%- if item_value is string -%}
52
+ type:{{ format_argument(item_value | upper) }}
53
+ {%- else -%}
54
+ type:{{ format_argument(item_value | map('upper') | list) }}
55
+ {%- endif -%}
56
+ {%- else -%}
57
+ {{ item_key }}:{{ format_argument(item_value) }}
58
+ {%- endif -%}
59
+ {%- endif -%}
60
+ {%- endfor -%}
61
+ }
62
+ {%- endif -%}
63
+ {%- endif -%}
64
+ ,type:<escape>{{ value['type'] | upper }}<escape>}
65
+ {%- endif -%}
66
+ {%- endfor -%}
67
+ {%- endmacro -%}
68
+ {% macro format_function_declaration(tool_data) -%}
69
+ declaration:{{- tool_data['function']['name'] -}}
70
+ {description:<escape>{{- tool_data['function']['description'] -}}<escape>
71
+ {%- set params = tool_data['function']['parameters'] -%}
72
+ {%- if params -%}
73
+ ,parameters:{
74
+ {%- if params['properties'] -%}
75
+ properties:{ {{- format_parameters(params['properties'], params['required']) -}} },
76
+ {%- endif -%}
77
+ {%- if params['required'] -%}
78
+ required:[
79
+ {%- for item in params['required'] -%}
80
+ <escape>{{- item -}}<escape>
81
+ {{- ',' if not loop.last -}}
82
+ {%- endfor -%}
83
+ ],
84
+ {%- endif -%}
85
+ {%- if params['type'] -%}
86
+ type:<escape>{{- params['type'] | upper -}}<escape>}
87
+ {%- endif -%}
88
+ {%- endif -%}
89
+ }
90
+ {%- endmacro -%}
91
+ {% macro format_argument(argument, escape_keys=True) -%}
92
+ {%- if argument is string -%}
93
+ {{- '<escape>' + argument + '<escape>' -}}
94
+ {%- elif argument is boolean -%}
95
+ {%- if argument -%}
96
+ {{- 'true' -}}
97
+ {%- else -%}
98
+ {{- 'false' -}}
99
+ {%- endif -%}
100
+ {%- elif argument is mapping -%}
101
+ {{- '{' -}}
102
+ {%- set ns = namespace(found_first=false) -%}
103
+ {%- for key, value in argument | dictsort -%}
104
+ {%- if ns.found_first %},{% endif -%}
105
+ {%- set ns.found_first = true -%}
106
+ {%- if escape_keys -%}
107
+ {{- '<escape>' + key + '<escape>' -}}
108
+ {%- else -%}
109
+ {{- key -}}
110
+ {%- endif -%}
111
+ :{{- format_argument(value, escape_keys=escape_keys) -}}
112
+ {%- endfor -%}
113
+ {{- '}' -}}
114
+ {%- elif argument is sequence -%}
115
+ {{- '[' -}}
116
+ {%- for item in argument -%}
117
+ {{- format_argument(item, escape_keys=escape_keys) -}}
118
+ {%- if not loop.last %},{% endif -%}
119
+ {%- endfor -%}
120
+ {{- ']' -}}
121
+ {%- else -%}
122
+ {{- argument -}}
123
+ {%- endif -%}
124
+ {%- endmacro -%}
125
+ {{ bos_token }}
126
+ {%- set ns = namespace(prev_message_type=None) -%}
127
+ {#- Tool Declarations -#}
128
+ {%- set loop_messages = messages -%}
129
+ {%- if tools or messages[0]['role'] == 'system' or messages[0]['role'] == 'developer' -%}
130
+ {{- '<start_of_turn>developer\n' -}}
131
+ {%- if messages[0]['role'] == 'system' or messages[0]['role'] == 'developer' -%}
132
+ {%- if messages[0]['content'] is string -%}
133
+ {{- messages[0]['content'] | trim -}}
134
+ {%- elif messages[0]['content'] is sequence -%}
135
+ {%- for item in messages[0]['content'] -%}
136
+ {%- if item['type'] == 'text' -%}
137
+ {{- item['text'] | trim -}}
138
+ {%- endif -%}
139
+ {%- endfor -%}
140
+ {%- endif -%}
141
+ {%- set loop_messages = messages[1:] -%}
142
+ {%- else -%}
143
+ {{- 'You are a model that can do function calling with the following functions' -}}
144
+ {%- set loop_messages = messages -%}
145
+ {%- endif -%}
146
+ {%- if tools -%}
147
+ {%- for tool in tools %}
148
+ {{- '<start_function_declaration>' -}}
149
+ {{- format_function_declaration(tool) | trim }}
150
+ {{- '<end_function_declaration>' -}}
151
+ {%- endfor %}
152
+ {%- endif -%}
153
+ {{- '<end_of_turn>\n' }}
154
+ {%- endif %}
155
+ {#- Loop through messages. -#}
156
+ {%- for message in loop_messages -%}
157
+ {%- if (message['role'] == 'assistant') -%}
158
+ {#- Rename "assistant" to "model". -#}
159
+ {%- set role = "model" -%}
160
+ {%- else -%}
161
+ {%- set role = message['role'] -%}
162
+ {%- endif -%}
163
+ {%- if role != 'tool' -%}
164
+ {%- if ns.prev_message_type != 'tool_response' -%}
165
+ {{- '<start_of_turn>' + role + '\n' }}
166
+ {%- endif -%}
167
+ {%- set ns.prev_message_type = None -%}
168
+ {%- if 'content' in message and message['content'] is not none -%}
169
+ {%- if message['content'] is string -%}
170
+ {{ message['content'] | trim }}
171
+ {%- elif message['content'] is sequence -%}
172
+ {%- for item in message['content'] -%}
173
+ {%- if item['type'] == 'image' -%}
174
+ {{ '<start_of_image>' }}
175
+ {%- elif item['type'] == 'text' -%}
176
+ {{ item['text'] | trim }}
177
+ {%- endif -%}
178
+ {%- endfor -%}
179
+ {%- else -%}
180
+ {{ raise_exception("Invalid content type in user/assistant message") }}
181
+ {%- endif -%}
182
+ {%- set ns.prev_message_type = 'content' -%}
183
+ {%- endif -%}
184
+ {%- if 'tool_calls' in message and message['tool_calls'] and message['tool_calls'] is iterable -%}
185
+ {#- Tool Calls -#}
186
+ {%- for tool_call in message['tool_calls'] -%}
187
+ {% set function = tool_call['function'] %}
188
+ {{- '<start_function_call>call:' + function['name'] + '{' -}}
189
+ {%- if 'arguments' in function -%}
190
+ {%- if function['arguments'] is mapping -%}
191
+ {%- set ns = namespace(found_first=false) -%}
192
+ {%- for key, value in function['arguments'] | dictsort -%}
193
+ {%- if ns.found_first %},{% endif -%}
194
+ {%- set ns.found_first = true -%}
195
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
196
+ {%- endfor -%}
197
+ {%- elif function['arguments'] is string -%}
198
+ {# This handles string-JSON, just in case #}
199
+ {{ function['arguments'] }}
200
+ {%- endif %}
201
+ {%- endif -%}
202
+ {{- '}<end_function_call>' -}}
203
+ {%- endfor -%}
204
+ {%- if loop.last -%}
205
+ {{ '<start_function_response>' }}
206
+ {%- endif -%}
207
+ {%- set ns.prev_message_type = 'tool_call' -%}
208
+ {%- endif -%}
209
+ {%- else -%}
210
+ {#- Tool Responses -#}
211
+ {%- if 'content' in message and message['content'] -%}
212
+ {%- if message['content'] is mapping -%}
213
+ {%- if 'name' in message['content'] and 'response' in message['content'] -%}
214
+ {{ '<start_function_response>response:' + message['content']['name'] | trim + '{' }}
215
+ {%- set response_ns = namespace(found_first=false) -%}
216
+ {%- for key, value in message['content']['response'] | dictsort -%}
217
+ {%- if response_ns.found_first %},{% endif -%}
218
+ {%- set response_ns.found_first = true -%}
219
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
220
+ {%- endfor -%}
221
+ {{- '}<end_function_response>' -}}
222
+ {%- elif 'name' in message -%}
223
+ {{ '<start_function_response>response:' + message['name'] | trim + '{' }}
224
+ {%- set response_ns = namespace(found_first=false) -%}
225
+ {%- for key, value in message['content'] | dictsort -%}
226
+ {%- if response_ns.found_first %},{% endif -%}
227
+ {%- set response_ns.found_first = true -%}
228
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
229
+ {%- endfor -%}
230
+ {{- '}<end_function_response>' -}}
231
+ {%- else -%}
232
+ {{ raise_exception("Invalid tool response mapping: must contain 'name' and 'response' keys, or 'name' must be in the message.") }}
233
+ {%- endif -%}
234
+ {%- elif message['content'] is string -%}
235
+ {%- if 'name' in message -%}
236
+ {{ '<start_function_response>response:' + message['name'] | trim + '{value:' + format_argument(message['content'], escape_keys=False) + '}<end_function_response>' }}
237
+ {%- else -%}
238
+ {{ raise_exception("Invalid tool response: 'name' must be provided.") }}
239
+ {%- endif -%}
240
+ {%- elif message['content'] is sequence -%}
241
+ {%- for item in message['content'] -%}
242
+ {%- if item is mapping -%}
243
+ {%- if 'name' in item and 'response' in item -%}
244
+ {{ '<start_function_response>response:' + item['name'] | trim + '{' }}
245
+ {%- set response_ns = namespace(found_first=false) -%}
246
+ {%- for key, value in item['response'] | dictsort -%}
247
+ {%- if response_ns.found_first %},{% endif -%}
248
+ {%- set response_ns.found_first = true -%}
249
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
250
+ {%- endfor -%}
251
+ {{- '}<end_function_response>' -}}
252
+ {%- elif 'name' in message -%}
253
+ {{ '<start_function_response>response:' + message['name'] | trim + '{' }}
254
+ {%- set response_ns = namespace(found_first=false) -%}
255
+ {%- for key, value in item | dictsort -%}
256
+ {%- if response_ns.found_first %},{% endif -%}
257
+ {%- set response_ns.found_first = true -%}
258
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
259
+ {%- endfor -%}
260
+ {{- '}<end_function_response>' -}}
261
+ {%- else -%}
262
+ {{ raise_exception("Invalid tool response mapping: must contain 'name' and 'response' keys, or 'name' must be in the message.") }}
263
+ {%- endif -%}
264
+ {%- else -%}
265
+ {{ raise_exception("Invalid tool response message: multiple responses must all be mappings") }}
266
+ {%- endif -%}
267
+ {%- endfor -%}
268
+ {%- else -%}
269
+ {{ raise_exception("Invalid content type in tool message: must be mapping, sequence of mappings, or string.") }}
270
+ {%- endif -%}
271
+ {%- endif -%}
272
+ {%- set ns.prev_message_type = 'tool_response' -%}
273
+ {%- endif -%}
274
+ {%- if ns.prev_message_type not in ['tool_call', 'tool_response'] -%}
275
+ {{ '<end_of_turn>\n' }}
276
+ {%- endif -%}
277
+ {%- endfor -%}
278
+ {%- if add_generation_prompt -%}
279
+ {%- if ns.prev_message_type != 'tool_response' -%}
280
+ {{- '<start_of_turn>model\n' -}}
281
+ {%- endif -%}
282
+ {%- endif -%}
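The template above renders developer turns, function declarations, tool calls, and tool responses into the model's special-token format. In normal use it is driven through `tokenizer.apply_chat_template`; the sketch below illustrates that with a made-up tool schema (only `apply_chat_template` and its `tools=` argument are standard transformers API, everything else is an assumption).

```python
# Illustrative sketch: feed a tool definition through the chat template above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-1100")  # assumed local path

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string", "description": "City name"}},
            "required": ["city"],
        },
    },
}]

messages = [{"role": "user", "content": "What's the weather in Paris?"}]

text = tokenizer.apply_chat_template(
    messages, tools=tools, tokenize=False, add_generation_prompt=True
)
print(text)  # includes the <start_function_declaration>...<end_function_declaration> block
```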
checkpoint-1100/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dcbc3235ecf9adc47a1cd14cefc2f03e87bcb237ac474db8b3f600fcc811cd54
+ size 121708939
checkpoint-1100/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb03eac9b58e52273c3aa5727841719fd6c82afc9062bbaf8eb6f842de779278
+ size 14391
checkpoint-1100/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08c9ae0af668f7241f64d293f2b41e3dfe0e41af6f89f5b50e74617068844c5e
+ size 1465
checkpoint-1100/special_tokens_map.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "boi_token": "<start_of_image>",
+ "bos_token": {
+ "content": "<bos>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eoi_token": "<end_of_image>",
+ "eos_token": {
+ "content": "<end_of_turn>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "image_token": "<image_soft_token>",
+ "pad_token": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sfr_token": "<start_function_response>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-1100/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6b09a0b4a803ad453063ca4bb49a784540e8120004e2450e025df2b27d41fb2
+ size 33384899
checkpoint-1100/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa009fcbc3589a9904d30d04834094fea4653c2ac6d2de2cd1262d4f7a50ceb3
+ size 4689144
checkpoint-1100/tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1100/trainer_state.json ADDED
@@ -0,0 +1,1574 @@
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 0.9611183923110529,
6
+ "eval_steps": 500,
7
+ "global_step": 1100,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.00436871996505024,
14
+ "grad_norm": 9.839941024780273,
15
+ "learning_rate": 8e-05,
16
+ "loss": 2.5246,
17
+ "step": 5
18
+ },
19
+ {
20
+ "epoch": 0.00873743993010048,
21
+ "grad_norm": 13.773455619812012,
22
+ "learning_rate": 0.00018,
23
+ "loss": 1.1343,
24
+ "step": 10
25
+ },
26
+ {
27
+ "epoch": 0.01310615989515072,
28
+ "grad_norm": 5.6580424308776855,
29
+ "learning_rate": 0.0001999997582552296,
30
+ "loss": 0.7712,
31
+ "step": 15
32
+ },
33
+ {
34
+ "epoch": 0.01747487986020096,
35
+ "grad_norm": 5.294467926025391,
36
+ "learning_rate": 0.0001999987761691029,
37
+ "loss": 0.73,
38
+ "step": 20
39
+ },
40
+ {
41
+ "epoch": 0.021843599825251202,
42
+ "grad_norm": 2.8633503913879395,
43
+ "learning_rate": 0.00019999703863998527,
44
+ "loss": 0.7289,
45
+ "step": 25
46
+ },
47
+ {
48
+ "epoch": 0.02621231979030144,
49
+ "grad_norm": 3.2836177349090576,
50
+ "learning_rate": 0.00019999454568100293,
51
+ "loss": 0.4686,
52
+ "step": 30
53
+ },
54
+ {
55
+ "epoch": 0.03058103975535168,
56
+ "grad_norm": 4.878258228302002,
57
+ "learning_rate": 0.00019999129731098898,
58
+ "loss": 0.6629,
59
+ "step": 35
60
+ },
61
+ {
62
+ "epoch": 0.03494975972040192,
63
+ "grad_norm": 2.899914026260376,
64
+ "learning_rate": 0.00019998729355448326,
65
+ "loss": 0.6038,
66
+ "step": 40
67
+ },
68
+ {
69
+ "epoch": 0.039318479685452164,
70
+ "grad_norm": 3.289844274520874,
71
+ "learning_rate": 0.00019998253444173235,
72
+ "loss": 0.4573,
73
+ "step": 45
74
+ },
75
+ {
76
+ "epoch": 0.043687199650502405,
77
+ "grad_norm": 2.957254648208618,
78
+ "learning_rate": 0.00019997702000868896,
79
+ "loss": 0.594,
80
+ "step": 50
81
+ },
82
+ {
83
+ "epoch": 0.048055919615552646,
84
+ "grad_norm": 3.171276807785034,
85
+ "learning_rate": 0.00019997075029701207,
86
+ "loss": 0.5719,
87
+ "step": 55
88
+ },
89
+ {
90
+ "epoch": 0.05242463958060288,
91
+ "grad_norm": 2.55605149269104,
92
+ "learning_rate": 0.0001999637253540663,
93
+ "loss": 0.5971,
94
+ "step": 60
95
+ },
96
+ {
97
+ "epoch": 0.05679335954565312,
98
+ "grad_norm": 2.127289295196533,
99
+ "learning_rate": 0.00019995594523292178,
100
+ "loss": 0.5712,
101
+ "step": 65
102
+ },
103
+ {
104
+ "epoch": 0.06116207951070336,
105
+ "grad_norm": 3.3928685188293457,
106
+ "learning_rate": 0.00019994740999235359,
107
+ "loss": 0.5712,
108
+ "step": 70
109
+ },
110
+ {
111
+ "epoch": 0.0655307994757536,
112
+ "grad_norm": 2.6700279712677,
113
+ "learning_rate": 0.00019993811969684142,
114
+ "loss": 0.427,
115
+ "step": 75
116
+ },
117
+ {
118
+ "epoch": 0.06989951944080385,
119
+ "grad_norm": 2.6936633586883545,
120
+ "learning_rate": 0.00019992807441656898,
121
+ "loss": 0.5321,
122
+ "step": 80
123
+ },
124
+ {
125
+ "epoch": 0.07426823940585409,
126
+ "grad_norm": 3.9897687435150146,
127
+ "learning_rate": 0.00019991727422742362,
128
+ "loss": 0.6025,
129
+ "step": 85
130
+ },
131
+ {
132
+ "epoch": 0.07863695937090433,
133
+ "grad_norm": 2.3496663570404053,
134
+ "learning_rate": 0.00019990571921099553,
135
+ "loss": 0.5975,
136
+ "step": 90
137
+ },
138
+ {
139
+ "epoch": 0.08300567933595457,
140
+ "grad_norm": 3.3796467781066895,
141
+ "learning_rate": 0.0001998934094545774,
142
+ "loss": 0.5255,
143
+ "step": 95
144
+ },
145
+ {
146
+ "epoch": 0.08737439930100481,
147
+ "grad_norm": 3.1103007793426514,
148
+ "learning_rate": 0.00019988034505116352,
149
+ "loss": 0.4946,
150
+ "step": 100
151
+ },
152
+ {
153
+ "epoch": 0.09174311926605505,
154
+ "grad_norm": 2.002304792404175,
155
+ "learning_rate": 0.00019986652609944926,
156
+ "loss": 0.425,
157
+ "step": 105
158
+ },
159
+ {
160
+ "epoch": 0.09611183923110529,
161
+ "grad_norm": 1.7572168111801147,
162
+ "learning_rate": 0.00019985195270383018,
163
+ "loss": 0.6073,
164
+ "step": 110
165
+ },
166
+ {
167
+ "epoch": 0.10048055919615553,
168
+ "grad_norm": 2.745215654373169,
169
+ "learning_rate": 0.00019983662497440133,
170
+ "loss": 0.586,
171
+ "step": 115
172
+ },
173
+ {
174
+ "epoch": 0.10484927916120576,
175
+ "grad_norm": 1.8170915842056274,
176
+ "learning_rate": 0.0001998205430269564,
177
+ "loss": 0.5255,
178
+ "step": 120
179
+ },
180
+ {
181
+ "epoch": 0.109217999126256,
182
+ "grad_norm": 1.4944056272506714,
183
+ "learning_rate": 0.00019980370698298677,
184
+ "loss": 0.4219,
185
+ "step": 125
186
+ },
187
+ {
188
+ "epoch": 0.11358671909130624,
189
+ "grad_norm": 1.6616989374160767,
190
+ "learning_rate": 0.00019978611696968074,
191
+ "loss": 0.4231,
192
+ "step": 130
193
+ },
194
+ {
195
+ "epoch": 0.11795543905635648,
196
+ "grad_norm": 2.0523645877838135,
197
+ "learning_rate": 0.00019976777311992247,
198
+ "loss": 0.5298,
199
+ "step": 135
200
+ },
201
+ {
202
+ "epoch": 0.12232415902140673,
203
+ "grad_norm": 2.065765619277954,
204
+ "learning_rate": 0.00019974867557229098,
205
+ "loss": 0.5228,
206
+ "step": 140
207
+ },
208
+ {
209
+ "epoch": 0.12669287898645698,
210
+ "grad_norm": 1.7283438444137573,
211
+ "learning_rate": 0.00019972882447105912,
212
+ "loss": 0.3452,
213
+ "step": 145
214
+ },
215
+ {
216
+ "epoch": 0.1310615989515072,
217
+ "grad_norm": 2.655750274658203,
218
+ "learning_rate": 0.00019970821996619244,
219
+ "loss": 0.508,
220
+ "step": 150
221
+ },
222
+ {
223
+ "epoch": 0.13543031891655744,
224
+ "grad_norm": 2.67799973487854,
225
+ "learning_rate": 0.0001996868622133482,
226
+ "loss": 0.4359,
227
+ "step": 155
228
+ },
229
+ {
230
+ "epoch": 0.1397990388816077,
231
+ "grad_norm": 1.6298809051513672,
232
+ "learning_rate": 0.00019966475137387396,
233
+ "loss": 0.5447,
234
+ "step": 160
235
+ },
236
+ {
237
+ "epoch": 0.14416775884665792,
238
+ "grad_norm": 1.4772286415100098,
239
+ "learning_rate": 0.00019964188761480657,
240
+ "loss": 0.4105,
241
+ "step": 165
242
+ },
243
+ {
244
+ "epoch": 0.14853647881170817,
245
+ "grad_norm": 2.2986271381378174,
246
+ "learning_rate": 0.00019961827110887083,
247
+ "loss": 0.603,
248
+ "step": 170
249
+ },
250
+ {
251
+ "epoch": 0.1529051987767584,
252
+ "grad_norm": 2.8261911869049072,
253
+ "learning_rate": 0.00019959390203447817,
254
+ "loss": 0.4649,
255
+ "step": 175
256
+ },
257
+ {
258
+ "epoch": 0.15727391874180865,
259
+ "grad_norm": 1.7771011590957642,
260
+ "learning_rate": 0.00019956878057572524,
261
+ "loss": 0.4394,
262
+ "step": 180
263
+ },
264
+ {
265
+ "epoch": 0.16164263870685888,
266
+ "grad_norm": 1.7315421104431152,
267
+ "learning_rate": 0.00019954290692239274,
268
+ "loss": 0.5289,
269
+ "step": 185
270
+ },
271
+ {
272
+ "epoch": 0.16601135867190914,
273
+ "grad_norm": 1.6124423742294312,
274
+ "learning_rate": 0.00019951628126994373,
275
+ "loss": 0.4173,
276
+ "step": 190
277
+ },
278
+ {
279
+ "epoch": 0.17038007863695936,
280
+ "grad_norm": 1.792577862739563,
281
+ "learning_rate": 0.00019948890381952232,
282
+ "loss": 0.4331,
283
+ "step": 195
284
+ },
285
+ {
286
+ "epoch": 0.17474879860200962,
287
+ "grad_norm": 1.9038774967193604,
288
+ "learning_rate": 0.000199460774777952,
289
+ "loss": 0.4247,
290
+ "step": 200
291
+ },
292
+ {
293
+ "epoch": 0.17911751856705985,
294
+ "grad_norm": 2.457122802734375,
295
+ "learning_rate": 0.00019943189435773432,
296
+ "loss": 0.4519,
297
+ "step": 205
298
+ },
299
+ {
300
+ "epoch": 0.1834862385321101,
301
+ "grad_norm": 1.97683584690094,
302
+ "learning_rate": 0.00019940226277704706,
303
+ "loss": 0.4761,
304
+ "step": 210
305
+ },
306
+ {
307
+ "epoch": 0.18785495849716033,
308
+ "grad_norm": 2.1646862030029297,
309
+ "learning_rate": 0.0001993718802597426,
310
+ "loss": 0.5294,
311
+ "step": 215
312
+ },
313
+ {
314
+ "epoch": 0.19222367846221058,
315
+ "grad_norm": 1.565412998199463,
316
+ "learning_rate": 0.00019934074703534637,
317
+ "loss": 0.3999,
318
+ "step": 220
319
+ },
320
+ {
321
+ "epoch": 0.1965923984272608,
322
+ "grad_norm": 2.4315876960754395,
323
+ "learning_rate": 0.00019930886333905504,
324
+ "loss": 0.378,
325
+ "step": 225
326
+ },
327
+ {
328
+ "epoch": 0.20096111839231107,
329
+ "grad_norm": 2.7567529678344727,
330
+ "learning_rate": 0.00019927622941173467,
331
+ "loss": 0.5075,
332
+ "step": 230
333
+ },
334
+ {
335
+ "epoch": 0.2053298383573613,
336
+ "grad_norm": 1.8640387058258057,
337
+ "learning_rate": 0.00019924284549991902,
338
+ "loss": 0.4749,
339
+ "step": 235
340
+ },
341
+ {
342
+ "epoch": 0.20969855832241152,
343
+ "grad_norm": 2.090924024581909,
344
+ "learning_rate": 0.00019920871185580757,
345
+ "loss": 0.4353,
346
+ "step": 240
347
+ },
348
+ {
349
+ "epoch": 0.21406727828746178,
350
+ "grad_norm": 1.9691081047058105,
351
+ "learning_rate": 0.00019917382873726376,
352
+ "loss": 0.4051,
353
+ "step": 245
354
+ },
355
+ {
356
+ "epoch": 0.218435998252512,
357
+ "grad_norm": 1.8130213022232056,
358
+ "learning_rate": 0.0001991381964078128,
359
+ "loss": 0.526,
360
+ "step": 250
361
+ },
362
+ {
363
+ "epoch": 0.22280471821756226,
364
+ "grad_norm": 2.078805923461914,
365
+ "learning_rate": 0.00019910181513664,
366
+ "loss": 0.5654,
367
+ "step": 255
368
+ },
369
+ {
370
+ "epoch": 0.22717343818261249,
371
+ "grad_norm": 2.0686287879943848,
372
+ "learning_rate": 0.0001990646851985884,
373
+ "loss": 0.43,
374
+ "step": 260
375
+ },
376
+ {
377
+ "epoch": 0.23154215814766274,
378
+ "grad_norm": 1.475821614265442,
379
+ "learning_rate": 0.00019902680687415705,
380
+ "loss": 0.355,
381
+ "step": 265
382
+ },
383
+ {
384
+ "epoch": 0.23591087811271297,
385
+ "grad_norm": 1.901236891746521,
386
+ "learning_rate": 0.0001989881804494985,
387
+ "loss": 0.4522,
388
+ "step": 270
389
+ },
390
+ {
391
+ "epoch": 0.24027959807776322,
392
+ "grad_norm": 1.2583553791046143,
393
+ "learning_rate": 0.00019894880621641704,
394
+ "loss": 0.3869,
395
+ "step": 275
396
+ },
397
+ {
398
+ "epoch": 0.24464831804281345,
399
+ "grad_norm": 1.712336540222168,
400
+ "learning_rate": 0.00019890868447236613,
401
+ "loss": 0.454,
402
+ "step": 280
403
+ },
404
+ {
405
+ "epoch": 0.2490170380078637,
406
+ "grad_norm": 2.3967206478118896,
407
+ "learning_rate": 0.00019886781552044634,
408
+ "loss": 0.4074,
409
+ "step": 285
410
+ },
411
+ {
412
+ "epoch": 0.25338575797291396,
413
+ "grad_norm": 2.0578925609588623,
414
+ "learning_rate": 0.0001988261996694032,
415
+ "loss": 0.4268,
416
+ "step": 290
417
+ },
418
+ {
419
+ "epoch": 0.2577544779379642,
420
+ "grad_norm": 1.7411088943481445,
421
+ "learning_rate": 0.0001987838372336245,
422
+ "loss": 0.334,
423
+ "step": 295
424
+ },
425
+ {
426
+ "epoch": 0.2621231979030144,
427
+ "grad_norm": 1.8145533800125122,
428
+ "learning_rate": 0.0001987407285331382,
429
+ "loss": 0.4019,
430
+ "step": 300
431
+ },
432
+ {
433
+ "epoch": 0.26649191786806464,
434
+ "grad_norm": 1.3501653671264648,
435
+ "learning_rate": 0.00019869687389361,
436
+ "loss": 0.32,
437
+ "step": 305
438
+ },
439
+ {
440
+ "epoch": 0.27086063783311487,
441
+ "grad_norm": 1.208422303199768,
442
+ "learning_rate": 0.00019865227364634073,
443
+ "loss": 0.4548,
444
+ "step": 310
445
+ },
446
+ {
447
+ "epoch": 0.27522935779816515,
448
+ "grad_norm": 1.521690011024475,
449
+ "learning_rate": 0.00019860692812826396,
450
+ "loss": 0.3572,
451
+ "step": 315
452
+ },
453
+ {
454
+ "epoch": 0.2795980777632154,
455
+ "grad_norm": 2.2849714756011963,
456
+ "learning_rate": 0.0001985608376819434,
457
+ "loss": 0.4555,
458
+ "step": 320
459
+ },
460
+ {
461
+ "epoch": 0.2839667977282656,
462
+ "grad_norm": 2.7733798027038574,
463
+ "learning_rate": 0.00019851400265557037,
464
+ "loss": 0.4726,
465
+ "step": 325
466
+ },
467
+ {
468
+ "epoch": 0.28833551769331583,
469
+ "grad_norm": 1.973522424697876,
470
+ "learning_rate": 0.00019846642340296114,
471
+ "loss": 0.4585,
472
+ "step": 330
473
+ },
474
+ {
475
+ "epoch": 0.2927042376583661,
476
+ "grad_norm": 1.7133642435073853,
477
+ "learning_rate": 0.0001984181002835542,
478
+ "loss": 0.4679,
479
+ "step": 335
480
+ },
481
+ {
482
+ "epoch": 0.29707295762341634,
483
+ "grad_norm": 2.8383235931396484,
484
+ "learning_rate": 0.00019836903366240768,
485
+ "loss": 0.4119,
486
+ "step": 340
487
+ },
488
+ {
489
+ "epoch": 0.30144167758846657,
490
+ "grad_norm": 2.798276901245117,
491
+ "learning_rate": 0.00019831922391019645,
492
+ "loss": 0.3665,
493
+ "step": 345
494
+ },
495
+ {
496
+ "epoch": 0.3058103975535168,
497
+ "grad_norm": 2.171276569366455,
498
+ "learning_rate": 0.00019826867140320938,
499
+ "loss": 0.5691,
500
+ "step": 350
501
+ },
502
+ {
503
+ "epoch": 0.3101791175185671,
504
+ "grad_norm": 2.0866177082061768,
505
+ "learning_rate": 0.00019821737652334653,
506
+ "loss": 0.4074,
507
+ "step": 355
508
+ },
509
+ {
510
+ "epoch": 0.3145478374836173,
511
+ "grad_norm": 1.3713918924331665,
512
+ "learning_rate": 0.0001981653396581162,
513
+ "loss": 0.3379,
514
+ "step": 360
515
+ },
516
+ {
517
+ "epoch": 0.31891655744866754,
518
+ "grad_norm": 1.6086684465408325,
519
+ "learning_rate": 0.0001981125612006321,
520
+ "loss": 0.3563,
521
+ "step": 365
522
+ },
523
+ {
524
+ "epoch": 0.32328527741371776,
525
+ "grad_norm": 2.655686378479004,
526
+ "learning_rate": 0.0001980590415496102,
527
+ "loss": 0.3988,
528
+ "step": 370
529
+ },
530
+ {
531
+ "epoch": 0.32765399737876805,
532
+ "grad_norm": 1.5271559953689575,
533
+ "learning_rate": 0.00019800478110936596,
534
+ "loss": 0.5784,
535
+ "step": 375
536
+ },
537
+ {
538
+ "epoch": 0.3320227173438183,
539
+ "grad_norm": 1.3043195009231567,
540
+ "learning_rate": 0.00019794978028981106,
541
+ "loss": 0.2637,
542
+ "step": 380
543
+ },
544
+ {
545
+ "epoch": 0.3363914373088685,
546
+ "grad_norm": 2.539109706878662,
547
+ "learning_rate": 0.0001978940395064504,
548
+ "loss": 0.4658,
549
+ "step": 385
550
+ },
551
+ {
552
+ "epoch": 0.34076015727391873,
553
+ "grad_norm": 1.7521268129348755,
554
+ "learning_rate": 0.00019783755918037903,
555
+ "loss": 0.4253,
556
+ "step": 390
557
+ },
558
+ {
559
+ "epoch": 0.34512887723896896,
560
+ "grad_norm": 1.5679692029953003,
561
+ "learning_rate": 0.00019778033973827882,
562
+ "loss": 0.4528,
563
+ "step": 395
564
+ },
565
+ {
566
+ "epoch": 0.34949759720401924,
567
+ "grad_norm": 1.670640468597412,
568
+ "learning_rate": 0.00019772238161241528,
569
+ "loss": 0.3724,
570
+ "step": 400
571
+ },
572
+ {
573
+ "epoch": 0.35386631716906947,
574
+ "grad_norm": 1.520856261253357,
575
+ "learning_rate": 0.00019766368524063438,
576
+ "loss": 0.4141,
577
+ "step": 405
578
+ },
579
+ {
580
+ "epoch": 0.3582350371341197,
581
+ "grad_norm": 1.0802158117294312,
582
+ "learning_rate": 0.00019760425106635926,
583
+ "loss": 0.3268,
584
+ "step": 410
585
+ },
586
+ {
587
+ "epoch": 0.3626037570991699,
588
+ "grad_norm": 1.7306379079818726,
589
+ "learning_rate": 0.0001975440795385866,
590
+ "loss": 0.3654,
591
+ "step": 415
592
+ },
593
+ {
594
+ "epoch": 0.3669724770642202,
595
+ "grad_norm": 1.5037274360656738,
596
+ "learning_rate": 0.0001974831711118836,
597
+ "loss": 0.4285,
598
+ "step": 420
599
+ },
600
+ {
601
+ "epoch": 0.37134119702927043,
602
+ "grad_norm": 1.4654844999313354,
603
+ "learning_rate": 0.00019742152624638437,
604
+ "loss": 0.2548,
605
+ "step": 425
606
+ },
607
+ {
608
+ "epoch": 0.37570991699432066,
609
+ "grad_norm": 2.6770753860473633,
610
+ "learning_rate": 0.00019735914540778638,
611
+ "loss": 0.4238,
612
+ "step": 430
613
+ },
614
+ {
615
+ "epoch": 0.3800786369593709,
616
+ "grad_norm": 1.1864055395126343,
617
+ "learning_rate": 0.00019729602906734704,
618
+ "loss": 0.3959,
619
+ "step": 435
620
+ },
621
+ {
622
+ "epoch": 0.38444735692442117,
623
+ "grad_norm": 1.904876708984375,
624
+ "learning_rate": 0.00019723217770188024,
625
+ "loss": 0.3603,
626
+ "step": 440
627
+ },
628
+ {
629
+ "epoch": 0.3888160768894714,
630
+ "grad_norm": 1.7086598873138428,
631
+ "learning_rate": 0.0001971675917937525,
632
+ "loss": 0.551,
633
+ "step": 445
634
+ },
635
+ {
636
+ "epoch": 0.3931847968545216,
637
+ "grad_norm": 1.4635995626449585,
638
+ "learning_rate": 0.00019710227183087947,
639
+ "loss": 0.3738,
640
+ "step": 450
641
+ },
642
+ {
643
+ "epoch": 0.39755351681957185,
644
+ "grad_norm": 1.6047295331954956,
645
+ "learning_rate": 0.00019703621830672238,
646
+ "loss": 0.475,
647
+ "step": 455
648
+ },
649
+ {
650
+ "epoch": 0.40192223678462213,
651
+ "grad_norm": 1.4741933345794678,
652
+ "learning_rate": 0.00019696943172028394,
653
+ "loss": 0.4021,
654
+ "step": 460
655
+ },
656
+ {
657
+ "epoch": 0.40629095674967236,
658
+ "grad_norm": 2.8138020038604736,
659
+ "learning_rate": 0.00019690191257610497,
660
+ "loss": 0.3665,
661
+ "step": 465
662
+ },
663
+ {
664
+ "epoch": 0.4106596767147226,
665
+ "grad_norm": 1.6264874935150146,
666
+ "learning_rate": 0.00019683366138426034,
667
+ "loss": 0.3598,
668
+ "step": 470
669
+ },
670
+ {
671
+ "epoch": 0.4150283966797728,
672
+ "grad_norm": 1.6185061931610107,
673
+ "learning_rate": 0.00019676467866035525,
674
+ "loss": 0.5003,
675
+ "step": 475
676
+ },
677
+ {
678
+ "epoch": 0.41939711664482304,
679
+ "grad_norm": 1.8654040098190308,
680
+ "learning_rate": 0.00019669496492552113,
681
+ "loss": 0.397,
682
+ "step": 480
683
+ },
684
+ {
685
+ "epoch": 0.4237658366098733,
686
+ "grad_norm": 1.2525237798690796,
687
+ "learning_rate": 0.00019662452070641205,
688
+ "loss": 0.3235,
689
+ "step": 485
690
+ },
691
+ {
692
+ "epoch": 0.42813455657492355,
693
+ "grad_norm": 1.7755401134490967,
694
+ "learning_rate": 0.00019655334653520036,
695
+ "loss": 0.2978,
696
+ "step": 490
697
+ },
698
+ {
699
+ "epoch": 0.4325032765399738,
700
+ "grad_norm": 1.6025470495224,
701
+ "learning_rate": 0.00019648144294957297,
702
+ "loss": 0.4436,
703
+ "step": 495
704
+ },
705
+ {
706
+ "epoch": 0.436871996505024,
707
+ "grad_norm": 1.085461974143982,
708
+ "learning_rate": 0.00019640881049272713,
709
+ "loss": 0.22,
710
+ "step": 500
711
+ },
712
+ {
713
+ "epoch": 0.4412407164700743,
714
+ "grad_norm": 1.491818904876709,
715
+ "learning_rate": 0.00019633544971336636,
716
+ "loss": 0.2714,
717
+ "step": 505
718
+ },
719
+ {
720
+ "epoch": 0.4456094364351245,
721
+ "grad_norm": 0.9479840993881226,
722
+ "learning_rate": 0.0001962613611656963,
723
+ "loss": 0.3735,
724
+ "step": 510
725
+ },
726
+ {
727
+ "epoch": 0.44997815640017474,
728
+ "grad_norm": 3.0529448986053467,
729
+ "learning_rate": 0.0001961865454094205,
730
+ "loss": 0.4779,
731
+ "step": 515
732
+ },
733
+ {
734
+ "epoch": 0.45434687636522497,
735
+ "grad_norm": 2.831089973449707,
736
+ "learning_rate": 0.00019611100300973635,
737
+ "loss": 0.469,
738
+ "step": 520
739
+ },
740
+ {
741
+ "epoch": 0.45871559633027525,
742
+ "grad_norm": 2.1834311485290527,
743
+ "learning_rate": 0.00019603473453733052,
744
+ "loss": 0.4163,
745
+ "step": 525
746
+ },
747
+ {
748
+ "epoch": 0.4630843162953255,
749
+ "grad_norm": 1.3152204751968384,
750
+ "learning_rate": 0.00019595774056837493,
751
+ "loss": 0.3744,
752
+ "step": 530
753
+ },
754
+ {
755
+ "epoch": 0.4674530362603757,
756
+ "grad_norm": 1.4493387937545776,
757
+ "learning_rate": 0.00019588002168452223,
758
+ "loss": 0.3117,
759
+ "step": 535
760
+ },
761
+ {
762
+ "epoch": 0.47182175622542594,
763
+ "grad_norm": 1.1412076950073242,
764
+ "learning_rate": 0.00019580157847290147,
765
+ "loss": 0.3152,
766
+ "step": 540
767
+ },
768
+ {
769
+ "epoch": 0.47619047619047616,
770
+ "grad_norm": 1.5004645586013794,
771
+ "learning_rate": 0.00019572241152611365,
772
+ "loss": 0.3271,
773
+ "step": 545
774
+ },
775
+ {
776
+ "epoch": 0.48055919615552645,
777
+ "grad_norm": 2.3333992958068848,
778
+ "learning_rate": 0.0001956425214422272,
779
+ "loss": 0.3626,
780
+ "step": 550
781
+ },
782
+ {
783
+ "epoch": 0.4849279161205767,
784
+ "grad_norm": 1.5423107147216797,
785
+ "learning_rate": 0.0001955619088247736,
786
+ "loss": 0.4588,
787
+ "step": 555
788
+ },
789
+ {
790
+ "epoch": 0.4892966360856269,
791
+ "grad_norm": 3.008280038833618,
792
+ "learning_rate": 0.00019548057428274266,
793
+ "loss": 0.5275,
794
+ "step": 560
795
+ },
796
+ {
797
+ "epoch": 0.49366535605067713,
798
+ "grad_norm": 1.0968583822250366,
799
+ "learning_rate": 0.00019539851843057798,
800
+ "loss": 0.3233,
801
+ "step": 565
802
+ },
803
+ {
804
+ "epoch": 0.4980340760157274,
805
+ "grad_norm": 1.265228271484375,
806
+ "learning_rate": 0.00019531574188817234,
807
+ "loss": 0.2743,
808
+ "step": 570
809
+ },
810
+ {
811
+ "epoch": 0.5024027959807776,
812
+ "grad_norm": 1.9382916688919067,
813
+ "learning_rate": 0.000195232245280863,
814
+ "loss": 0.3189,
815
+ "step": 575
816
+ },
817
+ {
818
+ "epoch": 0.5067715159458279,
819
+ "grad_norm": 1.6710058450698853,
820
+ "learning_rate": 0.00019514802923942687,
821
+ "loss": 0.345,
822
+ "step": 580
823
+ },
824
+ {
825
+ "epoch": 0.5111402359108781,
826
+ "grad_norm": 1.8377633094787598,
827
+ "learning_rate": 0.000195063094400076,
828
+ "loss": 0.4441,
829
+ "step": 585
830
+ },
831
+ {
832
+ "epoch": 0.5155089558759284,
833
+ "grad_norm": 1.432173728942871,
834
+ "learning_rate": 0.0001949774414044525,
835
+ "loss": 0.3277,
836
+ "step": 590
837
+ },
838
+ {
839
+ "epoch": 0.5198776758409785,
840
+ "grad_norm": 1.096330165863037,
841
+ "learning_rate": 0.0001948910708996239,
842
+ "loss": 0.3821,
843
+ "step": 595
844
+ },
845
+ {
846
+ "epoch": 0.5242463958060288,
847
+ "grad_norm": 1.1951391696929932,
848
+ "learning_rate": 0.00019480398353807798,
849
+ "loss": 0.4303,
850
+ "step": 600
851
+ },
852
+ {
853
+ "epoch": 0.5286151157710791,
854
+ "grad_norm": 0.9764880537986755,
855
+ "learning_rate": 0.0001947161799777183,
856
+ "loss": 0.2693,
857
+ "step": 605
858
+ },
859
+ {
860
+ "epoch": 0.5329838357361293,
861
+ "grad_norm": 1.2566354274749756,
862
+ "learning_rate": 0.00019462766088185874,
863
+ "loss": 0.2851,
864
+ "step": 610
865
+ },
866
+ {
867
+ "epoch": 0.5373525557011796,
868
+ "grad_norm": 1.494903802871704,
869
+ "learning_rate": 0.0001945384269192188,
870
+ "loss": 0.36,
871
+ "step": 615
872
+ },
873
+ {
874
+ "epoch": 0.5417212756662297,
875
+ "grad_norm": 1.5508995056152344,
876
+ "learning_rate": 0.00019444847876391844,
877
+ "loss": 0.3682,
878
+ "step": 620
879
+ },
880
+ {
881
+ "epoch": 0.54608999563128,
882
+ "grad_norm": 2.227889060974121,
883
+ "learning_rate": 0.00019435781709547305,
884
+ "loss": 0.3889,
885
+ "step": 625
886
+ },
887
+ {
888
+ "epoch": 0.5504587155963303,
889
+ "grad_norm": 0.9221494197845459,
890
+ "learning_rate": 0.0001942664425987882,
891
+ "loss": 0.3375,
892
+ "step": 630
893
+ },
894
+ {
895
+ "epoch": 0.5548274355613805,
896
+ "grad_norm": 1.3386973142623901,
897
+ "learning_rate": 0.00019417435596415458,
898
+ "loss": 0.4833,
899
+ "step": 635
900
+ },
901
+ {
902
+ "epoch": 0.5591961555264308,
903
+ "grad_norm": 1.9686752557754517,
904
+ "learning_rate": 0.00019408155788724272,
905
+ "loss": 0.4739,
906
+ "step": 640
907
+ },
908
+ {
909
+ "epoch": 0.563564875491481,
910
+ "grad_norm": 2.3978073596954346,
911
+ "learning_rate": 0.00019398804906909777,
912
+ "loss": 0.4681,
913
+ "step": 645
914
+ },
915
+ {
916
+ "epoch": 0.5679335954565312,
917
+ "grad_norm": 1.536699652671814,
918
+ "learning_rate": 0.0001938938302161342,
919
+ "loss": 0.2684,
920
+ "step": 650
921
+ },
922
+ {
923
+ "epoch": 0.5723023154215815,
924
+ "grad_norm": 1.691787600517273,
925
+ "learning_rate": 0.00019379890204013043,
926
+ "loss": 0.3512,
927
+ "step": 655
928
+ },
929
+ {
930
+ "epoch": 0.5766710353866317,
931
+ "grad_norm": 1.7557870149612427,
932
+ "learning_rate": 0.0001937032652582235,
933
+ "loss": 0.3423,
934
+ "step": 660
935
+ },
936
+ {
937
+ "epoch": 0.581039755351682,
938
+ "grad_norm": 1.7950220108032227,
939
+ "learning_rate": 0.0001936069205929036,
940
+ "loss": 0.2831,
941
+ "step": 665
942
+ },
943
+ {
944
+ "epoch": 0.5854084753167322,
945
+ "grad_norm": 1.928232192993164,
946
+ "learning_rate": 0.00019350986877200867,
947
+ "loss": 0.323,
948
+ "step": 670
949
+ },
950
+ {
951
+ "epoch": 0.5897771952817824,
952
+ "grad_norm": 1.86429762840271,
953
+ "learning_rate": 0.00019341211052871887,
954
+ "loss": 0.4248,
955
+ "step": 675
956
+ },
957
+ {
958
+ "epoch": 0.5941459152468327,
959
+ "grad_norm": 2.022738456726074,
960
+ "learning_rate": 0.00019331364660155103,
961
+ "loss": 0.3411,
962
+ "step": 680
963
+ },
964
+ {
965
+ "epoch": 0.598514635211883,
966
+ "grad_norm": 1.2337995767593384,
967
+ "learning_rate": 0.00019321447773435306,
968
+ "loss": 0.2368,
969
+ "step": 685
970
+ },
971
+ {
972
+ "epoch": 0.6028833551769331,
973
+ "grad_norm": 2.015075445175171,
974
+ "learning_rate": 0.00019311460467629843,
975
+ "loss": 0.5116,
976
+ "step": 690
977
+ },
978
+ {
979
+ "epoch": 0.6072520751419834,
980
+ "grad_norm": 1.2344030141830444,
981
+ "learning_rate": 0.00019301402818188036,
982
+ "loss": 0.3313,
983
+ "step": 695
984
+ },
985
+ {
986
+ "epoch": 0.6116207951070336,
987
+ "grad_norm": 1.129764437675476,
988
+ "learning_rate": 0.00019291274901090625,
989
+ "loss": 0.408,
990
+ "step": 700
991
+ },
992
+ {
993
+ "epoch": 0.6159895150720839,
994
+ "grad_norm": 1.4350385665893555,
995
+ "learning_rate": 0.00019281076792849184,
996
+ "loss": 0.3729,
997
+ "step": 705
998
+ },
999
+ {
1000
+ "epoch": 0.6203582350371342,
1001
+ "grad_norm": 1.9586119651794434,
1002
+ "learning_rate": 0.00019270808570505553,
1003
+ "loss": 0.4315,
1004
+ "step": 710
1005
+ },
1006
+ {
1007
+ "epoch": 0.6247269550021843,
1008
+ "grad_norm": 1.0157238245010376,
1009
+ "learning_rate": 0.00019260470311631243,
1010
+ "loss": 0.2861,
1011
+ "step": 715
1012
+ },
1013
+ {
1014
+ "epoch": 0.6290956749672346,
1015
+ "grad_norm": 1.3841652870178223,
1016
+ "learning_rate": 0.00019250062094326864,
1017
+ "loss": 0.4037,
1018
+ "step": 720
1019
+ },
1020
+ {
1021
+ "epoch": 0.6334643949322848,
1022
+ "grad_norm": 1.848821997642517,
1023
+ "learning_rate": 0.00019239583997221525,
1024
+ "loss": 0.3665,
1025
+ "step": 725
1026
+ },
1027
+ {
1028
+ "epoch": 0.6378331148973351,
1029
+ "grad_norm": 0.9416481256484985,
1030
+ "learning_rate": 0.0001922903609947225,
1031
+ "loss": 0.339,
1032
+ "step": 730
1033
+ },
1034
+ {
1035
+ "epoch": 0.6422018348623854,
1036
+ "grad_norm": 1.0696804523468018,
1037
+ "learning_rate": 0.0001921841848076336,
1038
+ "loss": 0.2783,
1039
+ "step": 735
1040
+ },
1041
+ {
1042
+ "epoch": 0.6465705548274355,
1043
+ "grad_norm": 1.9199622869491577,
1044
+ "learning_rate": 0.00019207731221305903,
1045
+ "loss": 0.2904,
1046
+ "step": 740
1047
+ },
1048
+ {
1049
+ "epoch": 0.6509392747924858,
1050
+ "grad_norm": 1.347430944442749,
1051
+ "learning_rate": 0.00019196974401837008,
1052
+ "loss": 0.2719,
1053
+ "step": 745
1054
+ },
1055
+ {
1056
+ "epoch": 0.6553079947575361,
1057
+ "grad_norm": 0.9743670225143433,
1058
+ "learning_rate": 0.0001918614810361932,
1059
+ "loss": 0.2748,
1060
+ "step": 750
1061
+ },
1062
+ {
1063
+ "epoch": 0.6596767147225863,
1064
+ "grad_norm": 1.4043099880218506,
1065
+ "learning_rate": 0.00019175252408440343,
1066
+ "loss": 0.3285,
1067
+ "step": 755
1068
+ },
1069
+ {
1070
+ "epoch": 0.6640454346876365,
1071
+ "grad_norm": 2.9343338012695312,
1072
+ "learning_rate": 0.0001916428739861185,
1073
+ "loss": 0.4962,
1074
+ "step": 760
1075
+ },
1076
+ {
1077
+ "epoch": 0.6684141546526867,
1078
+ "grad_norm": 2.3201515674591064,
1079
+ "learning_rate": 0.0001915325315696926,
1080
+ "loss": 0.3243,
1081
+ "step": 765
1082
+ },
1083
+ {
1084
+ "epoch": 0.672782874617737,
1085
+ "grad_norm": 1.675564169883728,
1086
+ "learning_rate": 0.00019142149766870992,
1087
+ "loss": 0.4596,
1088
+ "step": 770
1089
+ },
1090
+ {
1091
+ "epoch": 0.6771515945827873,
1092
+ "grad_norm": 1.664604663848877,
1093
+ "learning_rate": 0.00019130977312197854,
1094
+ "loss": 0.3024,
1095
+ "step": 775
1096
+ },
1097
+ {
1098
+ "epoch": 0.6815203145478375,
1099
+ "grad_norm": 1.8358148336410522,
1100
+ "learning_rate": 0.00019119735877352412,
1101
+ "loss": 0.3862,
1102
+ "step": 780
1103
+ },
1104
+ {
1105
+ "epoch": 0.6858890345128877,
1106
+ "grad_norm": 1.3632128238677979,
1107
+ "learning_rate": 0.00019108425547258328,
1108
+ "loss": 0.2374,
1109
+ "step": 785
1110
+ },
1111
+ {
1112
+ "epoch": 0.6902577544779379,
1113
+ "grad_norm": 2.0279934406280518,
1114
+ "learning_rate": 0.0001909704640735975,
1115
+ "loss": 0.4392,
1116
+ "step": 790
1117
+ },
1118
+ {
1119
+ "epoch": 0.6946264744429882,
1120
+ "grad_norm": 1.2824902534484863,
1121
+ "learning_rate": 0.0001908559854362064,
1122
+ "loss": 0.2782,
1123
+ "step": 795
1124
+ },
1125
+ {
1126
+ "epoch": 0.6989951944080385,
1127
+ "grad_norm": 1.3477047681808472,
1128
+ "learning_rate": 0.00019074082042524145,
1129
+ "loss": 0.3631,
1130
+ "step": 800
1131
+ },
1132
+ {
1133
+ "epoch": 0.7033639143730887,
1134
+ "grad_norm": 1.8478046655654907,
1135
+ "learning_rate": 0.00019062496991071928,
1136
+ "loss": 0.3788,
1137
+ "step": 805
1138
+ },
1139
+ {
1140
+ "epoch": 0.7077326343381389,
1141
+ "grad_norm": 1.470382571220398,
1142
+ "learning_rate": 0.0001905084347678352,
1143
+ "loss": 0.3825,
1144
+ "step": 810
1145
+ },
1146
+ {
1147
+ "epoch": 0.7121013543031892,
1148
+ "grad_norm": 2.4951813220977783,
1149
+ "learning_rate": 0.00019039121587695652,
1150
+ "loss": 0.3359,
1151
+ "step": 815
1152
+ },
1153
+ {
1154
+ "epoch": 0.7164700742682394,
1155
+ "grad_norm": 2.3441359996795654,
1156
+ "learning_rate": 0.000190273314123616,
1157
+ "loss": 0.32,
1158
+ "step": 820
1159
+ },
1160
+ {
1161
+ "epoch": 0.7208387942332897,
1162
+ "grad_norm": 2.372884750366211,
1163
+ "learning_rate": 0.00019015473039850513,
1164
+ "loss": 0.3651,
1165
+ "step": 825
1166
+ },
1167
+ {
1168
+ "epoch": 0.7252075141983398,
1169
+ "grad_norm": 2.4474101066589355,
1170
+ "learning_rate": 0.0001900354655974672,
1171
+ "loss": 0.4401,
1172
+ "step": 830
1173
+ },
1174
+ {
1175
+ "epoch": 0.7295762341633901,
1176
+ "grad_norm": 1.4031054973602295,
1177
+ "learning_rate": 0.0001899155206214909,
1178
+ "loss": 0.308,
1179
+ "step": 835
1180
+ },
1181
+ {
1182
+ "epoch": 0.7339449541284404,
1183
+ "grad_norm": 1.6008141040802002,
1184
+ "learning_rate": 0.00018979489637670322,
1185
+ "loss": 0.2937,
1186
+ "step": 840
1187
+ },
1188
+ {
1189
+ "epoch": 0.7383136740934906,
1190
+ "grad_norm": 0.9202178120613098,
1191
+ "learning_rate": 0.0001896735937743627,
1192
+ "loss": 0.3157,
1193
+ "step": 845
1194
+ },
1195
+ {
1196
+ "epoch": 0.7426823940585409,
1197
+ "grad_norm": 1.024746298789978,
1198
+ "learning_rate": 0.00018955161373085253,
1199
+ "loss": 0.2934,
1200
+ "step": 850
1201
+ },
1202
+ {
1203
+ "epoch": 0.747051114023591,
1204
+ "grad_norm": 1.1573566198349,
1205
+ "learning_rate": 0.00018942895716767374,
1206
+ "loss": 0.3617,
1207
+ "step": 855
1208
+ },
1209
+ {
1210
+ "epoch": 0.7514198339886413,
1211
+ "grad_norm": 1.227409839630127,
1212
+ "learning_rate": 0.00018930562501143805,
1213
+ "loss": 0.3581,
1214
+ "step": 860
1215
+ },
1216
+ {
1217
+ "epoch": 0.7557885539536916,
1218
+ "grad_norm": 1.5460100173950195,
1219
+ "learning_rate": 0.00018918161819386095,
1220
+ "loss": 0.3393,
1221
+ "step": 865
1222
+ },
1223
+ {
1224
+ "epoch": 0.7601572739187418,
1225
+ "grad_norm": 1.688852310180664,
1226
+ "learning_rate": 0.0001890569376517548,
1227
+ "loss": 0.4389,
1228
+ "step": 870
1229
+ },
1230
+ {
1231
+ "epoch": 0.764525993883792,
1232
+ "grad_norm": 1.5271598100662231,
1233
+ "learning_rate": 0.00018893158432702149,
1234
+ "loss": 0.2915,
1235
+ "step": 875
1236
+ },
1237
+ {
1238
+ "epoch": 0.7688947138488423,
1239
+ "grad_norm": 1.695788860321045,
1240
+ "learning_rate": 0.00018880555916664555,
1241
+ "loss": 0.4026,
1242
+ "step": 880
1243
+ },
1244
+ {
1245
+ "epoch": 0.7732634338138925,
1246
+ "grad_norm": 1.6879792213439941,
1247
+ "learning_rate": 0.00018867886312268683,
1248
+ "loss": 0.2857,
1249
+ "step": 885
1250
+ },
1251
+ {
1252
+ "epoch": 0.7776321537789428,
1253
+ "grad_norm": 2.0718719959259033,
1254
+ "learning_rate": 0.00018855149715227344,
1255
+ "loss": 0.4236,
1256
+ "step": 890
1257
+ },
1258
+ {
1259
+ "epoch": 0.782000873743993,
1260
+ "grad_norm": 1.5112775564193726,
1261
+ "learning_rate": 0.00018842346221759448,
1262
+ "loss": 0.325,
1263
+ "step": 895
1264
+ },
1265
+ {
1266
+ "epoch": 0.7863695937090432,
1267
+ "grad_norm": 1.2844749689102173,
1268
+ "learning_rate": 0.00018829475928589271,
1269
+ "loss": 0.3782,
1270
+ "step": 900
1271
+ },
1272
+ {
1273
+ "epoch": 0.7907383136740935,
1274
+ "grad_norm": 2.150299072265625,
1275
+ "learning_rate": 0.00018816538932945728,
1276
+ "loss": 0.3726,
1277
+ "step": 905
1278
+ },
1279
+ {
1280
+ "epoch": 0.7951070336391437,
1281
+ "grad_norm": 1.7050650119781494,
1282
+ "learning_rate": 0.00018803535332561646,
1283
+ "loss": 0.3824,
1284
+ "step": 910
1285
+ },
1286
+ {
1287
+ "epoch": 0.799475753604194,
1288
+ "grad_norm": 1.8164982795715332,
1289
+ "learning_rate": 0.00018790465225673012,
1290
+ "loss": 0.3664,
1291
+ "step": 915
1292
+ },
1293
+ {
1294
+ "epoch": 0.8038444735692443,
1295
+ "grad_norm": 1.1102941036224365,
1296
+ "learning_rate": 0.00018777328711018244,
1297
+ "loss": 0.3166,
1298
+ "step": 920
1299
+ },
1300
+ {
1301
+ "epoch": 0.8082131935342944,
1302
+ "grad_norm": 1.4220764636993408,
1303
+ "learning_rate": 0.0001876412588783743,
1304
+ "loss": 0.3049,
1305
+ "step": 925
1306
+ },
1307
+ {
1308
+ "epoch": 0.8125819134993447,
1309
+ "grad_norm": 2.11336088180542,
1310
+ "learning_rate": 0.000187508568558716,
1311
+ "loss": 0.3076,
1312
+ "step": 930
1313
+ },
1314
+ {
1315
+ "epoch": 0.8169506334643949,
1316
+ "grad_norm": 1.9948710203170776,
1317
+ "learning_rate": 0.00018737521715361948,
1318
+ "loss": 0.3846,
1319
+ "step": 935
1320
+ },
1321
+ {
1322
+ "epoch": 0.8213193534294452,
1323
+ "grad_norm": 1.8913676738739014,
1324
+ "learning_rate": 0.00018724120567049094,
1325
+ "loss": 0.4296,
1326
+ "step": 940
1327
+ },
1328
+ {
1329
+ "epoch": 0.8256880733944955,
1330
+ "grad_norm": 1.3633447885513306,
1331
+ "learning_rate": 0.0001871065351217231,
1332
+ "loss": 0.3569,
1333
+ "step": 945
1334
+ },
1335
+ {
1336
+ "epoch": 0.8300567933595456,
1337
+ "grad_norm": 1.4957417249679565,
1338
+ "learning_rate": 0.00018697120652468762,
1339
+ "loss": 0.3085,
1340
+ "step": 950
1341
+ },
1342
+ {
1343
+ "epoch": 0.8344255133245959,
1344
+ "grad_norm": 2.076399803161621,
1345
+ "learning_rate": 0.0001868352209017275,
1346
+ "loss": 0.3331,
1347
+ "step": 955
1348
+ },
1349
+ {
1350
+ "epoch": 0.8387942332896461,
1351
+ "grad_norm": 1.1817855834960938,
1352
+ "learning_rate": 0.00018669857928014906,
1353
+ "loss": 0.3414,
1354
+ "step": 960
1355
+ },
1356
+ {
1357
+ "epoch": 0.8431629532546964,
1358
+ "grad_norm": 1.4255414009094238,
1359
+ "learning_rate": 0.00018656128269221454,
1360
+ "loss": 0.2782,
1361
+ "step": 965
1362
+ },
1363
+ {
1364
+ "epoch": 0.8475316732197467,
1365
+ "grad_norm": 1.326687216758728,
1366
+ "learning_rate": 0.0001864233321751341,
1367
+ "loss": 0.2998,
1368
+ "step": 970
1369
+ },
1370
+ {
1371
+ "epoch": 0.8519003931847968,
1372
+ "grad_norm": 2.222280263900757,
1373
+ "learning_rate": 0.00018628472877105793,
1374
+ "loss": 0.3348,
1375
+ "step": 975
1376
+ },
1377
+ {
1378
+ "epoch": 0.8562691131498471,
1379
+ "grad_norm": 1.518401026725769,
1380
+ "learning_rate": 0.00018614547352706863,
1381
+ "loss": 0.3816,
1382
+ "step": 980
1383
+ },
1384
+ {
1385
+ "epoch": 0.8606378331148974,
1386
+ "grad_norm": 1.1030207872390747,
1387
+ "learning_rate": 0.00018600556749517305,
1388
+ "loss": 0.3222,
1389
+ "step": 985
1390
+ },
1391
+ {
1392
+ "epoch": 0.8650065530799476,
1393
+ "grad_norm": 2.406994104385376,
1394
+ "learning_rate": 0.00018586501173229437,
1395
+ "loss": 0.3754,
1396
+ "step": 990
1397
+ },
1398
+ {
1399
+ "epoch": 0.8693752730449978,
1400
+ "grad_norm": 1.2401646375656128,
1401
+ "learning_rate": 0.00018572380730026434,
1402
+ "loss": 0.4402,
1403
+ "step": 995
1404
+ },
1405
+ {
1406
+ "epoch": 0.873743993010048,
1407
+ "grad_norm": 2.0233402252197266,
1408
+ "learning_rate": 0.0001855819552658149,
1409
+ "loss": 0.3323,
1410
+ "step": 1000
1411
+ },
1412
+ {
1413
+ "epoch": 0.8781127129750983,
1414
+ "grad_norm": 1.5329450368881226,
1415
+ "learning_rate": 0.00018543945670057045,
1416
+ "loss": 0.235,
1417
+ "step": 1005
1418
+ },
1419
+ {
1420
+ "epoch": 0.8824814329401486,
1421
+ "grad_norm": 1.8849459886550903,
1422
+ "learning_rate": 0.00018529631268103964,
1423
+ "loss": 0.357,
1424
+ "step": 1010
1425
+ },
1426
+ {
1427
+ "epoch": 0.8868501529051988,
1428
+ "grad_norm": 2.016646146774292,
1429
+ "learning_rate": 0.0001851525242886071,
1430
+ "loss": 0.2663,
1431
+ "step": 1015
1432
+ },
1433
+ {
1434
+ "epoch": 0.891218872870249,
1435
+ "grad_norm": 2.3272440433502197,
1436
+ "learning_rate": 0.0001850080926095255,
1437
+ "loss": 0.2926,
1438
+ "step": 1020
1439
+ },
1440
+ {
1441
+ "epoch": 0.8955875928352992,
1442
+ "grad_norm": 1.7760261297225952,
1443
+ "learning_rate": 0.00018486301873490713,
1444
+ "loss": 0.4155,
1445
+ "step": 1025
1446
+ },
1447
+ {
1448
+ "epoch": 0.8999563128003495,
1449
+ "grad_norm": 1.4679979085922241,
1450
+ "learning_rate": 0.0001847173037607159,
1451
+ "loss": 0.2877,
1452
+ "step": 1030
1453
+ },
1454
+ {
1455
+ "epoch": 0.9043250327653998,
1456
+ "grad_norm": 1.8398054838180542,
1457
+ "learning_rate": 0.0001845709487877588,
1458
+ "loss": 0.2856,
1459
+ "step": 1035
1460
+ },
1461
+ {
1462
+ "epoch": 0.9086937527304499,
1463
+ "grad_norm": 3.05880069732666,
1464
+ "learning_rate": 0.00018442395492167775,
1465
+ "loss": 0.3373,
1466
+ "step": 1040
1467
+ },
1468
+ {
1469
+ "epoch": 0.9130624726955002,
1470
+ "grad_norm": 1.2527328729629517,
1471
+ "learning_rate": 0.0001842763232729412,
1472
+ "loss": 0.2412,
1473
+ "step": 1045
1474
+ },
1475
+ {
1476
+ "epoch": 0.9174311926605505,
1477
+ "grad_norm": 1.7745814323425293,
1478
+ "learning_rate": 0.00018412805495683575,
1479
+ "loss": 0.3955,
1480
+ "step": 1050
1481
+ },
1482
+ {
1483
+ "epoch": 0.9217999126256007,
1484
+ "grad_norm": 3.2864468097686768,
1485
+ "learning_rate": 0.0001839791510934577,
1486
+ "loss": 0.333,
1487
+ "step": 1055
1488
+ },
1489
+ {
1490
+ "epoch": 0.926168632590651,
1491
+ "grad_norm": 2.0274927616119385,
1492
+ "learning_rate": 0.0001838296128077046,
1493
+ "loss": 0.4004,
1494
+ "step": 1060
1495
+ },
1496
+ {
1497
+ "epoch": 0.9305373525557011,
1498
+ "grad_norm": 1.9851633310317993,
1499
+ "learning_rate": 0.0001836794412292668,
1500
+ "loss": 0.3132,
1501
+ "step": 1065
1502
+ },
1503
+ {
1504
+ "epoch": 0.9349060725207514,
1505
+ "grad_norm": 1.3309999704360962,
1506
+ "learning_rate": 0.00018352863749261883,
1507
+ "loss": 0.2645,
1508
+ "step": 1070
1509
+ },
1510
+ {
1511
+ "epoch": 0.9392747924858017,
1512
+ "grad_norm": 2.0173072814941406,
1513
+ "learning_rate": 0.00018337720273701088,
1514
+ "loss": 0.4376,
1515
+ "step": 1075
1516
+ },
1517
+ {
1518
+ "epoch": 0.9436435124508519,
1519
+ "grad_norm": 1.815408706665039,
1520
+ "learning_rate": 0.00018322513810646024,
1521
+ "loss": 0.2851,
1522
+ "step": 1080
1523
+ },
1524
+ {
1525
+ "epoch": 0.9480122324159022,
1526
+ "grad_norm": 1.1190584897994995,
1527
+ "learning_rate": 0.00018307244474974254,
1528
+ "loss": 0.4664,
1529
+ "step": 1085
1530
+ },
1531
+ {
1532
+ "epoch": 0.9523809523809523,
1533
+ "grad_norm": 0.9746566414833069,
1534
+ "learning_rate": 0.00018291912382038317,
1535
+ "loss": 0.3816,
1536
+ "step": 1090
1537
+ },
1538
+ {
1539
+ "epoch": 0.9567496723460026,
1540
+ "grad_norm": 1.9062715768814087,
1541
+ "learning_rate": 0.0001827651764766485,
1542
+ "loss": 0.3031,
1543
+ "step": 1095
1544
+ },
1545
+ {
1546
+ "epoch": 0.9611183923110529,
1547
+ "grad_norm": 1.027502417564392,
1548
+ "learning_rate": 0.00018261060388153718,
1549
+ "loss": 0.2657,
1550
+ "step": 1100
1551
+ }
1552
+ ],
1553
+ "logging_steps": 5,
1554
+ "max_steps": 5725,
1555
+ "num_input_tokens_seen": 0,
1556
+ "num_train_epochs": 5,
1557
+ "save_steps": 100,
1558
+ "stateful_callbacks": {
1559
+ "TrainerControl": {
1560
+ "args": {
1561
+ "should_epoch_stop": false,
1562
+ "should_evaluate": false,
1563
+ "should_log": false,
1564
+ "should_save": true,
1565
+ "should_training_stop": false
1566
+ },
1567
+ "attributes": {}
1568
+ }
1569
+ },
1570
+ "total_flos": 534084911751168.0,
1571
+ "train_batch_size": 4,
1572
+ "trial_name": null,
1573
+ "trial_params": null
1574
+ }
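The `log_history` entries above (epoch, grad_norm, learning_rate, loss, step) are plain JSON, so the loss curve of any saved checkpoint can be inspected directly from its `trainer_state.json`. A minimal sketch, assuming the checkpoint directory has been downloaded locally (the path below is illustrative):

```python
import json
from pathlib import Path

# Illustrative path: point it at any downloaded checkpoint directory.
state_path = Path("checkpoint-1100/trainer_state.json")

with state_path.open() as f:
    state = json.load(f)

# Each log_history entry (written every `logging_steps` = 5 steps) carries
# step, loss, learning_rate and grad_norm, as shown in the diff above.
history = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in history]
losses = [e["loss"] for e in history]

print(f"logged points: {len(history)}")
print(f"final step {steps[-1]}: loss {losses[-1]:.4f}")
print(f"min loss: {min(losses):.4f} at step {steps[losses.index(min(losses))]}")
```

The same dictionary also exposes the run-level settings visible above (`logging_steps`, `save_steps`, `max_steps`, `num_train_epochs`, `train_batch_size`, `total_flos`).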
checkpoint-1100/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:629a35960f565450f5c9e65334fb9c14eb136182b8af4bc22c885bdde32de5f3
3
+ size 5777
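`training_args.bin` is stored as a Git LFS pointer: the three lines above record only the pointer spec version, the SHA-256 of the real blob, and its size (5777 bytes). A hedged sketch of resolving it through `huggingface_hub` and inspecting the saved arguments; the repo id is a placeholder (the repository name does not appear in this diff), and loading assumes the file was written by `transformers`' `Trainer` via `torch.save`:

```python
from huggingface_hub import hf_hub_download
import torch

# Placeholder: substitute the actual Hub repository this commit belongs to.
repo_id = "your-username/your-adapter-repo"

# Resolves the Git LFS pointer above to the real ~5.8 kB binary.
args_path = hf_hub_download(repo_id=repo_id, filename="checkpoint-1100/training_args.bin")

# training_args.bin is a pickled TrainingArguments object; transformers must be
# installed so the pickle can resolve that class.
training_args = torch.load(args_path, weights_only=False)
print(training_args)
```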
checkpoint-1200/README.md ADDED
@@ -0,0 +1,207 @@
1
+ ---
2
+ base_model: unsloth/functiongemma-270m-it
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:unsloth/functiongemma-270m-it
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.17.1
checkpoint-1200/adapter_config.json ADDED
@@ -0,0 +1,42 @@
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "unsloth/functiongemma-270m-it",
5
+ "bias": "none",
6
+ "corda_config": null,
7
+ "eva_config": null,
8
+ "exclude_modules": null,
9
+ "fan_in_fan_out": false,
10
+ "inference_mode": true,
11
+ "init_lora_weights": true,
12
+ "layer_replication": null,
13
+ "layers_pattern": null,
14
+ "layers_to_transform": null,
15
+ "loftq_config": {},
16
+ "lora_alpha": 128,
17
+ "lora_bias": false,
18
+ "lora_dropout": 0.05,
19
+ "megatron_config": null,
20
+ "megatron_core": "megatron.core",
21
+ "modules_to_save": null,
22
+ "peft_type": "LORA",
23
+ "qalora_group_size": 16,
24
+ "r": 64,
25
+ "rank_pattern": {},
26
+ "revision": null,
27
+ "target_modules": [
28
+ "gate_proj",
29
+ "down_proj",
30
+ "o_proj",
31
+ "k_proj",
32
+ "q_proj",
33
+ "v_proj",
34
+ "up_proj"
35
+ ],
36
+ "target_parameters": null,
37
+ "task_type": "CAUSAL_LM",
38
+ "trainable_token_indices": null,
39
+ "use_dora": false,
40
+ "use_qalora": false,
41
+ "use_rslora": false
42
+ }
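The adapter config above fixes the LoRA geometry (r=64, lora_alpha=128, dropout 0.05) over the seven attention and MLP projections, with `unsloth/functiongemma-270m-it` as the base model. A minimal loading sketch with PEFT (the checkpoint READMEs list PEFT 0.17.1); the adapter path is an assumption and could equally be the Hub repo id:

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Base model taken from adapter_config.json above.
base_id = "unsloth/functiongemma-270m-it"
# Placeholder: a local checkpoint directory or the Hub repo id of this adapter.
adapter_path = "checkpoint-1200"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id)

# Wraps the frozen base model with the r=64, alpha=128 LoRA weights.
model = PeftModel.from_pretrained(base_model, adapter_path)
model.eval()

# For real use, apply the repo's chat template; a plain prompt keeps the sketch minimal.
inputs = tokenizer("Hello", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

If a standalone model is preferred, `model.merge_and_unload()` folds the LoRA deltas back into the base weights.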