Dataset: HuggingFaceFW/fineweb-edu (FineWeb-Edu)
Languages: English
Size: n>1T
ArXiv: 2406.17557
DOI: 10.57967/hf/2497
License: odc-by
guipenedo, anton-l, loubnabnl, and thomwolf committed
Commit 3a9b4e7 (0 parents)

Super-squash branch 'main' using huggingface_hub


Co-authored-by: anton-l <[email protected]>
Co-authored-by: loubnabnl <[email protected]>
Co-authored-by: thomwolf <[email protected]>

This view is limited to 50 files because it contains too many changes; see the raw diff for the full list.
Files changed (50):
  1. .gitattributes +55 -0
  2. README.md +602 -0
  3. data/CC-MAIN-2013-20/train-00000-of-00014.parquet +3 -0
  4. data/CC-MAIN-2013-20/train-00001-of-00014.parquet +3 -0
  5. data/CC-MAIN-2013-20/train-00002-of-00014.parquet +3 -0
  6. data/CC-MAIN-2013-20/train-00003-of-00014.parquet +3 -0
  7. data/CC-MAIN-2013-20/train-00004-of-00014.parquet +3 -0
  8. data/CC-MAIN-2013-20/train-00005-of-00014.parquet +3 -0
  9. data/CC-MAIN-2013-20/train-00006-of-00014.parquet +3 -0
  10. data/CC-MAIN-2013-20/train-00007-of-00014.parquet +3 -0
  11. data/CC-MAIN-2013-20/train-00008-of-00014.parquet +3 -0
  12. data/CC-MAIN-2013-20/train-00009-of-00014.parquet +3 -0
  13. data/CC-MAIN-2013-20/train-00010-of-00014.parquet +3 -0
  14. data/CC-MAIN-2013-20/train-00011-of-00014.parquet +3 -0
  15. data/CC-MAIN-2013-20/train-00012-of-00014.parquet +3 -0
  16. data/CC-MAIN-2013-20/train-00013-of-00014.parquet +3 -0
  17. data/CC-MAIN-2013-48/train-00000-of-00014.parquet +3 -0
  18. data/CC-MAIN-2013-48/train-00001-of-00014.parquet +3 -0
  19. data/CC-MAIN-2013-48/train-00002-of-00014.parquet +3 -0
  20. data/CC-MAIN-2013-48/train-00003-of-00014.parquet +3 -0
  21. data/CC-MAIN-2013-48/train-00004-of-00014.parquet +3 -0
  22. data/CC-MAIN-2013-48/train-00005-of-00014.parquet +3 -0
  23. data/CC-MAIN-2013-48/train-00006-of-00014.parquet +3 -0
  24. data/CC-MAIN-2013-48/train-00007-of-00014.parquet +3 -0
  25. data/CC-MAIN-2013-48/train-00008-of-00014.parquet +3 -0
  26. data/CC-MAIN-2013-48/train-00009-of-00014.parquet +3 -0
  27. data/CC-MAIN-2013-48/train-00010-of-00014.parquet +3 -0
  28. data/CC-MAIN-2013-48/train-00011-of-00014.parquet +3 -0
  29. data/CC-MAIN-2013-48/train-00012-of-00014.parquet +3 -0
  30. data/CC-MAIN-2013-48/train-00013-of-00014.parquet +3 -0
  31. data/CC-MAIN-2014-10/train-00000-of-00014.parquet +3 -0
  32. data/CC-MAIN-2014-10/train-00001-of-00014.parquet +3 -0
  33. data/CC-MAIN-2014-10/train-00002-of-00014.parquet +3 -0
  34. data/CC-MAIN-2014-10/train-00003-of-00014.parquet +3 -0
  35. data/CC-MAIN-2014-10/train-00004-of-00014.parquet +3 -0
  36. data/CC-MAIN-2014-10/train-00005-of-00014.parquet +3 -0
  37. data/CC-MAIN-2014-10/train-00006-of-00014.parquet +3 -0
  38. data/CC-MAIN-2014-10/train-00007-of-00014.parquet +3 -0
  39. data/CC-MAIN-2014-10/train-00008-of-00014.parquet +3 -0
  40. data/CC-MAIN-2014-10/train-00009-of-00014.parquet +3 -0
  41. data/CC-MAIN-2014-10/train-00010-of-00014.parquet +3 -0
  42. data/CC-MAIN-2014-10/train-00011-of-00014.parquet +3 -0
  43. data/CC-MAIN-2014-10/train-00012-of-00014.parquet +3 -0
  44. data/CC-MAIN-2014-10/train-00013-of-00014.parquet +3 -0
  45. data/CC-MAIN-2014-15/train-00000-of-00014.parquet +3 -0
  46. data/CC-MAIN-2014-15/train-00001-of-00014.parquet +3 -0
  47. data/CC-MAIN-2014-15/train-00002-of-00014.parquet +3 -0
  48. data/CC-MAIN-2014-15/train-00003-of-00014.parquet +3 -0
  49. data/CC-MAIN-2014-15/train-00004-of-00014.parquet +3 -0
  50. data/CC-MAIN-2014-15/train-00005-of-00014.parquet +3 -0
.gitattributes ADDED
@@ -0,0 +1,55 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Audio files - uncompressed
+ *.pcm filter=lfs diff=lfs merge=lfs -text
+ *.sam filter=lfs diff=lfs merge=lfs -text
+ *.raw filter=lfs diff=lfs merge=lfs -text
+ # Audio files - compressed
+ *.aac filter=lfs diff=lfs merge=lfs -text
+ *.flac filter=lfs diff=lfs merge=lfs -text
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
+ *.ogg filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
+ # Image files - uncompressed
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.tiff filter=lfs diff=lfs merge=lfs -text
+ # Image files - compressed
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,602 @@
1
+ ---
2
+ license: odc-by
3
+ task_categories:
4
+ - text-generation
5
+ language:
6
+ - en
7
+ pretty_name: FineWeb-Edu
8
+ size_categories:
9
+ - n>1T
10
+ configs:
11
+ - config_name: default
12
+ data_files:
13
+ - split: train
14
+ path: data/*/*
15
+ - config_name: sample-10BT
16
+ data_files:
17
+ - split: train
18
+ path: sample/10BT/*
19
+ - config_name: sample-100BT
20
+ data_files:
21
+ - split: train
22
+ path: sample/100BT/*
23
+ - config_name: sample-350BT
24
+ data_files:
25
+ - split: train
26
+ path: sample/350BT/*
27
+ - config_name: CC-MAIN-2024-51
28
+ data_files:
29
+ - split: train
30
+ path: data/CC-MAIN-2024-51/*
31
+ - config_name: CC-MAIN-2024-46
32
+ data_files:
33
+ - split: train
34
+ path: data/CC-MAIN-2024-46/*
35
+ - config_name: CC-MAIN-2024-42
36
+ data_files:
37
+ - split: train
38
+ path: data/CC-MAIN-2024-42/*
39
+ - config_name: CC-MAIN-2024-38
40
+ data_files:
41
+ - split: train
42
+ path: data/CC-MAIN-2024-38/*
43
+ - config_name: CC-MAIN-2024-33
44
+ data_files:
45
+ - split: train
46
+ path: data/CC-MAIN-2024-33/*
47
+ - config_name: CC-MAIN-2024-30
48
+ data_files:
49
+ - split: train
50
+ path: data/CC-MAIN-2024-30/*
51
+ - config_name: CC-MAIN-2024-26
52
+ data_files:
53
+ - split: train
54
+ path: data/CC-MAIN-2024-26/*
55
+ - config_name: CC-MAIN-2024-22
56
+ data_files:
57
+ - split: train
58
+ path: data/CC-MAIN-2024-22/*
59
+ - config_name: CC-MAIN-2024-18
60
+ data_files:
61
+ - split: train
62
+ path: data/CC-MAIN-2024-18/*
63
+ - config_name: CC-MAIN-2024-10
64
+ data_files:
65
+ - split: train
66
+ path: data/CC-MAIN-2024-10/*
67
+ - config_name: CC-MAIN-2023-50
68
+ data_files:
69
+ - split: train
70
+ path: data/CC-MAIN-2023-50/*
71
+ - config_name: CC-MAIN-2023-40
72
+ data_files:
73
+ - split: train
74
+ path: data/CC-MAIN-2023-40/*
75
+ - config_name: CC-MAIN-2023-23
76
+ data_files:
77
+ - split: train
78
+ path: data/CC-MAIN-2023-23/*
79
+ - config_name: CC-MAIN-2023-14
80
+ data_files:
81
+ - split: train
82
+ path: data/CC-MAIN-2023-14/*
83
+ - config_name: CC-MAIN-2023-06
84
+ data_files:
85
+ - split: train
86
+ path: data/CC-MAIN-2023-06/*
87
+ - config_name: CC-MAIN-2022-49
88
+ data_files:
89
+ - split: train
90
+ path: data/CC-MAIN-2022-49/*
91
+ - config_name: CC-MAIN-2022-40
92
+ data_files:
93
+ - split: train
94
+ path: data/CC-MAIN-2022-40/*
95
+ - config_name: CC-MAIN-2022-33
96
+ data_files:
97
+ - split: train
98
+ path: data/CC-MAIN-2022-33/*
99
+ - config_name: CC-MAIN-2022-27
100
+ data_files:
101
+ - split: train
102
+ path: data/CC-MAIN-2022-27/*
103
+ - config_name: CC-MAIN-2022-21
104
+ data_files:
105
+ - split: train
106
+ path: data/CC-MAIN-2022-21/*
107
+ - config_name: CC-MAIN-2022-05
108
+ data_files:
109
+ - split: train
110
+ path: data/CC-MAIN-2022-05/*
111
+ - config_name: CC-MAIN-2021-49
112
+ data_files:
113
+ - split: train
114
+ path: data/CC-MAIN-2021-49/*
115
+ - config_name: CC-MAIN-2021-43
116
+ data_files:
117
+ - split: train
118
+ path: data/CC-MAIN-2021-43/*
119
+ - config_name: CC-MAIN-2021-39
120
+ data_files:
121
+ - split: train
122
+ path: data/CC-MAIN-2021-39/*
123
+ - config_name: CC-MAIN-2021-31
124
+ data_files:
125
+ - split: train
126
+ path: data/CC-MAIN-2021-31/*
127
+ - config_name: CC-MAIN-2021-25
128
+ data_files:
129
+ - split: train
130
+ path: data/CC-MAIN-2021-25/*
131
+ - config_name: CC-MAIN-2021-21
132
+ data_files:
133
+ - split: train
134
+ path: data/CC-MAIN-2021-21/*
135
+ - config_name: CC-MAIN-2021-17
136
+ data_files:
137
+ - split: train
138
+ path: data/CC-MAIN-2021-17/*
139
+ - config_name: CC-MAIN-2021-10
140
+ data_files:
141
+ - split: train
142
+ path: data/CC-MAIN-2021-10/*
143
+ - config_name: CC-MAIN-2021-04
144
+ data_files:
145
+ - split: train
146
+ path: data/CC-MAIN-2021-04/*
147
+ - config_name: CC-MAIN-2020-50
148
+ data_files:
149
+ - split: train
150
+ path: data/CC-MAIN-2020-50/*
151
+ - config_name: CC-MAIN-2020-45
152
+ data_files:
153
+ - split: train
154
+ path: data/CC-MAIN-2020-45/*
155
+ - config_name: CC-MAIN-2020-40
156
+ data_files:
157
+ - split: train
158
+ path: data/CC-MAIN-2020-40/*
159
+ - config_name: CC-MAIN-2020-34
160
+ data_files:
161
+ - split: train
162
+ path: data/CC-MAIN-2020-34/*
163
+ - config_name: CC-MAIN-2020-29
164
+ data_files:
165
+ - split: train
166
+ path: data/CC-MAIN-2020-29/*
167
+ - config_name: CC-MAIN-2020-24
168
+ data_files:
169
+ - split: train
170
+ path: data/CC-MAIN-2020-24/*
171
+ - config_name: CC-MAIN-2020-16
172
+ data_files:
173
+ - split: train
174
+ path: data/CC-MAIN-2020-16/*
175
+ - config_name: CC-MAIN-2020-10
176
+ data_files:
177
+ - split: train
178
+ path: data/CC-MAIN-2020-10/*
179
+ - config_name: CC-MAIN-2020-05
180
+ data_files:
181
+ - split: train
182
+ path: data/CC-MAIN-2020-05/*
183
+ - config_name: CC-MAIN-2019-51
184
+ data_files:
185
+ - split: train
186
+ path: data/CC-MAIN-2019-51/*
187
+ - config_name: CC-MAIN-2019-47
188
+ data_files:
189
+ - split: train
190
+ path: data/CC-MAIN-2019-47/*
191
+ - config_name: CC-MAIN-2019-43
192
+ data_files:
193
+ - split: train
194
+ path: data/CC-MAIN-2019-43/*
195
+ - config_name: CC-MAIN-2019-39
196
+ data_files:
197
+ - split: train
198
+ path: data/CC-MAIN-2019-39/*
199
+ - config_name: CC-MAIN-2019-35
200
+ data_files:
201
+ - split: train
202
+ path: data/CC-MAIN-2019-35/*
203
+ - config_name: CC-MAIN-2019-30
204
+ data_files:
205
+ - split: train
206
+ path: data/CC-MAIN-2019-30/*
207
+ - config_name: CC-MAIN-2019-26
208
+ data_files:
209
+ - split: train
210
+ path: data/CC-MAIN-2019-26/*
211
+ - config_name: CC-MAIN-2019-22
212
+ data_files:
213
+ - split: train
214
+ path: data/CC-MAIN-2019-22/*
215
+ - config_name: CC-MAIN-2019-18
216
+ data_files:
217
+ - split: train
218
+ path: data/CC-MAIN-2019-18/*
219
+ - config_name: CC-MAIN-2019-13
220
+ data_files:
221
+ - split: train
222
+ path: data/CC-MAIN-2019-13/*
223
+ - config_name: CC-MAIN-2019-09
224
+ data_files:
225
+ - split: train
226
+ path: data/CC-MAIN-2019-09/*
227
+ - config_name: CC-MAIN-2019-04
228
+ data_files:
229
+ - split: train
230
+ path: data/CC-MAIN-2019-04/*
231
+ - config_name: CC-MAIN-2018-51
232
+ data_files:
233
+ - split: train
234
+ path: data/CC-MAIN-2018-51/*
235
+ - config_name: CC-MAIN-2018-47
236
+ data_files:
237
+ - split: train
238
+ path: data/CC-MAIN-2018-47/*
239
+ - config_name: CC-MAIN-2018-43
240
+ data_files:
241
+ - split: train
242
+ path: data/CC-MAIN-2018-43/*
243
+ - config_name: CC-MAIN-2018-39
244
+ data_files:
245
+ - split: train
246
+ path: data/CC-MAIN-2018-39/*
247
+ - config_name: CC-MAIN-2018-34
248
+ data_files:
249
+ - split: train
250
+ path: data/CC-MAIN-2018-34/*
251
+ - config_name: CC-MAIN-2018-30
252
+ data_files:
253
+ - split: train
254
+ path: data/CC-MAIN-2018-30/*
255
+ - config_name: CC-MAIN-2018-26
256
+ data_files:
257
+ - split: train
258
+ path: data/CC-MAIN-2018-26/*
259
+ - config_name: CC-MAIN-2018-22
260
+ data_files:
261
+ - split: train
262
+ path: data/CC-MAIN-2018-22/*
263
+ - config_name: CC-MAIN-2018-17
264
+ data_files:
265
+ - split: train
266
+ path: data/CC-MAIN-2018-17/*
267
+ - config_name: CC-MAIN-2018-13
268
+ data_files:
269
+ - split: train
270
+ path: data/CC-MAIN-2018-13/*
271
+ - config_name: CC-MAIN-2018-09
272
+ data_files:
273
+ - split: train
274
+ path: data/CC-MAIN-2018-09/*
275
+ - config_name: CC-MAIN-2018-05
276
+ data_files:
277
+ - split: train
278
+ path: data/CC-MAIN-2018-05/*
279
+ - config_name: CC-MAIN-2017-51
280
+ data_files:
281
+ - split: train
282
+ path: data/CC-MAIN-2017-51/*
283
+ - config_name: CC-MAIN-2017-47
284
+ data_files:
285
+ - split: train
286
+ path: data/CC-MAIN-2017-47/*
287
+ - config_name: CC-MAIN-2017-43
288
+ data_files:
289
+ - split: train
290
+ path: data/CC-MAIN-2017-43/*
291
+ - config_name: CC-MAIN-2017-39
292
+ data_files:
293
+ - split: train
294
+ path: data/CC-MAIN-2017-39/*
295
+ - config_name: CC-MAIN-2017-34
296
+ data_files:
297
+ - split: train
298
+ path: data/CC-MAIN-2017-34/*
299
+ - config_name: CC-MAIN-2017-30
300
+ data_files:
301
+ - split: train
302
+ path: data/CC-MAIN-2017-30/*
303
+ - config_name: CC-MAIN-2017-26
304
+ data_files:
305
+ - split: train
306
+ path: data/CC-MAIN-2017-26/*
307
+ - config_name: CC-MAIN-2017-22
308
+ data_files:
309
+ - split: train
310
+ path: data/CC-MAIN-2017-22/*
311
+ - config_name: CC-MAIN-2017-17
312
+ data_files:
313
+ - split: train
314
+ path: data/CC-MAIN-2017-17/*
315
+ - config_name: CC-MAIN-2017-13
316
+ data_files:
317
+ - split: train
318
+ path: data/CC-MAIN-2017-13/*
319
+ - config_name: CC-MAIN-2017-09
320
+ data_files:
321
+ - split: train
322
+ path: data/CC-MAIN-2017-09/*
323
+ - config_name: CC-MAIN-2017-04
324
+ data_files:
325
+ - split: train
326
+ path: data/CC-MAIN-2017-04/*
327
+ - config_name: CC-MAIN-2016-50
328
+ data_files:
329
+ - split: train
330
+ path: data/CC-MAIN-2016-50/*
331
+ - config_name: CC-MAIN-2016-44
332
+ data_files:
333
+ - split: train
334
+ path: data/CC-MAIN-2016-44/*
335
+ - config_name: CC-MAIN-2016-40
336
+ data_files:
337
+ - split: train
338
+ path: data/CC-MAIN-2016-40/*
339
+ - config_name: CC-MAIN-2016-36
340
+ data_files:
341
+ - split: train
342
+ path: data/CC-MAIN-2016-36/*
343
+ - config_name: CC-MAIN-2016-30
344
+ data_files:
345
+ - split: train
346
+ path: data/CC-MAIN-2016-30/*
347
+ - config_name: CC-MAIN-2016-26
348
+ data_files:
349
+ - split: train
350
+ path: data/CC-MAIN-2016-26/*
351
+ - config_name: CC-MAIN-2016-22
352
+ data_files:
353
+ - split: train
354
+ path: data/CC-MAIN-2016-22/*
355
+ - config_name: CC-MAIN-2016-18
356
+ data_files:
357
+ - split: train
358
+ path: data/CC-MAIN-2016-18/*
359
+ - config_name: CC-MAIN-2016-07
360
+ data_files:
361
+ - split: train
362
+ path: data/CC-MAIN-2016-07/*
363
+ - config_name: CC-MAIN-2015-48
364
+ data_files:
365
+ - split: train
366
+ path: data/CC-MAIN-2015-48/*
367
+ - config_name: CC-MAIN-2015-40
368
+ data_files:
369
+ - split: train
370
+ path: data/CC-MAIN-2015-40/*
371
+ - config_name: CC-MAIN-2015-35
372
+ data_files:
373
+ - split: train
374
+ path: data/CC-MAIN-2015-35/*
375
+ - config_name: CC-MAIN-2015-32
376
+ data_files:
377
+ - split: train
378
+ path: data/CC-MAIN-2015-32/*
379
+ - config_name: CC-MAIN-2015-27
380
+ data_files:
381
+ - split: train
382
+ path: data/CC-MAIN-2015-27/*
383
+ - config_name: CC-MAIN-2015-22
384
+ data_files:
385
+ - split: train
386
+ path: data/CC-MAIN-2015-22/*
387
+ - config_name: CC-MAIN-2015-18
388
+ data_files:
389
+ - split: train
390
+ path: data/CC-MAIN-2015-18/*
391
+ - config_name: CC-MAIN-2015-14
392
+ data_files:
393
+ - split: train
394
+ path: data/CC-MAIN-2015-14/*
395
+ - config_name: CC-MAIN-2015-11
396
+ data_files:
397
+ - split: train
398
+ path: data/CC-MAIN-2015-11/*
399
+ - config_name: CC-MAIN-2015-06
400
+ data_files:
401
+ - split: train
402
+ path: data/CC-MAIN-2015-06/*
403
+ - config_name: CC-MAIN-2014-52
404
+ data_files:
405
+ - split: train
406
+ path: data/CC-MAIN-2014-52/*
407
+ - config_name: CC-MAIN-2014-49
408
+ data_files:
409
+ - split: train
410
+ path: data/CC-MAIN-2014-49/*
411
+ - config_name: CC-MAIN-2014-42
412
+ data_files:
413
+ - split: train
414
+ path: data/CC-MAIN-2014-42/*
415
+ - config_name: CC-MAIN-2014-41
416
+ data_files:
417
+ - split: train
418
+ path: data/CC-MAIN-2014-41/*
419
+ - config_name: CC-MAIN-2014-35
420
+ data_files:
421
+ - split: train
422
+ path: data/CC-MAIN-2014-35/*
423
+ - config_name: CC-MAIN-2014-23
424
+ data_files:
425
+ - split: train
426
+ path: data/CC-MAIN-2014-23/*
427
+ - config_name: CC-MAIN-2014-15
428
+ data_files:
429
+ - split: train
430
+ path: data/CC-MAIN-2014-15/*
431
+ - config_name: CC-MAIN-2014-10
432
+ data_files:
433
+ - split: train
434
+ path: data/CC-MAIN-2014-10/*
435
+ - config_name: CC-MAIN-2013-48
436
+ data_files:
437
+ - split: train
438
+ path: data/CC-MAIN-2013-48/*
439
+ - config_name: CC-MAIN-2013-20
440
+ data_files:
441
+ - split: train
442
+ path: data/CC-MAIN-2013-20/*
443
+ ---
+
+ # 📚 FineWeb-Edu
+ <center>
+ <img src="https://cdn-uploads.huggingface.co/production/uploads/61c141342aac764ce1654e43/wwRnEQydH9qdRtFofIE-A.png" alt="FineWeb-Edu: The finest collection of educational content the web has to offer">
+ </center>
+
+ > 1.3 trillion tokens of the finest educational data the 🌐 web has to offer
+
+ **Paper:** https://arxiv.org/abs/2406.17557
+
+ ## What is it?
+
+ The 📚 FineWeb-Edu dataset consists of **1.3T tokens** and **5.4T tokens** ([FineWeb-Edu-score-2](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu-score-2)) of educational web pages filtered from the 🍷 FineWeb dataset. This is the 1.3 trillion token version.
+
+ To enhance FineWeb's quality, we developed an [educational quality classifier](https://huggingface.co/HuggingFaceFW/fineweb-edu-classifier) using annotations generated by Llama3-70B-Instruct. We then used this classifier to retain only the most educational web pages. FineWeb-Edu outperforms FineWeb on popular benchmarks and shows the power of classifiers trained on synthetic data.
+
+ The [Dataset Curation](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu#dataset-curation) section details the process for creating the dataset.
+
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/61c141342aac764ce1654e43/QqXOM8h_ZjjhuCv71xmV7.png)
+
+ You can find a deduplicated version of FineWeb-Edu in [SmolLM-Corpus](https://huggingface.co/datasets/HuggingFaceTB/smollm-corpus). We find that deduplicating this dataset doesn't have any impact on model performance in our ablation setup (a 1.8B model trained on 350B tokens).
+
+ ## What is being released?
+
+ Along with the dataset, which includes all filtered CommonCrawl dumps since 2013, we also release the educational classifier used for the filtering, as well as the code for training it and running inference, at: https://github.com/huggingface/cosmopedia/tree/main/classification
+
+ ## Changelog
+ _Previous versions remain available in the branch `version name`._
+
+ - **v1.2.0 (03-01-2025):** Added 9 new snapshots: `CC-MAIN-2024-18`, `CC-MAIN-2024-22`, `CC-MAIN-2024-26`, `CC-MAIN-2024-30`, `CC-MAIN-2024-33`, `CC-MAIN-2024-38`, `CC-MAIN-2024-42`, `CC-MAIN-2024-46`, `CC-MAIN-2024-51`, covering April to December 2024.
+ - **v1.0.0 (02-06-2024):** Initial version
+
+
+ ## How to load the dataset
+ Similarly to FineWeb, you can load the full dataset or a specific crawl/dump. Dumps have the format `CC-MAIN-(year)-(week number)`.
+
+ ### (Smaller) sample versions
+ Along with the `default` config (all the data) and the configs for each individual dump, you can also download the following configs:
+ - `sample-350BT`: a subset randomly sampled from the whole dataset of around 350B gpt2 tokens
+ - `sample-100BT`: a subset randomly sampled from the whole dataset of around 100B gpt2 tokens
+ - `sample-10BT`: a subset randomly sampled from the whole dataset of around 10B gpt2 tokens
+
+ `sample-10BT` was sampled from `sample-100BT`, which in turn was sampled from `sample-350BT`.
+
+ ### Using 🏭 [`datatrove`](https://github.com/huggingface/datatrove/)
+
+ ```python
+ from datatrove.pipeline.readers import ParquetReader
+
+ # limit determines how many documents will be streamed (remove for all)
+ data_reader = ParquetReader("hf://datasets/HuggingFaceFW/fineweb-edu", glob_pattern="data/*/*.parquet", limit=1000)
+ # or to fetch a specific dump, e.g. CC-MAIN-2024-10 (replace "CC-MAIN-2024-10" with "sample/100BT" to use the 100BT sample)
+ data_reader = ParquetReader("hf://datasets/HuggingFaceFW/fineweb-edu/CC-MAIN-2024-10", limit=1000)
+ for document in data_reader():
+     # do something with document
+     print(document)
+
+ ###############################
+ # OR for a processing pipeline:
+ ###############################
+
+ from datatrove.executor import LocalPipelineExecutor
+ from datatrove.pipeline.readers import ParquetReader
+ from datatrove.pipeline.filters import LambdaFilter
+ from datatrove.pipeline.writers import JsonlWriter
+
+ pipeline_exec = LocalPipelineExecutor(
+     pipeline=[
+         # replace "CC-MAIN-2024-10" with "sample/100BT" to use the 100BT sample
+         ParquetReader("hf://datasets/HuggingFaceFW/fineweb-edu/CC-MAIN-2024-10", limit=1000),
+         LambdaFilter(lambda doc: "hugging" in doc.text),
+         JsonlWriter("some-output-path")
+     ],
+     tasks=10
+ )
+ pipeline_exec.run()
+ ```
+
+ ### Using `datasets`
+
+ ```python
+ from datasets import load_dataset
+ # use name="sample-10BT" to use the 10BT sample
+ fw = load_dataset("HuggingFaceFW/fineweb-edu", name="CC-MAIN-2024-10", split="train", streaming=True)
+ ```
+
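+ If you would rather download the parquet files locally than stream them, `huggingface_hub` can fetch a single dump. The following is a minimal sketch: the `data/<dump>/` layout matches the file listing above, while the local directory and the `allow_patterns` value are illustrative choices.
+
+ ```python
+ from huggingface_hub import snapshot_download
+
+ # download only the parquet files of one dump (pattern and target folder are illustrative)
+ folder = snapshot_download(
+     "HuggingFaceFW/fineweb-edu",
+     repo_type="dataset",
+     local_dir="./fineweb-edu/",
+     allow_patterns="data/CC-MAIN-2023-50/*",
+ )
+ print(folder)
+ ```
+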
+ ## Dataset curation
+ A new approach has recently emerged for filtering LLM training datasets: using synthetic data to develop classifiers for identifying educational content. This technique was used in the training of [Llama3](https://ai.meta.com/blog/meta-llama-3-meta-ai-responsibility/) and [Phi3](https://arxiv.org/abs/2404.14219), but its large-scale impact on web data filtering hasn't been fully explored or published.
+
+ The highly popular Phi3 models were trained on 3.3 and 4.8 trillion tokens, with the paper stating: "Our training data consists of heavily filtered publicly available web data (according to the 'educational level') from various open internet sources, as well as synthetic LLM-generated data." Similarly, the Llama3 blog post notes: "We found that previous generations of Llama are good at identifying high-quality data, so we used Llama 2 to help build the text-quality classifiers that are powering Llama 3." However, these classifiers and filtered datasets are not publicly available. To enhance FineWeb's quality, we developed an educational quality classifier using annotations generated by [Llama3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) to create FineWeb-Edu.
+
+ ### Annotation
+ We used [Llama3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) to score 500k FineWeb samples for their educational quality on a scale from 0 to 5.
+
+ We explored various prompts and found that the additive scale by [Yuan et al.](https://arxiv.org/pdf/2401.10020) worked best. To avoid the LLM favoring highly technical pages like arXiv abstracts and submissions, we focused on grade-school and middle-school level knowledge. By setting a threshold of 3 (on a scale of 0 to 5) during the filtering process, we were able to also retain some high-level educational pages. The final prompt can be found [here](https://huggingface.co/HuggingFaceFW/fineweb-edu-classifier/blob/main/utils/prompt.txt).
+
+ We also experimented with different LLMs: Llama3-70B-Instruct, Mixtral-8x7B-Instruct, and Mixtral-8x22B-Instruct. Llama 3 and Mixtral-8x22B produced similar scores, while Mixtral-8x7B tended to be more generous, not fully adhering to the score scale. Verga et al. suggest using multiple LLMs as juries. We tried averaging the scores from the three models, but this shifted the distribution to the right due to the higher scores from Mixtral-8x7B. Training on a dataset filtered with a classifier using jury annotations performed worse than using a classifier based on Llama3 annotations. We hypothesize that the jury-based approach retains more low-quality samples.
+
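+ For illustration only, an annotation request of this kind could be issued with `huggingface_hub`'s `InferenceClient`, as sketched below. The instruction text here is a loose paraphrase of the additive-scale idea, not the actual prompt (which is linked above), and access to the gated Llama 3 weights or an inference provider is assumed.
+
+ ```python
+ from huggingface_hub import InferenceClient
+
+ client = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")  # gated model / provider access assumed
+
+ extract = "The water cycle describes how water evaporates, condenses into clouds, and falls back as rain."
+ # Illustrative additive-style instruction; the real prompt lives in the classifier repo (utils/prompt.txt).
+ prompt = (
+     "Evaluate the following web page extract for its educational value to grade-school students. "
+     "Award one point for each criterion met, up to a maximum of 5, and finish with "
+     "'Educational score: <total points>'.\n\nExtract:\n" + extract
+ )
+ completion = client.chat_completion(messages=[{"role": "user", "content": prompt}], max_tokens=300)
+ print(completion.choices[0].message.content)
+ ```
+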
+ ### Classifier training
+ We fine-tuned a BERT-like regression model using these annotations, based on [Snowflake-arctic-embed](https://huggingface.co/Snowflake/snowflake-arctic-embed-m). When converted to a binary classifier using a score of 3 as the threshold for keeping or removing documents, the model achieved an F1 score of 82%. Classifying FineWeb's 15T tokens took 6k H100 GPU hours.
+
+ The classifier is available at: [HuggingFaceFW/fineweb-edu-classifier/](https://huggingface.co/HuggingFaceFW/fineweb-edu-classifier/)
+
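+ As a quick sanity check, the released classifier can be used to score an arbitrary text with `transformers`. The sketch below assumes the standard sequence-classification interface with a single regression logit; see the model card for the exact, supported usage.
+
+ ```python
+ import torch
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+
+ tokenizer = AutoTokenizer.from_pretrained("HuggingFaceFW/fineweb-edu-classifier")
+ model = AutoModelForSequenceClassification.from_pretrained("HuggingFaceFW/fineweb-edu-classifier")
+
+ text = "Photosynthesis is the process by which plants convert light energy into chemical energy."
+ inputs = tokenizer(text, return_tensors="pt", padding="longest", truncation=True)
+ with torch.no_grad():
+     score = model(**inputs).logits.squeeze(-1).item()  # educational score, roughly on the 0-5 scale
+
+ # a threshold of 3 is what FineWeb-Edu uses to keep or drop a document
+ print(score, "keep" if score >= 3 else "drop")
+ ```
+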
+ ### Filtering and results
+ **Note**: You can find more details about the ablations and results in the FineWeb [blog post](https://huggingface.co/spaces/HuggingFaceFW/blogpost-fineweb-v1).
+
+ We investigated the impact of using different thresholds for the filtering and found that threshold 3 gave the best overall results. Although using a threshold higher than 3 improves performance on knowledge- and reasoning-intensive benchmarks, it significantly degrades performance on HellaSwag and PIQA.
+
+ We then built 📚 FineWeb-Edu by filtering out samples with scores lower than 3. This removed 92% of the dataset, leaving us with 1.3T educational tokens. Our ablation demonstrated that this refined dataset surpasses 🍷 FineWeb and all other open web datasets, with remarkable improvements on educational benchmarks such as MMLU, ARC, and OpenBookQA. The plot below compares FineWeb-Edu to other web datasets:
+
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/61c141342aac764ce1654e43/hJlyTgDzZpYuxO9LUm0PF.png)
+
+ To retain more tokens, we also experimented with a less strict threshold of 2 instead of 3. While less performant than threshold 3, it still outperformed FineWeb and preserved 5.4T tokens. We release these two datasets as [FineWeb-Edu](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu) and [FineWeb-Edu-score-2](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu-score-2) along with the [classifier](https://huggingface.co/HuggingFaceFW/fineweb-edu-classifier).
+
+ You will find all the ablation models in [this collection](https://huggingface.co/collections/HuggingFaceFW/ablation-models-662457b0d213e8c14fe47f32). The FineWeb-Edu ablation model (trained on 350B tokens) is available at [https://huggingface.co/HuggingFaceFW/ablation-model-fineweb-edu](https://huggingface.co/HuggingFaceFW/ablation-model-fineweb-edu).
+
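+ Since each released document carries its classifier score, a stricter cut can be re-applied downstream. A minimal sketch with `datasets` is shown below; the column names (`int_score`, `url`) are assumptions to be checked against the dataset viewer.
+
+ ```python
+ from datasets import load_dataset
+
+ # stream one dump and keep only documents the classifier rated 4 or higher
+ fw = load_dataset("HuggingFaceFW/fineweb-edu", name="CC-MAIN-2024-10", split="train", streaming=True)
+ high_edu = fw.filter(lambda doc: doc["int_score"] >= 4)  # column name assumed, see the dataset viewer
+
+ for doc in high_edu.take(5):
+     print(doc["url"], doc["int_score"])
+ ```
+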
+ ## Considerations for Using the Data
+ This section is copied from the parent dataset: [FineWeb](https://huggingface.co/datasets/HuggingFaceFW/fineweb).
+
+ ### Social Impact of Dataset
+
+ With the release of this dataset we aim to make model training more accessible to the machine learning community at large.
+
+ While multiple open-weights models with strong performance have been publicly released in the past, more often than not these releases are not accompanied by the corresponding training dataset. This is unfortunate, as the dataset's specificities and characteristics have been demonstrated to have a very large impact on model performance. As the creation of a high-quality training dataset is a fundamental requirement for training an LLM capable of excelling at downstream tasks, with 🍷 FineWeb we (a) make the dataset creation process more transparent by sharing our entire processing setup, including the codebase used, and (b) help alleviate the costs of dataset curation, both in time and in compute, for model creators by publicly releasing our dataset to the community.
+
+ ### Discussion of Biases
+
+ Efforts were made to minimize the amount of NSFW and toxic content present in the dataset by employing filtering at the URL level. However, there are still a significant number of documents present in the final dataset that could be considered toxic or contain harmful content. As 🍷 FineWeb was sourced from the web as a whole, any harmful biases typically present in it may be reproduced in our dataset.
+
+ We deliberately avoided using machine learning filtering methods that define text quality based on similarity to a "gold" source such as Wikipedia, or toxicity classifiers, as these methods have been known to [disproportionately remove content in specific dialects](https://aclanthology.org/D16-1120/) and [overclassify as toxic text related to specific social identities](https://arxiv.org/pdf/2109.07445.pdf), respectively.
+
+ ### Other Known Limitations
+
+ As a consequence of some of the filtering steps applied, it is likely that code content is not prevalent in our dataset. If you are training a model that should also perform code tasks, we recommend you use 🍷 FineWeb with a code dataset, such as [The Stack v2](https://huggingface.co/datasets/bigcode/the-stack-v2). You should also probably consider complementing 🍷 FineWeb with specialized curated sources (such as Wikipedia), as they will likely have better formatting than the Wikipedia content included in 🍷 FineWeb (we did not tailor the processing to individual websites).
+
+ ## Additional Information
+
+ ### Licensing Information
+
+ The dataset is released under the **Open Data Commons Attribution License (ODC-By) v1.0** [license](https://opendatacommons.org/licenses/by/1-0/). The use of this dataset is also subject to [CommonCrawl's Terms of Use](https://commoncrawl.org/terms-of-use).
+
+ ### Future work
+
+ We plan to work on a better educational classifier to improve the quality of FineWeb-Edu.
+
+ ### Citation Information
+
+ You can cite our paper https://arxiv.org/abs/2406.17557 or this dataset:
+
+ ```
+ @misc{lozhkov2024fineweb-edu,
+   author = { Lozhkov, Anton and Ben Allal, Loubna and von Werra, Leandro and Wolf, Thomas },
+   title = { FineWeb-Edu: the Finest Collection of Educational Content },
+   year = 2024,
+   url = { https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu },
+   doi = { 10.57967/hf/2497 },
+   publisher = { Hugging Face }
+ }
+ ```
data/CC-MAIN-2013-20/train-00000-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb989c566f6fba00ab61decc5f7aa1538a07d9b142e58a52ff790154528ffd03
3
+ size 2369456837
data/CC-MAIN-2013-20/train-00001-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:90105b41a509da93651a4364127c83451f97de61e438074f86ece09dfd86961e
3
+ size 2376360695
data/CC-MAIN-2013-20/train-00002-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:14fc9ec030a8f3a6dfee9a3286ab8eb979ec4cab3df1ad503779dc8dac631261
3
+ size 2366714674
data/CC-MAIN-2013-20/train-00003-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:145d0a0d25d7f441febc08abc46fcff2c95bab8ebafbc1f96f343f9ca2b83285
3
+ size 2349721791
data/CC-MAIN-2013-20/train-00004-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1dce281f2924512c3f4c7ef9f39a6521bc04ca30b7fdcf7121ea9825316ff6de
3
+ size 2347567335
data/CC-MAIN-2013-20/train-00005-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea40c90b2ef3565c3382c9954f3d80584ff81daf6ad65606b54265d9125962ec
3
+ size 2344454381
data/CC-MAIN-2013-20/train-00006-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0acdafc24aa73112ee78f1c5f136e924b490a5d88f36677c1c444caa8507142b
3
+ size 2336964549
data/CC-MAIN-2013-20/train-00007-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68852a8748a3707e2eb5cb1bee0e4cb243588fc82d0552cfebf5741d42bbda92
3
+ size 2320625373
data/CC-MAIN-2013-20/train-00008-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11748c3f363cfa4aa456e59608222dd8fd6ee301ff9bdeb7eac57a46ed7bd5ab
3
+ size 2307363915
data/CC-MAIN-2013-20/train-00009-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b02eb37467007501a9bb06284023222d4991e0a5f7960778bcca24f6a7be2cb
3
+ size 2310654336
data/CC-MAIN-2013-20/train-00010-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f0fb9098157edb573d38a0cf9d27d6bf7718b13b0fb8a99e4bd8564434ba69c
3
+ size 2294070592
data/CC-MAIN-2013-20/train-00011-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e719a6f913aa3753e490f5170b1da5d82b5c82c8c82de2887530f2d82a9c874
3
+ size 2294546063
data/CC-MAIN-2013-20/train-00012-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d7b0850b29f91c0bdfb0f3668379cd541d5c334bc23045e8aa89d31eeaacd7fc
3
+ size 2295986307
data/CC-MAIN-2013-20/train-00013-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:71c607b1a2bc3cd304009b5601ac662041af1e0879506fd360c47d32de967d56
3
+ size 2284902248
data/CC-MAIN-2013-48/train-00000-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:29db96009ac1de5a42336ec0ae4ec6fee3e8dd8aec4de1d1b50aa2b38478caec
3
+ size 2329592314
data/CC-MAIN-2013-48/train-00001-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:db5fce1225d1b17bde6d3cd4f0148ae04e8580b7b8cd048c65f8d6414503e493
3
+ size 2332353351
data/CC-MAIN-2013-48/train-00002-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7c16a52b1c352d4a70aef14721f72a3886ab3a4a6a1c252ff285dc9fa5ade402
3
+ size 2327303340
data/CC-MAIN-2013-48/train-00003-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:424a7bee659a160dc2d3a80e703e36439589850c126ebd1a4af8d010b840dd0f
3
+ size 2316277195
data/CC-MAIN-2013-48/train-00004-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1137697278aa528361f2e6fe241f299738949cabba15ce7e74990eeff682f0e5
3
+ size 2295811884
data/CC-MAIN-2013-48/train-00005-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aac083d0117e2b4e78d2b69b73c764020f26d6087d8a92bccb4a7a5c1ac2bbfd
3
+ size 2299104720
data/CC-MAIN-2013-48/train-00006-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3a15ec6b424d336be9a4c704fa85e8fe46407123bc6ccea6053057c4634ef165
3
+ size 2278623295
data/CC-MAIN-2013-48/train-00007-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c0787aa05d46cc647796246b0fc93e2501bf0b53d9da45539d93c9184c20460
3
+ size 2274629249
data/CC-MAIN-2013-48/train-00008-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:008397de104732df8a08417815510d27d9daa5ddea5d1f4409c39f527b442a10
3
+ size 2273053146
data/CC-MAIN-2013-48/train-00009-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a994524b8aba4f1423b17cd1552c5c08fa77f5c086129c5ba7a86d72210c0738
3
+ size 2269555721
data/CC-MAIN-2013-48/train-00010-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:126a0f67008354d253786f887e6dd65accf99b5414821d7454f893d4c187a36c
3
+ size 2255912359
data/CC-MAIN-2013-48/train-00011-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d355beaec4969f4de01cc5c250ca6fe6f474b4e527d6e64219cb15fc2d885875
3
+ size 2258788899
data/CC-MAIN-2013-48/train-00012-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d26c7bb689d9f5581baabd33d440e30096a68811f6e95358b858bbbad405541
3
+ size 2252159861
data/CC-MAIN-2013-48/train-00013-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b4b9e8f331117c7210f967ddbc10decdebb3bd5267a5fbca9129ebe25be463e
3
+ size 2251108466
data/CC-MAIN-2014-10/train-00000-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:301b3805b7b505ccb899f1ffab25f7d06e1d19d8ae221f4177044e04a1943e2b
3
+ size 2378402603
data/CC-MAIN-2014-10/train-00001-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:73ccd40928affce688bcc23e19a9288eda67f20762850d7ed08fb8848657aad3
3
+ size 2374581082
data/CC-MAIN-2014-10/train-00002-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:10d45b5276fd3f0b984bcb8e8079892aa169a11bdb0cccc7b92eed2b7bf1419d
3
+ size 2370839030
data/CC-MAIN-2014-10/train-00003-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:03d6f43cae56c3747c49480167122782466a879b724cd2e473f2c736dcc41f2f
3
+ size 2367174733
data/CC-MAIN-2014-10/train-00004-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d2356a49dc4bae1c726b3bc27f2e1fb93955f5393b12e987faf79702d72c1168
3
+ size 2355201629
data/CC-MAIN-2014-10/train-00005-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:48bd01b21e42d698e77a7cd025b3ce63298e686a00786968d68dca743fd9bba5
3
+ size 2353220675
data/CC-MAIN-2014-10/train-00006-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f091b04c2d26f7d39eb8ac5fef323798a2280276bd1a43d295038a35945935d
3
+ size 2344324541
data/CC-MAIN-2014-10/train-00007-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3be423a243289bc7afcaaa3566d9ceea9c027fd9daec6713040cae02ed3f6d8f
3
+ size 2339577028
data/CC-MAIN-2014-10/train-00008-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef5afa503b1606af25c5e9d83a4a86974e27a0f58ef0db6f1a0ae493dfaee373
3
+ size 2341770146
data/CC-MAIN-2014-10/train-00009-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f6a9cabfbbbb018ada4c4f33d9bd00e0f82faf70b9c8c9c607f0d4f8671be3fa
3
+ size 2329802974
data/CC-MAIN-2014-10/train-00010-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9fc7c2d1cca1cb60cde18fe6c647edee6e69a5e0d187297430d43ae06752a241
3
+ size 2325948344
data/CC-MAIN-2014-10/train-00011-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b3d3a1ff655881b616b5bea1ccd120cbaffd5d0b0e1c5f587b86c24207a7a84a
3
+ size 2324674557
data/CC-MAIN-2014-10/train-00012-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:67c4321416ea340f0aa9f42dbdbec5a002e17b67790d5d70f7483297b9bb0978
3
+ size 2326385593
data/CC-MAIN-2014-10/train-00013-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0820fe329bc52566cdaf591499a5432bbfd932bdd972bfe1018d7e39b03f668
3
+ size 2317814766
data/CC-MAIN-2014-15/train-00000-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9da4ada954d1f1d0c8be46717bc59ed9364fb611768fa4b20423495d9672408a
3
+ size 2271082635
data/CC-MAIN-2014-15/train-00001-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:86df6b7ab75933cf935cc86fba6e05ddc6862251a106a9a6c561a81b865b7230
3
+ size 2276397299
data/CC-MAIN-2014-15/train-00002-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ed1c7b21827aea13b8a66112e3e7ab82ca70793a2ad26418fd61ba26a3a2ad1b
3
+ size 2276871097
data/CC-MAIN-2014-15/train-00003-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a73bdfb43bae173832151f71441e8702c9bd99a4810a20e71c974f2e8fd8a098
3
+ size 2274158040
data/CC-MAIN-2014-15/train-00004-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:510e3c1cfc6b96efc4e5e89c9ea9103bc0dccc2ba45fbbff056507e6191d26af
3
+ size 2263202434
data/CC-MAIN-2014-15/train-00005-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:17fc24a67cc070942015ca26a71660265f70ab223f8e2fc7a1d289e60e883959
3
+ size 2265126394