sachin6624 committed
Commit 14aa376 · verified · 1 Parent(s): 19c3f3a

tiny shakespeare

Files changed (1)
  1. tiny_shakespeare.py +110 -0
tiny_shakespeare.py ADDED
@@ -0,0 +1,110 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """Tiny Shakespeare dataset."""
+
+
+ import os
+
+ import datasets
+
+
+ _CITATION = """\
+ @misc{karpathy2015charrnn,
+   author={Karpathy, Andrej},
+   title={char-rnn},
+   year={2015},
+   howpublished={\\url{https://github.com/karpathy/char-rnn}}
+ }"""
+
+ _DESCRIPTION = """\
+ 40,000 lines of Shakespeare from a variety of Shakespeare's plays. \
+ Featured in Andrej Karpathy's blog post 'The Unreasonable Effectiveness of \
+ Recurrent Neural Networks': \
+ http://karpathy.github.io/2015/05/21/rnn-effectiveness/.
+
+ To use for e.g. character modelling:
+
+ ```
+ d = datasets.load_dataset('tiny_shakespeare')['train']
+ # Each split holds a single example containing the full split text.
+ text = d[0]['text']
+ # train split includes vocabulary for other splits
+ vocabulary = sorted(set(text))
+ # Chop the text into fixed-length current/next character sequences.
+ seq_len = 100
+ chunks = [text[i:i + seq_len + 1] for i in range(0, len(text) - seq_len, seq_len)]
+ examples = [{'cur_chars': c[:-1], 'next_chars': c[1:]} for c in chunks]
+ ```
+ """
+
+
+ class TinyShakespeare(datasets.GeneratorBasedBuilder):
+     """Tiny Shakespeare dataset builder."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features({"text": datasets.Value("string")}),
+             supervised_keys=None,
+             homepage="https://github.com/karpathy/char-rnn/blob/master/data/tinyshakespeare/input.txt",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         download_path = dl_manager.download_and_extract(
+             "https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt"
+         )
+         if os.path.isdir(download_path):
+             # During testing the download manager mock gives us a directory
+             txt_path = os.path.join(download_path, "input.txt")
+         else:
+             txt_path = download_path
+         with open(txt_path, "r", encoding="utf-8") as f:
+             text = f.read()
+
+         # 90/5/5 split
+         i = int(len(text) * 0.9)
+         train_text, text = text[:i], text[i:]
+         i = int(len(text) * 0.5)
+         validation_text, text = text[:i], text[i:]
+         test_text = text
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"split_key": "train", "split_text": train_text},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"split_key": "validation", "split_text": validation_text},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"split_key": "test", "split_text": test_text},
+             ),
+         ]
+
+     def _generate_examples(self, split_key, split_text):
+         """Yields examples."""
+         data_key = split_key  # Should uniquely identify the thing yielded
+         feature_dict = {"text": split_text}
+         yield data_key, feature_dict
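
A minimal usage sketch of loading and sanity-checking the splits this builder produces, assuming the script is published under the `tiny_shakespeare` name it uses above:

```
import datasets

# Load all three splits produced by the builder.
ds = datasets.load_dataset("tiny_shakespeare")

# Each split contains a single example holding that split's full text.
train_text = ds["train"][0]["text"]
validation_text = ds["validation"][0]["text"]
test_text = ds["test"][0]["text"]

# Character proportions should follow the 90/5/5 logic in _split_generators.
total = len(train_text) + len(validation_text) + len(test_text)
print(f"train:      {len(train_text) / total:.2f}")       # ~0.90
print(f"validation: {len(validation_text) / total:.2f}")  # ~0.05
print(f"test:       {len(test_text) / total:.2f}")        # ~0.05
```

Because each split yields exactly one example, downstream code typically re-chunks the text into fixed-length sequences (as in the `_DESCRIPTION` snippet) rather than iterating over rows.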