Tasks: Text Generation
Formats: parquet
Sub-tasks: language-modeling
Languages: Danish
Size: 10M - 100M
Kenneth Enevoldsen committed: format

Files changed:
- data/domsdatabasen/create.py +1 -1
- data/lexdk/create.py +1 -1
- data/memo/create.py +3 -3
- data/ncc_books/create.py +4 -4
- data/ncc_maalfrid/create.py +4 -4
- data/ncc_newspaper/create.py +4 -4
- data/ncc_parliament/create.py +4 -4
- src/dynaword/datasheet.py +1 -1
- src/dynaword/plot_tokens_over_time.py +4 -4
- src/tests/test_quality/test_short_texts.py +3 -3
data/domsdatabasen/create.py
CHANGED
@@ -114,7 +114,7 @@ def retry(func, *args, retries=RETRY_COUNT, delay=RETRY_DELAY, **kwargs):
         try:
             return func(*args, **kwargs)
         except Exception as e:
-            logger.warning(f"⚠️ Retry {attempt+1}/{retries} failed: {e}")
+            logger.warning(f"⚠️ Retry {attempt + 1}/{retries} failed: {e}")
             time.sleep(delay)
     raise RuntimeError(f"❌ All retries failed for {func.__name__}({args})")

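For context, the changed line sits inside a small retry helper whose signature appears in the hunk header. Below is a minimal, self-contained sketch of how such a wrapper fits together; the RETRY_COUNT and RETRY_DELAY values and the surrounding for-loop are assumptions, not taken from the file:

import logging
import time

logger = logging.getLogger(__name__)

RETRY_COUNT = 3  # assumed value; the real constant is defined elsewhere in create.py
RETRY_DELAY = 5  # assumed value, in seconds


def retry(func, *args, retries=RETRY_COUNT, delay=RETRY_DELAY, **kwargs):
    """Call func, retrying on any exception with a fixed delay between attempts."""
    for attempt in range(retries):  # assumed loop; it supplies the `attempt` used in the log line
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.warning(f"⚠️ Retry {attempt + 1}/{retries} failed: {e}")
            time.sleep(delay)
    raise RuntimeError(f"❌ All retries failed for {func.__name__}({args})")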
data/lexdk/create.py
CHANGED
@@ -38,7 +38,7 @@ def convert_sample(example: dict) -> dict:
     #     "text": "Kullmanns Mølle er en mølle i Gudhjem, opkaldt efter Matts Kullmann, der byggede møllen i 1893 til sin søn, Christian Kullmann, se Gudhjem Mølle.",
     # }
     date = datetime.fromisoformat(example["date"])
-    text = f"{example["title"]}\n\npubliceret: {date}\n{example["text"]}"
+    text = f"{example['title']}\n\npubliceret: {date}\n{example['text']}"

     new_example = dict(
         text_new=text,
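To see what the reformatted f-string produces, here is a hypothetical record in the shape the commented-out example in the hunk suggests:

from datetime import datetime

example = {  # hypothetical sample; field names inferred from the hunk
    "title": "Kullmanns Mølle",
    "date": "2009-05-01",
    "text": "Kullmanns Mølle er en mølle i Gudhjem ...",
}
date = datetime.fromisoformat(example["date"])
text = f"{example['title']}\n\npubliceret: {date}\n{example['text']}"
print(text)
# Kullmanns Mølle
#
# publiceret: 2009-05-01 00:00:00
# Kullmanns Mølle er en mølle i Gudhjem ...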
data/memo/create.py
CHANGED
@@ -96,9 +96,9 @@ def load_memo(repo_path: Path) -> pd.DataFrame:

     text_without_metadata = [t for t in text_df_fileames if t not in metadata_filenames]

-    assert (
-        len(text_without_metadata) == 0
-    ), f"Some texts in the repository do not have metadata: {text_without_metadata}"
+    assert len(text_without_metadata) == 0, (
+        f"Some texts in the repository do not have metadata: {text_without_metadata}"
+    )

     # merge texts with metadata
     merged_df = pd.merge(
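The reformatted assert keeps the condition on the assert line and wraps only the message, so a failure reads cleanly. A standalone illustration with a hypothetical leftover filename:

text_without_metadata = ["1870_AndersenH_Eventyr.txt"]  # hypothetical filename for illustration
assert len(text_without_metadata) == 0, (
    f"Some texts in the repository do not have metadata: {text_without_metadata}"
)
# AssertionError: Some texts in the repository do not have metadata: ['1870_AndersenH_Eventyr.txt']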
data/ncc_books/create.py
CHANGED
@@ -147,7 +147,7 @@ def dynaword_format(
         "license": license,
         "domain": domain,
         "metadata": {
-            "source-pretty": f"Norwegian Colossal Corpus ({re.sub("ncc_", "", source)})",
+            "source-pretty": f"Norwegian Colossal Corpus ({re.sub('ncc_', '', source)})",
             "source-type": doc_type,
         },
     }
@@ -176,9 +176,9 @@ def log_pre_filter_lang_data(
     logger.info(f"Documents of {source}:")
     logger.info(f"NO: {no_docs}, {no_perc}% ; DA: {da_docs}, {da_perc}%")
     logger.info("After language confidence filtering:")
-    logger.info(f"DA: {f_length}, lost: {100-f_perc}%")
+    logger.info(f"DA: {f_length}, lost: {100 - f_perc}%")
     logger.info("Total document change:")
-    logger.info(f"{all_docs} -> {f_length}, loss: {100-f_total_perc}%")
+    logger.info(f"{all_docs} -> {f_length}, loss: {100 - f_total_perc}%")


 def get_var_name(var):
@@ -272,7 +272,7 @@ def quality_checks(ds: Dataset) -> Dataset:

     long_texts = ds_f.filter(too_long_filter, num_proc=None)
     if len(long_texts["id"]) > 0:
-        logger.info(f"{len(long_texts["id"])} Long texts (>~1e5 tokens) found")
+        logger.info(f"{len(long_texts['id'])} Long texts (>~1e5 tokens) found")
         for id in long_texts["id"]:
             logger.info(f"id: {id}")
     else:
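The `source-pretty` line simply strips the `ncc_` prefix from the source name; the same change appears in all four ncc_* scripts in this commit:

import re

for source in ["ncc_books", "ncc_maalfrid", "ncc_newspaper", "ncc_parliament"]:
    print(f"Norwegian Colossal Corpus ({re.sub('ncc_', '', source)})")
# Norwegian Colossal Corpus (books)
# Norwegian Colossal Corpus (maalfrid)
# Norwegian Colossal Corpus (newspaper)
# Norwegian Colossal Corpus (parliament)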
data/ncc_maalfrid/create.py
CHANGED
@@ -147,7 +147,7 @@ def dynaword_format(
         "license": license,
         "domain": domain,
         "metadata": {
-            "source-pretty": f"Norwegian Colossal Corpus ({re.sub("ncc_", "", source)})",
+            "source-pretty": f"Norwegian Colossal Corpus ({re.sub('ncc_', '', source)})",
             "source-type": doc_type,
         },
     }
@@ -176,9 +176,9 @@ def log_pre_filter_lang_data(
     logger.info(f"Documents of {source}:")
     logger.info(f"NO: {no_docs}, {no_perc}% ; DA: {da_docs}, {da_perc}%")
     logger.info("After language confidence filtering:")
-    logger.info(f"DA: {f_length}, lost: {100-f_perc}%")
+    logger.info(f"DA: {f_length}, lost: {100 - f_perc}%")
     logger.info("Total document change:")
-    logger.info(f"{all_docs} -> {f_length}, loss: {100-f_total_perc}%")
+    logger.info(f"{all_docs} -> {f_length}, loss: {100 - f_total_perc}%")


 def get_var_name(var):
@@ -272,7 +272,7 @@ def quality_checks(ds: Dataset) -> Dataset:

     long_texts = ds_f.filter(too_long_filter, num_proc=num_proc)
     if len(long_texts["id"]) > 0:
-        logger.info(f"{len(long_texts["id"])} Long texts (>~1e5 tokens) found")
+        logger.info(f"{len(long_texts['id'])} Long texts (>~1e5 tokens) found")
         for id in long_texts["id"]:
             logger.info(f"id: {id}")
     else:
data/ncc_newspaper/create.py
CHANGED
@@ -148,7 +148,7 @@ def dynaword_format(
         "license": license,
         "domain": domain,
         "metadata": {
-            "source-pretty": f"Norwegian Colossal Corpus ({re.sub("ncc_", "", source)})",
+            "source-pretty": f"Norwegian Colossal Corpus ({re.sub('ncc_', '', source)})",
             "source-type": doc_type,
         },
     }
@@ -177,9 +177,9 @@ def log_pre_filter_lang_data(
     logger.info(f"Documents of {source}:")
     logger.info(f"NO: {no_docs}, {no_perc}% ; DA: {da_docs}, {da_perc}%")
     logger.info("After language confidence filtering:")
-    logger.info(f"DA: {f_length}, lost: {100-f_perc}%")
+    logger.info(f"DA: {f_length}, lost: {100 - f_perc}%")
     logger.info("Total document change:")
-    logger.info(f"{all_docs} -> {f_length}, loss: {100-f_total_perc}%")
+    logger.info(f"{all_docs} -> {f_length}, loss: {100 - f_total_perc}%")


 def get_var_name(var):
@@ -275,7 +275,7 @@ def quality_checks(ds: Dataset) -> Dataset:

     long_texts = ds_f.filter(too_long_filter, num_proc=num_proc)
     if len(long_texts["id"]) > 0:
-        logger.info(f"{len(long_texts["id"])} Long texts (>~1e5 tokens) found")
+        logger.info(f"{len(long_texts['id'])} Long texts (>~1e5 tokens) found")
         for id in long_texts["id"]:
             logger.info(f"id: {id}")
     else:
data/ncc_parliament/create.py
CHANGED
@@ -147,7 +147,7 @@ def dynaword_format(
         "license": license,
         "domain": domain,
         "metadata": {
-            "source-pretty": f"Norwegian Colossal Corpus ({re.sub("ncc_", "", source)})",
+            "source-pretty": f"Norwegian Colossal Corpus ({re.sub('ncc_', '', source)})",
             "source-type": doc_type,
         },
     }
@@ -176,9 +176,9 @@ def log_pre_filter_lang_data(
     logger.info(f"Documents of {source}:")
     logger.info(f"NO: {no_docs}, {no_perc}% ; DA: {da_docs}, {da_perc}%")
     logger.info("After language confidence filtering:")
-    logger.info(f"DA: {f_length}, lost: {100-f_perc}%")
+    logger.info(f"DA: {f_length}, lost: {100 - f_perc}%")
     logger.info("Total document change:")
-    logger.info(f"{all_docs} -> {f_length}, loss: {100-f_total_perc}%")
+    logger.info(f"{all_docs} -> {f_length}, loss: {100 - f_total_perc}%")


 def get_var_name(var):
@@ -272,7 +272,7 @@ def quality_checks(ds: Dataset) -> Dataset:

     long_texts = ds_f.filter(too_long_filter, num_proc=None)
     if len(long_texts["id"]) > 0:
-        logger.info(f"{len(long_texts["id"])} Long texts (>~1e5 tokens) found")
+        logger.info(f"{len(long_texts['id'])} Long texts (>~1e5 tokens) found")
         for id in long_texts["id"]:
             logger.info(f"id: {id}")
     else:
src/dynaword/datasheet.py
CHANGED
@@ -60,7 +60,7 @@ def human_readable_large_int(value: int) -> str:
     ]
     for threshold, label in thresholds:
         if value > threshold:
-            return f"{value/threshold:.2f}{label}"
+            return f"{value / threshold:.2f}{label}"

     return str(value)

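Only the tail of human_readable_large_int is visible in the hunk. A minimal sketch of the whole function; the threshold values and labels here are assumptions, and only the loop body and fallback come from the diff:

def human_readable_large_int(value: int) -> str:
    thresholds = [  # assumed values and labels, ordered largest first
        (1_000_000_000, "B"),
        (1_000_000, "M"),
        (1_000, "K"),
    ]
    for threshold, label in thresholds:
        if value > threshold:
            return f"{value / threshold:.2f}{label}"

    return str(value)

print(human_readable_large_int(12_400_000))  # 12.40M
print(human_readable_large_int(830))         # 830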
src/dynaword/plot_tokens_over_time.py
CHANGED
@@ -115,13 +115,13 @@ def create_token_dataframe(filename: str = "descriptive_stats.json") -> pd.DataFrame:
 def _format_tokens(value: float) -> str:
     """Format tokens with human-readable suffixes"""
     if value >= 1e12:
-        return f"{value/1e12:.2f}T"
+        return f"{value / 1e12:.2f}T"
     elif value >= 1e9:
-        return f"{value/1e9:.2f}G"
+        return f"{value / 1e9:.2f}G"
     elif value >= 1e6:
-        return f"{value/1e6:.2f}M"
+        return f"{value / 1e6:.2f}M"
     elif value >= 1e3:
-        return f"{value/1e3:.2f}k"
+        return f"{value / 1e3:.2f}k"
     else:
         return f"{value:.0f}"

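_format_tokens appears in full in the hunk, so its behavior can be checked directly:

for v in [950, 12_000, 3_400_000, 7.2e9, 1.5e12]:
    print(_format_tokens(v))
# 950
# 12.00k
# 3.40M
# 7.20G
# 1.50T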
src/tests/test_quality/test_short_texts.py
CHANGED
@@ -16,6 +16,6 @@ def test_no_one_word_documents(dataset_name: str):

     one_word_docs = ds.filter(lambda x: x["token_count"] <= 1)

-    assert (
-        len(one_word_docs) == 0
-    ), f"Found {len(one_word_docs)} one-word documents in dataset '{dataset_name}'"
+    assert len(one_word_docs) == 0, (
+        f"Found {len(one_word_docs)} one-word documents in dataset '{dataset_name}'"
+    )