Dataset Viewer
Auto-converted to Parquet Duplicate
Search is not available for this dataset
image
image
width
int64
height
int64
image_id
int64
annotations
list
num_objects
int64
image_path
string
815
1,623
0
[ { "bbox": [ 82, 48, 673, 51 ], "category": "Title", "text": "މާދަމާގެ އެއްވުމަށް ފުލުހުން ޝަރުތުތަކެއް ކަނޑައަޅައި އަދާލަތު ޕާޓީ އަށް ސިޓީ" }, { "bbox": [ 676, 100, 79, 46 ], "category": "Title", "text": "ފޮނުވައިފި" }, { "b...
30
images/image_0.png
2,338
2,667
1
[ { "bbox": [ 961, 48, 413, 46 ], "category": "Title", "text": "ފެން ހޮޅި އުފެއްދުމުގެ މަސައްކަތް އެމްޑަބްލިއުއެސްސީން" }, { "bbox": [ 1140, 106, 54, 40 ], "category": "Title", "text": "ފަށަައިފި" }, { "bbox": [ 90, ...
238
images/image_1.png
1,814
1,985
2
[ { "bbox": [ 645, 48, 520, 43 ], "category": "Title", "text": "އީޔޫން ރާއްޖެއާ ދެކޮޅަށް ފާސްކުރީ \"ހަރުކަށި ގަރާރެއް\": ތަޖުރިބާކާރުން" }, { "bbox": [ 91, 124, 755, 1802 ], "category": "Columns", "text": "" }, { "bbox": [ ...
79
images/image_2.png
556
569
3
[ { "bbox": [ 91, 48, 405, 39 ], "category": "Title", "text": "ހުއްދަ ނެތި އުތުރު ބަނދަރަށް ނޫސްވެރިން ވަނުން މަނާކޮށްފި" }, { "bbox": [ 77, 132, 419, 30 ], "category": "Text", "text": "ހުއްދަ ނެތި، މާލޭ އުތުރު ފަރާތުގެ އާ ޖެޓީ ނުވަތަ...
13
images/image_3.png
421
1,539
4
[ { "bbox": [ 88, 48, 273, 54 ], "category": "Title", "text": "ނަޝީދު ދޫކޮށްލުމުގެ ކުރިއަކުން" }, { "bbox": [ 71, 121, 290, 39 ], "category": "Title", "text": "މަސައްކަތެއް ހުއްޓަލެއް ނުލާނަން:" }, { "bbox": [ 250, ...
34
images/image_4.png
482
1,280
5
[ { "bbox": [ 91, 48, 331, 48 ], "category": "Title", "text": "މާބްލް ހޮޓަލާ މެދު ފިޔަވަޅު އަޅައިދޭން" }, { "bbox": [ 282, 110, 140, 48 ], "category": "Title", "text": "ފުލުހުން އެދިއްޖެ" }, { "bbox": [ 56, 202, ...
24
images/image_5.png
828
1,684
6
[ { "bbox": [ 361, 48, 407, 42 ], "category": "Title", "text": "ރައީސަށް އަޑުއިވެން ފަށައިފި، އާ ދުވަހެއް ފެންނާނެ: އަމީން" }, { "bbox": [ 114, 126, 654, 36 ], "category": "Text", "text": "އަނިޔާވެރިކަން ހުއްޓާލަން ގޮވާލައި އިދިކޮޅު ފ...
30
images/image_6.png
452
1,277
7
[ { "bbox": [ 66, 48, 326, 46 ], "category": "Title", "text": "މުޒާހަރާތަކާ ދެކޮޅަށް އަނިޔާވެރިނުވުމަށާއި" }, { "bbox": [ 57, 109, 335, 37 ], "category": "Title", "text": "މަރުގެ އަދަބު ނުދޭން ރާއްޖެ އަށް ގޮވާލާ" }, { "bbox": ...
25
images/image_7.png
427
3,338
8
[ { "bbox": [ 78, 48, 289, 52 ], "category": "Title", "text": "މާދަމާގެ އެއްވުމުގައި ހަމަނުޖެހުން" }, { "bbox": [ 116, 103, 251, 53 ], "category": "Title", "text": "ހިންގާނެ ކަމަކަށް ނުބެލެވޭ:" }, { "bbox": [ 236, ...
78
images/image_8.png
637
2,021
9
[ { "bbox": [ 46, 48, 531, 61 ], "category": "Title", "text": "ދައުލަތުގެ މީޑިއާ ހިންގާ ބޯޑުގެ މެންބަރުންނަށް ރުހުން ދީ،" }, { "bbox": [ 383, 125, 194, 53 ], "category": "Title", "text": "މުސާރަ ކަނޑައަޅައިފި" }, { "bbox": [ ...
41
images/image_9.png
448
2,519
10
[ { "bbox": [ 102, 48, 286, 48 ], "category": "Title", "text": "ސިޔާސީ ފަރާތްތަކާ އެކު މަޝްވަރާކޮށް" }, { "bbox": [ 167, 101, 221, 50 ], "category": "Title", "text": "ހައްލެއް ހޯދުން މުހިންމު: އީޔޫ" }, { "bbox": [ 115, ...
65
images/image_10.png
415
1,725
11
[ { "bbox": [ 85, 48, 270, 38 ], "category": "Title", "text": "އިމްރާން މިއަދު އިމިގްރޭޝަނަށް ހާޒިރެއް" }, { "bbox": [ 312, 102, 43, 35 ], "category": "Title", "text": "ނުކުރި" }, { "bbox": [ 99, 183, 256, ...
32
images/image_11.png
464
1,085
12
[ { "bbox": [ 133, 48, 271, 43 ], "category": "Title", "text": "ހަމަޖެހުން ނަގާލުމުގެ އެއްވުންތަކުގައި" }, { "bbox": [ 173, 95, 231, 47 ], "category": "Title", "text": "ބައިވެރިނުވަން ނައިބު ރައީސް" }, { "bbox": [ 242, ...
17
images/image_12.png
645
1,717
13
[ { "bbox": [ 142, 48, 443, 58 ], "category": "Title", "text": "ގަދަކަމުން ވެރިކަން ހޯދުމާއި ގަދަކަމުން ވެރިކަން" }, { "bbox": [ 187, 117, 398, 59 ], "category": "Title", "text": "ދެމެެހެއްޓުމަކީ ކުރާނެ ކަމެއް ނޫން: ރައީސް" }, { ...
29
images/image_13.png
580
2,961
14
[ { "bbox": [ 59, 48, 461, 52 ], "category": "Title", "text": "ފާތުމަ މަރާލި މައްސަލަ ޝަހީމްގެ މައްޗަށް ސާބިތެއް ނުވި" }, { "bbox": [ 42, 136, 478, 39 ], "category": "Text", "text": "ހދ. ނޭކުރެންދޫ، ރަންވިލު، ފާތިމަތު ޒަކަރިއްޔާ (ފާތު...
51
images/image_14.png
1,325
1,682
15
[ { "bbox": [ 434, 48, 453, 44 ], "category": "Title", "text": "އަދީބަކީ ރާއްޖޭގައި ނެތްވަރުގެ ބޮޑު ފާސިދު، ފާސިގު، މުޖްރިމެއް:" }, { "bbox": [ 634, 104, 53, 41 ], "category": "Title", "text": "އިމްރާން" }, { "bbox": [ 9...
66
images/image_15.png
1,288
988
16
[ { "bbox": [ 426, 48, 433, 56 ], "category": "Title", "text": "މިލަންދޫގައި ގެއްލިގެން ހޯދަމުން ދިޔަ މީހަކު މޫދުގައި" }, { "bbox": [ 532, 107, 220, 45 ], "category": "Title", "text": "މަރުވެފައި އޮއްވައި ފެނިއްޖެ" }, { "bbox"...
30
images/image_16.png
427
1,319
17
[ { "bbox": [ 79, 48, 288, 39 ], "category": "Title", "text": "އިމްރާން ކުރައްވާ މަސައްކަތަކުން އެއްވެސް" }, { "bbox": [ 197, 102, 170, 41 ], "category": "Title", "text": "ދިރުމެއް ނުފެނޭ: ރައީސް" }, { "bbox": [ 50, ...
38
images/image_17.png
485
1,747
18
[ { "bbox": [ 57, 48, 368, 47 ], "category": "Title", "text": "ލޯފަން ކުދިންނަށް ހާއްސަ މުސްހަފް އެ ކުދިންގެ" }, { "bbox": [ 233, 103, 192, 44 ], "category": "Title", "text": "ޖަމިއްޔާ އަށް ހަދިޔާކޮށްފި" }, { "bbox": [ 8...
31
images/image_18.png
556
2,338
19
[ { "bbox": [ 79, 48, 417, 52 ], "category": "Title", "text": "ސިފައިންގެ ތެރެއަށް ސިޔާސީ ކަންކަން ވަދެގެން" }, { "bbox": [ 360, 113, 136, 53 ], "category": "Title", "text": "ނުވާނެ: ރައީސް" }, { "bbox": [ 63, 209,...
48
images/image_19.png
632
1,295
20
[ { "bbox": [ 193, 48, 379, 44 ], "category": "Title", "text": "ނޭޕާލުން 14 ދިވެއްސަކު އިންޑިއާ އަށް ހިނގައްޖެ" }, { "bbox": [ 64, 139, 508, 27 ], "category": "Text", "text": "ނޭޕާލަށް މިދިޔަ ހޮނިހިރު ދުވަހު އައި ބާރުގަދަ ބިންހެލުމާ ގ...
20
images/image_20.png
End of preview. Expand in Data Studio

Dhivehi Image Bounding Box Prompt Dataset

This dataset, alakxender/dhivehi-image-bbox-prompt, contains 58,738 images annotated with COCO-style bounding boxes and Dhivehi (Thaana script) text, along with layout categories such as Text, Title, Picture, Caption, and Columns. It is designed for OCR, document layout analysis, and multimodal vision–language research focused on Dhivehi.

Dataset

Each row includes:

  • image — the RGB image (preserved original dimensions)

  • width, height — image size in pixels

  • image_id — unique integer id

  • annotations — list of objects with:

    • bbox — bounding box (COCO-style [x, y, width, height], absolute pixels)
    • category — one of: Text, Title, Picture, Caption, Columns
    • text — Dhivehi transcript for the region (may be empty for pictures)
  • num_objects — number of boxes in the image

  • image_path — original file path (useful for local debugging)

Splits

Split Count
train 52,864
validation (test) 5,874
total 58,738

Usage

from datasets import load_dataset

ds = load_dataset("alakxender/dhivehi-image-bbox-prompt")
sample = ds["train"][0]

print(sample["image_id"], sample["width"], sample["height"])
print(sample["annotations"][0])  # {'bbox':[x,y,w,h], 'category':'Text', 'text':'...'}

Validate bbox format quickly:

x, y, w, h = sample["annotations"][0]["bbox"]
assert w > 0 and h > 0  # COCO-style width/height

Target tasks & model ideas

  • OCR (text detection + recognition)
  • Document layout understanding
  • Multimodal VLM

Sample

From assets/visualize_hf_dataset.py:

Category Color (hex)
Text #FF0000 (red)
Title #00FF00 (green)
Picture #0000FF (blue)
Caption #FF00FF (magenta)
Columns #FFFF00 (yellow)

Sample Annotated

Visualize a few samples (helper script)

# First 8 training images (streamed)
python assets/visualize_hf_dataset.py --split train --head 8

# Skip first 500 validation images and take 12 (streamed)
python assets/visualize_hf_dataset.py --split validation --offset 500 --head 12

# Random 10 from the first 2000 training items (streamed reservoir sampling)
python assets/visualize_hf_dataset.py --split train --sample 10 --limit 2000

# Show Dhivehi text snippets and downscale to max 1600px side
python assets/visualize_hf_dataset.py --split train --head 6 --show-text --max-side 1600

Key options

  • --streaming / --no-streaming – Enable/disable HF streaming mode (default: streaming on)
  • --head N – Take the first N samples (with optional --offset)
  • --sample N --limit M – Randomly pick N samples from the first M streamed examples (reservoir sampling)
  • --show-text – Overlay a short snippet of the Dhivehi text for each region
  • --make-grid N – Make a simple grid collage of the saved visualizations

All generated images are saved in the specified --out-dir (default: hf_visualizations/).

Known limitations

  • Some caption text fields may be empty or contain line-break artifacts.
  • Bounding boxes are axis-aligned; no polygons/rotations are provided.
  • Class set is focused on page layout (not general object categories).
  • Images are included in their original resolutions and orientations, resulting in varying sizes and aspect ratios.

Notes

This dataset was created for testing and experimentation in layout analysis of printed Dhivehi documents, and is not intended as a fixed standard; the dataset, annotation schema, or category set may be modified to suit specific project requirements.

Downloads last month
214