blumenstiel committed
Commit a9590f4 · verified · 1 Parent(s): b0a124e

Update README.md

Files changed (1): README.md (+12, -2)
README.md CHANGED

@@ -140,6 +140,7 @@ dataset = build_terramesh_dataset(
     path="https://huggingface.co/datasets/ibm-esa-geospatial/TerraMesh/resolve/main/",  # Streaming or local path
     modalities=["S2L2A"],
     split="val",
+    shuffle=False,  # Set False for split="val"
     batch_size=8
 )
 # Batch keys: ["__key__", "__url__", "image"]
@@ -148,6 +149,7 @@ dataset = build_terramesh_dataset(
 dataset = build_terramesh_dataset(
     path="https://huggingface.co/datasets/ibm-esa-geospatial/TerraMesh/resolve/main/",  # Streaming or local path
     modalities=["S2L2A", "S2L1C", "S2RGB", "S1GRD", "S1RTC", "DEM", "NDVI", "LULC"],
+    shuffle=False,  # Set False for split="val"
     split="val",
     batch_size=8
 )
@@ -176,7 +178,7 @@ However, it requires some wrapping to bring the data into the expected shape.
 ```python
 import albumentations as A
 from albumentations.pytorch import ToTensorV2
-from terramesh import build_terramesh_dataset, Transpose, MultimodalTransforms
+from terramesh import build_terramesh_dataset, Transpose, MultimodalTransforms, MultimodalNormalize, statistics
 
 # Define all image modalities
 modalities = ["S2L2A", "S2L1C", "S2RGB", "S1GRD", "S1RTC", "DEM", "NDVI", "LULC"]
@@ -185,6 +187,7 @@ modalities = ["S2L2A", "S2L1C", "S2RGB", "S1GRD", "S1RTC", "DEM", "NDVI", "LULC"
 val_transform = MultimodalTransforms(
     transforms=A.Compose([  # We use albumentations because of the shared transform between image modalities
         Transpose([1, 2, 0]),  # Convert data to channel last (the shape albumentations expects)
+        MultimodalNormalize(mean=statistics["mean"], std=statistics["std"]),
         A.CenterCrop(224, 224),  # Use center crop in the val split
         # A.RandomCrop(224, 224),  # Use random crop in the train split
         # A.D4(),  # Optionally, use random flipping and rotation for the train split
@@ -205,7 +208,14 @@ dataset = build_terramesh_dataset(
 )
 ```
 
-If you only use a single modality, you don't need to specify `additional_targets`.
+If you only use a single modality, you don't need to specify `additional_targets`, but you do need to change the normalization to:
+```python
+MultimodalNormalize(
+    mean={"image": statistics["mean"]["<modality>"]},
+    std={"image": statistics["std"]["<modality>"]}
+),
+```
+
 
 ### Returning metadata
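
Put together, the single-modality setup after this change would look roughly like the sketch below. This is illustrative, not text from the commit: it fills the `<modality>` placeholder with `"S2L2A"`, and the `transform=` keyword on `build_terramesh_dataset` is an assumption, since the excerpt does not show how the transform is attached.

```python
import albumentations as A
from albumentations.pytorch import ToTensorV2
from terramesh import (
    build_terramesh_dataset,
    Transpose,
    MultimodalTransforms,
    MultimodalNormalize,
    statistics,
)

# Single modality: no `additional_targets`, and the normalization
# statistics are indexed for that one modality ("S2L2A" here).
val_transform = MultimodalTransforms(
    transforms=A.Compose([
        Transpose([1, 2, 0]),  # channel last, as albumentations expects
        MultimodalNormalize(
            mean={"image": statistics["mean"]["S2L2A"]},
            std={"image": statistics["std"]["S2L2A"]},
        ),
        A.CenterCrop(224, 224),  # deterministic crop for the val split
        ToTensorV2(),  # back to channel-first torch tensors
    ])
)

dataset = build_terramesh_dataset(
    path="https://huggingface.co/datasets/ibm-esa-geospatial/TerraMesh/resolve/main/",
    modalities=["S2L2A"],
    split="val",
    shuffle=False,  # set False for split="val", per this commit
    transform=val_transform,  # assumed keyword; not shown in the excerpt
    batch_size=8,
)
```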
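
Continuing from that sketch, a quick smoke test of the deterministic validation order introduced by `shuffle=False`, assuming the returned dataset is directly iterable and yields batch dicts with the keys noted in the README:

```python
# Batch keys per the README: ["__key__", "__url__", "image"]
batch = next(iter(dataset))
print(batch["__key__"])      # sample identifiers; stable across runs with shuffle=False
print(batch["image"].shape)  # e.g. [8, C, 224, 224] after the 224x224 center crop
```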