@@ -302,8 +302,8 @@ def _choose_float_dtype(
 ) -> type[np.floating[Any]]:
     # check scale/offset first to derive dtype
     # see https://github.com/pydata/xarray/issues/5597#issuecomment-879561954
-    scale_factor = mapping.get("scale_factor", False)
-    add_offset = mapping.get("add_offset", False)
+    scale_factor = mapping.get("scale_factor")
+    add_offset = mapping.get("add_offset")
     if scale_factor or add_offset:
         # get the maximum itemsize from scale_factor/add_offset to determine
         # the needed floating point type
@@ -320,7 +320,7 @@ def _choose_float_dtype(
         # but a large integer offset could lead to loss of precision.
         # Sensitivity analysis can be tricky, so we just use a float64
         # if there's any offset at all - better unoptimised than wrong!
-        if maxsize == 4 and np.issubdtype(add_offset_type, np.floating):
+        if maxsize == 4 or not np.issubdtype(add_offset_type, np.floating):
             return np.float32
         else:
             return np.float64
@@ -350,12 +350,14 @@ def encode(self, variable: Variable, name: T_Name = None) -> Variable:
         if scale_factor or add_offset:
             dtype = _choose_float_dtype(data.dtype, attrs)
             data = data.astype(dtype=dtype, copy=True)
-        if add_offset:
-            data -= add_offset
-        if scale_factor:
-            data /= scale_factor
+            if add_offset:
+                data -= add_offset
+            if scale_factor:
+                data /= scale_factor

-        return Variable(dims, data, attrs, encoding, fastpath=True)
+            return Variable(dims, data, attrs, encoding, fastpath=True)
+        else:
+            return variable

     def decode(self, variable: Variable, name: T_Name = None) -> Variable:
         dims, data, attrs, encoding = unpack_for_decoding(variable)
0 commit comments