Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions src/ert/analysis/_es_update.py
Original file line number Diff line number Diff line change
Expand Up @@ -299,8 +299,8 @@ def adaptive_localization_progress_callback(
np.fill_diagonal(T, T.diagonal() + 1)

def correlation_callback(
cross_correlations_of_batch: npt.NDArray[np.float64],
cross_correlations_accumulator: list[npt.NDArray[np.float64]],
cross_correlations_of_batch: npt.NDArray[np.float32],
cross_correlations_accumulator: list[npt.NDArray[np.float32]],
) -> None:
cross_correlations_accumulator.append(cross_correlations_of_batch)

Expand Down Expand Up @@ -350,7 +350,7 @@ def correlation_callback(
progress_callback(AnalysisStatusEvent(msg=log_msg))

start = time.time()
cross_correlations: list[npt.NDArray[np.float64]] = []
cross_correlations: list[npt.NDArray[np.float32]] = []
for param_batch_idx in batches:
update_idx = param_batch_idx[non_zero_variance_mask[param_batch_idx]]
X_local = param_ensemble_array[update_idx, :]
Expand Down
4 changes: 2 additions & 2 deletions src/ert/analysis/_update_commons.py
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ def _auto_scale_observations(
obs_mask: npt.NDArray[np.bool_],
active_realizations: list[str],
progress_callback: Callable[[AnalysisEvent], None],
) -> tuple[npt.NDArray[np.float64], pl.DataFrame] | tuple[None, None]:
) -> tuple[npt.NDArray[np.float32], pl.DataFrame] | tuple[None, None]:
"""
Performs 'Auto Scaling' to mitigate issues with correlated observations,
and saves computed scaling factors across input groups to ERT storage.
Expand Down Expand Up @@ -328,7 +328,7 @@ class _OutlierColumns(StrEnum):
def _all_parameters(
ensemble: Ensemble,
iens_active_index: npt.NDArray[np.int_],
) -> npt.NDArray[np.float64]:
) -> npt.NDArray[np.float32]:
"""Return all parameters in assimilation problem"""

param_groups = list(ensemble.experiment.parameter_configuration.keys())
Expand Down
12 changes: 6 additions & 6 deletions src/ert/analysis/misfit_preprocessor.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ def get_scaling_factor(nr_observations: int, nr_components: int) -> float:


def get_nr_primary_components(
responses: npt.NDArray[np.float64], threshold: float
responses: npt.NDArray[np.float32], threshold: float
) -> int:
"""
Calculate the number of principal components required
Expand Down Expand Up @@ -61,7 +61,7 @@ def get_nr_primary_components(


def cluster_responses(
responses: npt.NDArray[np.float64],
responses: npt.NDArray[np.float32],
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

should this be np.floating instead? it seems responses can be np.float64 coming from main?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, it probably should be np.floating — I have changed it in some places 😄

nr_clusters: int,
) -> npt.NDArray[np.int_]:
"""
Expand All @@ -70,7 +70,7 @@ def cluster_responses(
be clustered together.
"""
correlation = spearmanr(responses).statistic
if isinstance(correlation, np.float64):
if isinstance(correlation, np.floating):
correlation = np.array([[1, correlation], [correlation, 1]])
# Take absolute value to cluster based on correlation strength rather
# than direction.
Expand All @@ -84,9 +84,9 @@ def cluster_responses(


def main(
responses: npt.NDArray[np.float64],
obs_errors: npt.NDArray[np.float64],
) -> tuple[npt.NDArray[np.float64], npt.NDArray[np.int_], npt.NDArray[np.int_]]:
responses: npt.NDArray[np.floating],
obs_errors: npt.NDArray[np.floating],
) -> tuple[npt.NDArray[np.floating], npt.NDArray[np.int_], npt.NDArray[np.int_]]:
Comment on lines +87 to +89
Copy link

Copilot AI Nov 12, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The type annotations are inconsistent. The main function signature uses npt.NDArray[np.floating] for both responses and obs_errors, but the functions it calls internally (get_nr_primary_components and cluster_responses on lines 31-32 and 63-64) expect specifically npt.NDArray[np.float32]. This creates a type incompatibility. Either change the internal functions to accept np.floating, or change the main function signature to use np.float32.

Suggested change
responses: npt.NDArray[np.floating],
obs_errors: npt.NDArray[np.floating],
) -> tuple[npt.NDArray[np.floating], npt.NDArray[np.int_], npt.NDArray[np.int_]]:
responses: npt.NDArray[np.float32],
obs_errors: npt.NDArray[np.float32],
) -> tuple[npt.NDArray[np.float32], npt.NDArray[np.int_], npt.NDArray[np.int_]]:

Copilot uses AI. Check for mistakes.
"""
Perform 'Auto Scaling' to mitigate issues with correlated observations in ensemble
smoothers.
Expand Down
4 changes: 2 additions & 2 deletions src/ert/config/design_matrix.py
Original file line number Diff line number Diff line change
Expand Up @@ -391,14 +391,14 @@ def convert_numeric_string_columns(df: pl.DataFrame) -> pl.DataFrame:
for col, dtype in zip(df.columns, df.dtypes, strict=False):
if dtype == pl.String:
try:
df = df.with_columns(pl.col(col).cast(pl.Int64, strict=True).alias(col))
df = df.with_columns(pl.col(col).cast(pl.Int32, strict=True).alias(col))
continue
except InvalidOperationError:
pass

try: # noqa: SIM105
df = df.with_columns(
pl.col(col).cast(pl.Float64, strict=True).alias(col)
pl.col(col).cast(pl.Float32, strict=True).alias(col)
)
except InvalidOperationError:
pass
Expand Down
4 changes: 2 additions & 2 deletions src/ert/config/ext_param_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ def write_to_runpath(

def create_storage_datasets(
self,
from_data: npt.NDArray[np.float64],
from_data: npt.NDArray[np.float32],
iens_active_index: npt.NDArray[np.int_],
) -> Iterator[tuple[int, xr.Dataset]]:
for i, realization in enumerate(iens_active_index):
Expand All @@ -97,7 +97,7 @@ def create_storage_datasets(

def load_parameters(
self, ensemble: Ensemble, realizations: npt.NDArray[np.int_]
) -> npt.NDArray[np.float64]:
) -> npt.NDArray[np.float32]:
raise NotImplementedError

def load_parameter_graph(self) -> nx.Graph[int]:
Expand Down
4 changes: 2 additions & 2 deletions src/ert/config/field.py
Original file line number Diff line number Diff line change
Expand Up @@ -319,7 +319,7 @@ def write_to_runpath(

def create_storage_datasets(
self,
from_data: npt.NDArray[np.float64],
from_data: npt.NDArray[np.float32],
iens_active_index: npt.NDArray[np.int_],
) -> Iterator[tuple[int, xr.Dataset]]:
for i, realization in enumerate(iens_active_index):
Expand All @@ -336,7 +336,7 @@ def create_storage_datasets(

def load_parameters(
self, ensemble: Ensemble, realizations: npt.NDArray[np.int_]
) -> npt.NDArray[np.float64]:
) -> npt.NDArray[np.float32]:
ds = ensemble.load_parameters(self.name, realizations)
assert isinstance(ds, xr.Dataset)
ensemble_size = len(ds.realizations)
Expand Down
4 changes: 2 additions & 2 deletions src/ert/config/gen_kw_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -232,7 +232,7 @@ def write_to_runpath(

def load_parameters(
self, ensemble: Ensemble, realizations: npt.NDArray[np.int_]
) -> npt.NDArray[np.float64]:
) -> npt.NDArray[np.float32]:
return (
ensemble.load_parameters(self.name, realizations)
.drop("realization")
Expand All @@ -242,7 +242,7 @@ def load_parameters(

def create_storage_datasets(
self,
from_data: npt.NDArray[np.float64],
from_data: npt.NDArray[np.float32],
iens_active_index: npt.NDArray[np.int_],
) -> Iterator[tuple[int | None, pl.DataFrame]]:
yield (
Expand Down
4 changes: 2 additions & 2 deletions src/ert/config/parameter_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,7 @@ def write_to_runpath(
@abstractmethod
def create_storage_datasets(
self,
from_data: npt.NDArray[np.float64],
from_data: npt.NDArray[np.float32],
iens_active_index: npt.NDArray[np.int_],
) -> Iterator[tuple[int | None, pl.DataFrame | xr.Dataset]]:
"""
Expand Down Expand Up @@ -128,7 +128,7 @@ def copy_parameters(
@abstractmethod
def load_parameters(
self, ensemble: Ensemble, realizations: npt.NDArray[np.int_]
) -> npt.NDArray[np.float64]:
) -> npt.NDArray[np.float32]:
"""
Load the parameter from internal storage for the given ensemble.
Must return array of shape (number of parameters, number of realizations).
Expand Down
4 changes: 2 additions & 2 deletions src/ert/config/surface_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -199,7 +199,7 @@ def write_to_runpath(

def create_storage_datasets(
self,
from_data: npt.NDArray[np.float64],
from_data: npt.NDArray[np.float32],
iens_active_index: npt.NDArray[np.int_],
) -> Iterator[tuple[int, xr.Dataset]]:
for i, realization in enumerate(iens_active_index):
Expand Down Expand Up @@ -229,7 +229,7 @@ def create_storage_datasets(

def load_parameters(
self, ensemble: Ensemble, realizations: npt.NDArray[np.int_]
) -> npt.NDArray[np.float64]:
) -> npt.NDArray[np.float32]:
ds = ensemble.load_parameters(self.name, realizations)
assert isinstance(ds, xr.Dataset)
ensemble_size = len(ds.realizations)
Expand Down
6 changes: 3 additions & 3 deletions src/ert/dark_storage/compute/misfits.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,9 @@


def _calculate_misfit(
obs_value: npt.NDArray[np.float64],
response_value: npt.NDArray[np.float64],
obs_std: npt.NDArray[np.float64],
obs_value: npt.NDArray[np.float32],
response_value: npt.NDArray[np.float32],
obs_std: npt.NDArray[np.float32],
) -> list[float]:
difference = response_value - obs_value
misfit = (difference / obs_std) ** 2
Expand Down
2 changes: 1 addition & 1 deletion src/ert/gui/tools/plot/plottery/plots/distribution.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ def _plotDistribution(
index: int,
previous_data: pd.DataFrame | None,
) -> None:
data = pd.Series(dtype="float64") if data.empty else data[0]
data = pd.Series(dtype="float32") if data.empty else data[0]

axes.yaxis.set_major_formatter(ConditionalAxisFormatter())
axes.set_xlabel(plot_config.xLabel()) # type: ignore
Expand Down
2 changes: 1 addition & 1 deletion src/ert/gui/tools/plot/plottery/plots/histogram.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ def plotHistogram(

for ensemble, datas in ensemble_to_data_map.items():
if datas.empty:
data[ensemble.id] = pd.Series(dtype="float64")
data[ensemble.id] = pd.Series(dtype="float32")
continue

data[ensemble.id] = datas[0]
Expand Down
10 changes: 5 additions & 5 deletions src/ert/storage/local_ensemble.py
Original file line number Diff line number Diff line change
Expand Up @@ -529,9 +529,9 @@ def _load_single_dataset(
def _load_dataset(
self,
group: str,
realizations: int | np.int64 | npt.NDArray[np.int_],
realizations: int | np.int32 | npt.NDArray[np.int_],
) -> xr.Dataset:
if isinstance(realizations, int | np.int64):
if isinstance(realizations, int | np.int32):
Copy link

Copilot AI Nov 12, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The isinstance check is too restrictive. While the type annotation was changed from np.int64 to np.int32, numpy scalar integers can be various types (np.int8, np.int16, np.int32, np.int64, etc.) depending on the platform and how they were created. Using isinstance(realizations, int | np.int32) will fail for other numpy integer types. Consider using np.integer to match any numpy integer scalar, or use isinstance(realizations, int | np.integer).

Suggested change
if isinstance(realizations, int | np.int32):
if isinstance(realizations, int | np.integer):

Copilot uses AI. Check for mistakes.
return self._load_single_dataset(group, int(realizations)).isel(
realizations=0, drop=True
)
Expand Down Expand Up @@ -633,7 +633,7 @@ def load_parameters(

def load_parameters_numpy(
self, group: str, realizations: npt.NDArray[np.int_]
) -> npt.NDArray[np.float64]:
) -> npt.NDArray[np.float32]:
if group in self.experiment.parameter_configuration:
config = self.experiment.parameter_configuration[group]
return config.load_parameters(self, realizations)
Expand All @@ -655,7 +655,7 @@ def load_parameters_numpy(

def save_parameters_numpy(
self,
parameters: npt.NDArray[np.float64],
parameters: npt.NDArray[np.float32],
param_group: str,
iens_active_index: npt.NDArray[np.int_],
) -> None:
Expand Down Expand Up @@ -724,7 +724,7 @@ def sample_parameter(
parameter_dict["realization"] = real_nr
return pl.DataFrame(
parameter_dict,
schema={parameter.name: pl.Float64, "realization": pl.Int64},
schema={parameter.name: pl.Float32, "realization": pl.Int32},
)

def load_responses(self, key: str, realizations: tuple[int, ...]) -> pl.DataFrame:
Expand Down
Loading
Loading