diff --git a/CMakeLists.txt b/CMakeLists.txt index 474451c8aeb..27a6fd9a77e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -413,6 +413,7 @@ list(APPEND libopenmc_SOURCES src/tallies/filter_distribcell.cpp src/tallies/filter_energy.cpp src/tallies/filter_energyfunc.cpp + src/tallies/filter_importance.cpp src/tallies/filter_legendre.cpp src/tallies/filter_material.cpp src/tallies/filter_materialfrom.cpp @@ -434,6 +435,7 @@ list(APPEND libopenmc_SOURCES src/tallies/filter_zernike.cpp src/tallies/tally.cpp src/tallies/tally_scoring.cpp + src/tallies/sensitivity.cpp src/tallies/trigger.cpp src/thermal.cpp src/timer.cpp diff --git a/docs/source/pythonapi/base.rst b/docs/source/pythonapi/base.rst index 2a9d0876cd2..9b96d845171 100644 --- a/docs/source/pythonapi/base.rst +++ b/docs/source/pythonapi/base.rst @@ -128,6 +128,7 @@ Constructing Tallies openmc.SurfaceFilter openmc.MeshFilter openmc.MeshBornFilter + openmc.ImportanceFilter openmc.MeshMaterialFilter openmc.MeshSurfaceFilter openmc.EnergyFilter @@ -151,6 +152,8 @@ Constructing Tallies openmc.MeshMaterialVolumes openmc.Trigger openmc.TallyDerivative + openmc.SensitivityTally + openmc.Sensitivity openmc.Tally openmc.Tallies diff --git a/docs/source/pythonapi/capi.rst b/docs/source/pythonapi/capi.rst index 67eca009471..3607736ac44 100644 --- a/docs/source/pythonapi/capi.rst +++ b/docs/source/pythonapi/capi.rst @@ -76,6 +76,7 @@ Classes Mesh MeshFilter MeshBornFilter + ImportanceFilter MeshSurfaceFilter MuFilter Nuclide @@ -89,6 +90,7 @@ Classes SphericalMesh SurfaceFilter Tally + SensitivityTally TemporarySession UniverseFilter UnstructuredMesh diff --git a/include/openmc/constants.h b/include/openmc/constants.h index a0d1646131c..86a759952bf 100644 --- a/include/openmc/constants.h +++ b/include/openmc/constants.h @@ -293,6 +293,8 @@ enum class MgxsType { enum class TallyResult { VALUE, SUM, SUM_SQ, SIZE }; +enum class SensitivityTallyResult { VALUE, SUM, SUM_SQ, PREVIOUS_VALUE }; + enum class TallyType { VOLUME, MESH_SURFACE, SURFACE, PULSE_HEIGHT }; enum class TallyEstimator { ANALOG, TRACKLENGTH, COLLISION }; diff --git a/include/openmc/filter_importance.h b/include/openmc/filter_importance.h new file mode 100644 index 00000000000..f2d30bfef26 --- /dev/null +++ b/include/openmc/filter_importance.h @@ -0,0 +1,58 @@ +#ifndef OPENMC_TALLIES_FILTER_IMPORTANCE_H +#define OPENMC_TALLIES_FILTER_IMPORTANCE_H + +#include + +#include +#include "openmc/vector.h" +#include "openmc/tallies/filter.h" + +namespace openmc { + +//============================================================================== +//! 
Multiplies each tally by the importance corresponding to the mesh index +//============================================================================== + +class ImportanceFilter : public Filter { +public: + //---------------------------------------------------------------------------- + // Constructors, destructors + + ~ImportanceFilter() = default; + + //---------------------------------------------------------------------------- + // Methods + + std::string type_str() const override {return "importance";} + FilterType type() const override { return FilterType::IMPORTANCE; } + + void from_xml(pugi::xml_node node) override; + + void get_all_bins(const Particle& p, TallyEstimator estimator, FilterMatch& match) + const override; + + void to_statepoint(hid_t filter_group) const override; + + std::string text_label(int bin) const override; + + //---------------------------------------------------------------------------- + // Accessors + + const std::vector& importance() const { return importance_; } + void set_importance(gsl::span importance); + + virtual int32_t mesh() const {return mesh_;} + + virtual void set_mesh(int32_t mesh); + +protected: + //---------------------------------------------------------------------------- + // Data members + + std::vector importance_; + + int32_t mesh_; +}; + +} // namespace openmc +#endif // OPENMC_TALLIES_FILTER_IMPORTANCE_H \ No newline at end of file diff --git a/include/openmc/particle_data.h b/include/openmc/particle_data.h index 5383487d4fa..d42afb96716 100644 --- a/include/openmc/particle_data.h +++ b/include/openmc/particle_data.h @@ -44,9 +44,11 @@ struct SourceSite { Position r; Direction u; double E; + double E_parent; double time {0.0}; double wgt {1.0}; int delayed_group {0}; + int fission_nuclide; int surf_id {SURFACE_NONE}; ParticleType particle; @@ -481,8 +483,13 @@ class ParticleData : public GeometryState { double E_; double E_last_; + double E_parent_; //!< energy of parent neutron in eV int g_ {0}; int g_last_; + + // Other birth data + int fission_nuclide_; //!< this particle was born as a result of this nuclide fissioning + // a double for fission cross section at birth? if so, I need to also add it to the bank... 
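+  // Both values are carried through the fission bank (see SourceSite) so that
+  // sensitivity scoring can refer back to the fission event that produced this
+  // particle.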
double wgt_ {1.0}; double wgt_born_ {1.0}; @@ -521,6 +528,8 @@ class ParticleData : public GeometryState { int64_t current_work_; vector flux_derivs_; + + std::vector> cumulative_sensitivities_; // for sensitivities for this particle vector filter_matches_; @@ -583,6 +592,8 @@ class ParticleData : public GeometryState { const double& E() const { return E_; } double& E_last() { return E_last_; } const double& E_last() const { return E_last_; } + double& E_parent() { return E_parent_; } // for sensitivity analysis + const double& E_parent() const { return E_parent_; } // for SA int& g() { return g_; } const int& g() const { return g_; } int& g_last() { return g_last_; } @@ -681,6 +692,12 @@ class ParticleData : public GeometryState { // Used in tally derivatives double& flux_derivs(int i) { return flux_derivs_[i]; } const double& flux_derivs(int i) const { return flux_derivs_[i]; } + + // Used in sensitivity analysis + std::vector& cumulative_sensitivities(int i) { return cumulative_sensitivities_[i]; } + const std::vector& cumulative_sensitivities(int i) const { return cumulative_sensitivities_[i]; } + int& fission_nuclide() { return fission_nuclide_; } + const int& fission_nuclide() const { return fission_nuclide_; } // Matches of tallies decltype(filter_matches_)& filter_matches() { return filter_matches_; } @@ -749,6 +766,40 @@ class ParticleData : public GeometryState { d = 0; } } + + void resize_flux_derivs(int newSize) + { + flux_derivs_.resize(newSize, 0.0); + } + + // Resize and initialize sensitivity vectors + void resize_init_cumulative_sensitivities(int newSize) + { + cumulative_sensitivities_.resize(newSize, {0.0}); + } + + void resize_init_cumulative_sensitivities_vec(int indx, int newSize) + { + cumulative_sensitivities_[indx].resize(newSize, 0.0); + } + + void resize_alloc_filter_matches(int newSize) + { + filter_matches_.resize(newSize); + for (auto &match: filter_matches_){ + match.bins_.resize(1, 0.0); + match.weights_.resize(1, 0.0); + match.i_bin_ = 0; + // bins_present_ default is false from header + } + } + + void initialize_cumulative_sensitivities() + { + for (auto& it : cumulative_sensitivities_){ + std::fill(it.begin(), it.end(), 0.0); + } + } }; } // namespace openmc diff --git a/include/openmc/simulation.h b/include/openmc/simulation.h index 3e4e24e1d06..15eb2d43fce 100644 --- a/include/openmc/simulation.h +++ b/include/openmc/simulation.h @@ -72,6 +72,9 @@ void initialize_generation(); //! Full initialization of a particle history void initialize_history(Particle& p, int64_t index_source); +//! Helper function for initialize_history() that is called independently elsewhere +void initialize_history_partial(Particle& p); + //! Finalize a batch //! //! 
Handles synchronization and accumulation of tallies, calculation of Shannon diff --git a/include/openmc/tallies/filter.h b/include/openmc/tallies/filter.h index 65098597a5d..9f5265229b1 100644 --- a/include/openmc/tallies/filter.h +++ b/include/openmc/tallies/filter.h @@ -33,6 +33,7 @@ enum class FilterType { MATERIALFROM, MESH, MESHBORN, + IMPORTANCE, MESH_MATERIAL, MESH_SURFACE, MU, diff --git a/include/openmc/tallies/filter_importance.h b/include/openmc/tallies/filter_importance.h new file mode 100644 index 00000000000..f2d30bfef26 --- /dev/null +++ b/include/openmc/tallies/filter_importance.h @@ -0,0 +1,58 @@ +#ifndef OPENMC_TALLIES_FILTER_IMPORTANCE_H +#define OPENMC_TALLIES_FILTER_IMPORTANCE_H + +#include + +#include +#include "openmc/vector.h" +#include "openmc/tallies/filter.h" + +namespace openmc { + +//============================================================================== +//! Multiplies each tally by the importance corresponding to the mesh index +//============================================================================== + +class ImportanceFilter : public Filter { +public: + //---------------------------------------------------------------------------- + // Constructors, destructors + + ~ImportanceFilter() = default; + + //---------------------------------------------------------------------------- + // Methods + + std::string type_str() const override {return "importance";} + FilterType type() const override { return FilterType::IMPORTANCE; } + + void from_xml(pugi::xml_node node) override; + + void get_all_bins(const Particle& p, TallyEstimator estimator, FilterMatch& match) + const override; + + void to_statepoint(hid_t filter_group) const override; + + std::string text_label(int bin) const override; + + //---------------------------------------------------------------------------- + // Accessors + + const std::vector& importance() const { return importance_; } + void set_importance(gsl::span importance); + + virtual int32_t mesh() const {return mesh_;} + + virtual void set_mesh(int32_t mesh); + +protected: + //---------------------------------------------------------------------------- + // Data members + + std::vector importance_; + + int32_t mesh_; +}; + +} // namespace openmc +#endif // OPENMC_TALLIES_FILTER_IMPORTANCE_H \ No newline at end of file diff --git a/include/openmc/tallies/sensitivity.h b/include/openmc/tallies/sensitivity.h new file mode 100644 index 00000000000..a2833c4c71a --- /dev/null +++ b/include/openmc/tallies/sensitivity.h @@ -0,0 +1,132 @@ +#ifndef OPENMC_TALLIES_SENSITIVITY_H +#define OPENMC_TALLIES_SENSITIVITY_H + +#include "openmc/particle.h" +#include "openmc/tallies/filter.h" +#include "openmc/tallies/tally.h" +#include "openmc/vector.h" + +#include + +#include "pugixml.hpp" + +//============================================================================== +//! Describes a first-order sensitivity that can be applied to importance +// weighted tallies. 
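+//! A SensitivityTally pairs an ordinary Tally with a TallySensitivity
+//! description (nuclide, reaction, energy bins) and additionally accumulates
+//! the denominator <phi F psi> used to normalize the sensitivity coefficients.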
+//============================================================================== + +namespace openmc { + +//============================================================================== +// Class Definitions +//============================================================================== + +class SensitivityTally : public Tally +{ +public: + //---------------------------------------------------------------------------- + // Constructors, destructors, factory functions + SensitivityTally(int32_t id); + SensitivityTally(pugi::xml_node node); + virtual ~SensitivityTally(); + //static SensitivityTally* create(int32_t id = -1); + + //---------------------------------------------------------------------------- + // Accessors + + void set_filters(span filters) override; + + //---------------------------------------------------------------------------- + // Other methods. + void add_filter(Filter* filter) override { set_filters({&filter, 1}); } + + void init_results() override; + + void accumulate() override; + + void reset() override; + + //---------------------------------------------------------------------------- + // Major public data members. + + double denominator_ {0.0}; //!< Denominator of the sensitivity, <\phi F \psi> + + xt::xtensor previous_results_; + + bool gpt_ {false}; //!< if true, do not use denominator. + + //int sens_ {C_NONE}; //!< Index of a Sensitivity object for sensitivity tallies. +}; + +// Different independent variables +enum class SensitivityVariable { + CROSS_SECTION, + MULTIPOLE, + CURVE_FIT +}; + +struct TallySensitivity { + + SensitivityVariable variable; //!< Independent variable (like xs) + int id; //!< User-defined identifier + int sens_nuclide; //!< Nuclide the sensitivity is calculated for + int sens_element; //!< Element the sensitivity is calculated for + int sens_reaction; //!< Need something to specify reaction, use ReactionType? + int sens_material; //!< Material the sensitivity is calculated in + int sens_cell; //!< Cell the sensitivity is calculated in + std::vector energy_bins_; //!< Energy bins on which to discretize the cross section + int n_bins_; //!< something to indicate the size of the vector + + TallySensitivity() {} + explicit TallySensitivity(pugi::xml_node node); + + void set_bins(span bins); +}; + +//============================================================================== +// Non-method functions +//============================================================================== + +//! Read tally sensitivities from a tallies.xml file +void read_tally_sensitivities(pugi::xml_node node); + +//! Scale the given score by its logarithmic derivative + +//void +//apply_sensitivity_to_score(const Particle* p, int i_tally, int i_nuclide, +// double atom_density, int score_bin, double& score); + +//! Adjust diff tally flux derivatives for a particle scattering event. +// +//! Note that this subroutine will be called after absorption events in +//! addition to scattering events, but any flux derivatives scored after an +//! absorption will never be tallied. The paricle will be killed before any +//! further tallies are scored. +// +//! \param p The particle being tracked +void score_collision_sensitivity(Particle& p); + +//! Adjust diff tally flux derivatives for a particle tracking event. +// +//! \param p The particle being tracked +//! 
\param distance The distance in [cm] traveled by the particle +void score_track_sensitivity(Particle& p, double distance); + +void score_source_sensitivity(Particle& p); + +} // namespace openmc + +//============================================================================== +// Global variables +//============================================================================== + +namespace openmc { + +namespace model { +extern std::vector tally_sens; +extern std::unordered_map tally_sens_map; +} // namespace model + +} // namespace openmc + +#endif // OPENMC_TALLIES_SENSITIVITY_H \ No newline at end of file diff --git a/include/openmc/tallies/tally.h b/include/openmc/tallies/tally.h index 3beeb9d5ac5..0d28df26820 100644 --- a/include/openmc/tallies/tally.h +++ b/include/openmc/tallies/tally.h @@ -25,9 +25,10 @@ class Tally { public: //---------------------------------------------------------------------------- // Constructors, destructors, factory functions + Tally(); //<-Empty constructor explicit Tally(int32_t id); explicit Tally(pugi::xml_node node); - ~Tally(); + virtual ~Tally(); static Tally* create(int32_t id = -1); //---------------------------------------------------------------------------- @@ -93,7 +94,7 @@ class Tally { //! \brief Check if this tally has a specified type of filter bool has_filter(FilterType filter_type) const; - void set_filters(span filters); + virtual void set_filters(span filters); //! Given already-set filters, set the stride lengths void set_strides(); @@ -109,15 +110,15 @@ class Tally { //---------------------------------------------------------------------------- // Other methods. - void add_filter(Filter* filter); + virtual void add_filter(Filter* filter) { set_filters({&filter, 1}); } void init_triggers(pugi::xml_node node); - void init_results(); + virtual void init_results(); - void reset(); + virtual void reset(); - void accumulate(); + virtual void accumulate(); //! return the index of a score specified by name int score_index(const std::string& score) const; @@ -175,6 +176,39 @@ class Tally { vector triggers_; int deriv_ {C_NONE}; //!< Index of a TallyDerivative object for diff tallies. + + int sens_ {C_NONE}; //!< Index of a Sensitivity object for sensitivity tallies. + + void clearFiltersStrides() + { + filters_.clear(); + strides_.clear(); + } + + void reserveFilters(int nfilter) + { + filters_.reserve(nfilter); + } + + void resize_strides(int newSize) + { + strides_.resize(newSize, 0); + } + + void set_n_filter_bins(int32_t newValue) { n_filter_bins_ = newValue; } + + void set_index(int32_t newValue) { index_ = newValue; } + + void setStrides(int indx, int32_t value) { + strides_[indx] = value; + } + + int updateStrides(int indx, int stride) { + stride *= model::tally_filters[filters_[indx]]->n_bins(); + return stride; + } + + void addFilter(int32_t filtr) {filters_.push_back(filtr); } private: //---------------------------------------------------------------------------- diff --git a/include/openmc/tallies/tally_scoring.h b/include/openmc/tallies/tally_scoring.h index c3ab779e6a1..71a518b8617 100644 --- a/include/openmc/tallies/tally_scoring.h +++ b/include/openmc/tallies/tally_scoring.h @@ -107,6 +107,10 @@ void score_timed_tracklength_tally(Particle& p, double total_distance); //! \param tallies A vector of the indices of the tallies to score to void score_surface_tally(Particle& p, const vector& tallies); +//! Function to handle the special case when the tally is a sensitivity tally. 
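+//
+//! \param p The particle being tracked
+//! \param i_tally Index of the sensitivity tally being scored
+//! \param start_index Index of the first score bin to use for this nuclide
+//! \param filter_index Index of the matched filter bin
+//! \param filter_weight Weight associated with the matched filter bin
+//! \param i_nuclide Index of the nuclide at the collision site
+//! \param atom_density Atom density of that nuclide in [atom/b-cm]
+//! \param flux Flux estimate used for this scoring event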
+void score_collision_sensitivity_tally(Particle& p, int i_tally, int start_index, int filter_index, + double filter_weight, int i_nuclide, double atom_density, double flux); + //! Score the pulse-height tally //! This is triggered at the end of every particle history // diff --git a/include/openmc/wmp.h b/include/openmc/wmp.h index 6a4abd86714..c4f9d8348d8 100644 --- a/include/openmc/wmp.h +++ b/include/openmc/wmp.h @@ -68,6 +68,22 @@ class WindowedMultipole { //! fission cross sections in [b/K] std::tuple evaluate_deriv( double E, double sqrtkT) const; + + //! function that returns derivative std::pair> + //! separate functions for poles and coefficients? + //! separate functions for curvefit coefficients? + //! this would make a total of 6! + std::pair> evaluate_pole_deriv_total(double E, double sqrtkT); + + std::pair> evaluate_pole_deriv_scatter(double E, double sqrtkT); + + std::pair> evaluate_pole_deriv_fission(double E, double sqrtkT); + + std::pair> evaluate_fit_deriv_total(double E, double sqrtkT); + + std::pair> evaluate_fit_deriv_scatter(double E, double sqrtkT); + + std::pair> evaluate_fit_deriv_fission(double E, double sqrtkT); // Data members std::string name_; //!< Name of nuclide diff --git a/openmc/__init__.py b/openmc/__init__.py index bb972b4e6ad..18da66bf7a1 100644 --- a/openmc/__init__.py +++ b/openmc/__init__.py @@ -23,6 +23,7 @@ from openmc.filter_expansion import * from openmc.trigger import * from openmc.tally_derivative import * +from openmc.sensitivity import * from openmc.tallies import * from openmc.mgxs_library import * from openmc.executor import * diff --git a/openmc/filter.py b/openmc/filter.py index 6a666d2a02c..d7c454f9700 100644 --- a/openmc/filter.py +++ b/openmc/filter.py @@ -25,8 +25,8 @@ 'energyout', 'mu', 'musurface', 'polar', 'azimuthal', 'distribcell', 'delayedgroup', 'energyfunction', 'cellfrom', 'materialfrom', 'legendre', 'spatiallegendre', 'sphericalharmonics', 'zernike', 'zernikeradial', 'particle', 'cellinstance', - 'collision', 'time', 'parentnuclide', 'weight', 'meshborn', 'meshsurface', - 'meshmaterial', + 'collision', 'time', 'importance', 'parentnuclide', 'weight', 'meshborn', + 'meshsurface', 'meshmaterial', ) _CURRENT_NAMES = ( @@ -1670,6 +1670,196 @@ def from_group_structure(cls, group_structure): return cls(openmc.mgxs.GROUP_STRUCTURES[group_structure.upper()]) +class ImportanceFilter(Filter): + """Importance weighted tally, where importance is based on locations onto a + regular, rectangular mesh. 
+ + Parameters + ---------- + mesh : openmc.MeshBase + The mesh object that the importance is given on + filter_id : int + Unique identifier for the filter + + Attributes + ---------- + mesh : openmc.MeshBase + The mesh object that events will be tallied onto + id : int + Unique identifier for the filter + importance : numpy.ndarray + An array of values of the importance for each bin in the mesh + num_bins : Integral + The number of filter bins + + """ + + def __init__(self, importance, mesh, filter_id=None): + self.importance = np.asarray(importance) + self.mesh = mesh + self.id = filter_id + + def __hash__(self): + string = type(self).__name__ + '\n' + string += '{: <16}=\t{}\n'.format('\tMesh ID', self.mesh.id) + return hash(string) + + def __repr__(self): + string = type(self).__name__ + '\n' + string += '{: <16}=\t{}\n'.format('\tMesh ID', self.mesh.id) + string += '{: <16}=\t{}\n'.format('\tID', self.id) + return string + + @property + def bins(self): + raise AttributeError('ImportanceFilter have no bins.') + + @property + def num_bins(self): + return 1 + + @bins.setter + def bins(self, bins): + raise RuntimeError('ImportanceFilter have no bins.') + + @classmethod + def from_hdf5(cls, group, **kwargs): + if group['type'][()].decode() != cls.short_name.lower(): + raise ValueError("Expected HDF5 data for filter type '" + + cls.short_name.lower() + "' but got '" + + group['type'][()].decode() + " instead") + + if 'meshes' not in kwargs: + raise ValueError(cls.__name__ + " requires a 'meshes' keyword " + "argument.") + + mesh_id = group['mesh'][()] + mesh_obj = kwargs['meshes'][mesh_id] + importance = group['importance'][()] + filter_id = int(group.name.split('/')[-1].lstrip('filter ')) + + out = cls(importance,mesh_obj, filter_id=filter_id) + + return out + + @property + def mesh(self): + return self._mesh + + @property + def importance(self): + return self._importance + + @mesh.setter + def mesh(self, mesh): + cv.check_type('filter mesh', mesh, openmc.MeshBase) + self._mesh = mesh + + @importance.setter + def importance(self, importance): + self._importance = np.asarray(importance) + + def can_merge(self, other): + # Mesh filters cannot have more than one bin + return False + + def get_pandas_dataframe(self, data_size, stride, **kwargs): + """Builds a Pandas DataFrame for the Filter's bins. + + This method constructs a Pandas DataFrame object for the filter with + columns annotated by filter bin information. This is a helper method for + :meth:`Tally.get_pandas_dataframe`. + + Parameters + ---------- + data_size : int + The total number of bins in the tally corresponding to this filter + stride : int + Stride in memory for the filter + + Returns + ------- + pandas.DataFrame + A Pandas DataFrame with three columns describing the x,y,z mesh + cell indices corresponding to each filter bin. The number of rows + in the DataFrame is the same as the total number of bins in the + corresponding tally, with the filter bin appropriately tiled to map + to the corresponding tally bins. 
+ + See also + -------- + Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe() + + """ + # Initialize Pandas DataFrame + df = pd.DataFrame() + + # Initialize dictionary to build Pandas Multi-index column + filter_dict = {} + + # Append mesh ID as outermost index of multi-index + mesh_key = 'mesh {}'.format(self.mesh.id) + + # Find mesh dimensions - use 3D indices for simplicity + n_dim = len(self.mesh.dimension) + if n_dim == 3: + nx, ny, nz = self.mesh.dimension + elif n_dim == 2: + nx, ny = self.mesh.dimension + nz = 1 + else: + nx = self.mesh.dimension + ny = nz = 1 + + # Generate multi-index sub-column for x-axis + filter_dict[mesh_key, 'x'] = _repeat_and_tile( + np.arange(1, nx + 1), stride, data_size) + + # Generate multi-index sub-column for y-axis + filter_dict[mesh_key, 'y'] = _repeat_and_tile( + np.arange(1, ny + 1), nx * stride, data_size) + + # Generate multi-index sub-column for z-axis + filter_dict[mesh_key, 'z'] = _repeat_and_tile( + np.arange(1, nz + 1), nx * ny * stride, data_size) + + # Initialize a Pandas DataFrame from the mesh dictionary + df = pd.concat([df, pd.DataFrame(filter_dict)]) + + return df + + def to_xml_element(self): + """Return XML Element representing the Filter. + + Returns + ------- + element : xml.etree.ElementTree.Element + XML element containing filter data + + """ + + element = ET.Element('filter') + element.set('id', str(self.id)) + element.set('type', self.short_name.lower()) + + subelement = ET.SubElement(element, 'importance') + subelement.text = ' '.join(str(e) for e in self.importance) + + subelement = ET.SubElement(element, 'mesh') + subelement.text = str(self.mesh.id) + + return element + + @classmethod + def from_xml_element(cls, elem, **kwargs): + filter_id = int(elem.get('id')) + importance = [float(x) for x in get_text(elem, 'importance').split()] + mesh_id = int(get_text(elem, 'mesh')) + mesh_obj = kwargs['meshes'][mesh_id] + out = cls(importance, mesh_obj, filter_id=filter_id) + return out + + class EnergyoutFilter(EnergyFilter): """Bins tally events based on outgoing particle energy. 
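The new Python-side `ImportanceFilter` above takes one importance value per mesh element and exposes a single filter bin, so each scored event is weighted by the importance of the mesh element it falls in. A minimal sketch of the intended usage, with placeholder mesh dimensions and bounds:

```python
import numpy as np
import openmc

# Regular mesh covering the region of interest (bounds here are placeholders).
mesh = openmc.RegularMesh()
mesh.dimension = (10, 10, 10)
mesh.lower_left = (-50.0, -50.0, -50.0)
mesh.upper_right = (50.0, 50.0, 50.0)

# One importance (adjoint-flux) value per mesh element; flat weighting shown.
importance = np.ones(np.prod(mesh.dimension))

# The filter has a single bin; each event scored to the tally is weighted by
# the importance of the mesh element in which it occurs.
importance_filter = openmc.ImportanceFilter(importance, mesh)

tally = openmc.Tally(name='importance-weighted fission rate')
tally.filters = [importance_filter]
tally.scores = ['fission']

# The mesh referenced by the filter is written out automatically.
tallies = openmc.Tallies([tally])
tallies.export_to_xml()
```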
diff --git a/openmc/lib/core.py b/openmc/lib/core.py index 9f8db69d577..79d6a07cf56 100644 --- a/openmc/lib/core.py +++ b/openmc/lib/core.py @@ -22,9 +22,11 @@ class _SourceSite(Structure): _fields_ = [('r', c_double*3), ('u', c_double*3), ('E', c_double), + ('E_parent', c_double), ('time', c_double), ('wgt', c_double), ('delayed_group', c_int), + ('fission_nuclide', c_int), ('surf_id', c_int), ('particle', c_int), ('parent_nuclide', c_int), diff --git a/openmc/lib/filter.py b/openmc/lib/filter.py index 55b681d89ad..04b70096766 100644 --- a/openmc/lib/filter.py +++ b/openmc/lib/filter.py @@ -19,7 +19,7 @@ __all__ = [ 'Filter', 'AzimuthalFilter', 'CellFilter', 'CellbornFilter', 'CellfromFilter', 'CellInstanceFilter', 'CollisionFilter', 'DistribcellFilter', 'DelayedGroupFilter', - 'EnergyFilter', 'EnergyoutFilter', 'EnergyFunctionFilter', 'LegendreFilter', + 'EnergyFilter', 'EnergyoutFilter', 'EnergyFunctionFilter', 'ImportanceFilter', 'LegendreFilter', 'MaterialFilter', 'MaterialFromFilter', 'MeshFilter', 'MeshBornFilter', 'MeshMaterialFilter', 'MeshSurfaceFilter', 'MuFilter', 'MuSurfaceFilter', 'ParentNuclideFilter', 'ParticleFilter', 'PolarFilter', 'SphericalHarmonicsFilter', @@ -72,6 +72,19 @@ _dll.openmc_get_filter_index.argtypes = [c_int32, POINTER(c_int32)] _dll.openmc_get_filter_index.restype = c_int _dll.openmc_get_filter_index.errcheck = _error_handler +_dll.openmc_importance_filter_get_importance.argtypes = [ + c_int32, POINTER(POINTER(c_double)), POINTER(c_size_t)] +_dll.openmc_importance_filter_get_importance.restype = c_int +_dll.openmc_importance_filter_get_importance.errcheck = _error_handler +_dll.openmc_importance_filter_set_importance.argtypes = [c_int32, c_size_t, POINTER(c_double)] +_dll.openmc_importance_filter_set_importance.restype = c_int +_dll.openmc_importance_filter_set_importance.errcheck = _error_handler +_dll.openmc_importance_filter_get_mesh.argtypes = [c_int32, POINTER(c_int32)] +_dll.openmc_importance_filter_get_mesh.restype = c_int +_dll.openmc_importance_filter_get_mesh.errcheck = _error_handler +_dll.openmc_importance_filter_set_mesh.argtypes = [c_int32, c_int32] +_dll.openmc_importance_filter_set_mesh.restype = c_int +_dll.openmc_importance_filter_set_mesh.errcheck = _error_handler _dll.openmc_legendre_filter_get_order.argtypes = [c_int32, POINTER(c_int)] _dll.openmc_legendre_filter_get_order.restype = c_int _dll.openmc_legendre_filter_get_order.errcheck = _error_handler @@ -320,6 +333,43 @@ def _get_attr(self, cfunc): return as_array(array_p, (n.value, )) +class ImportanceFilter(Filter): + filter_type = 'importance' + + def __init__(self, importance=None, mesh=None, uid=None, new=True, index=None): + super().__init__(uid, new, index) + if importance is not None: + self.importance = importance + if mesh is not None: + self.mesh = mesh + + @property + def importance(self): + importance = POINTER(c_double)() + n = c_size_t() + _dll.openmc_importance_filter_get_importance(self._index, importance, n) + return as_array(importance, (n.value,)) + + @property + def mesh(self): + index_mesh = c_int32() + _dll.openmc_importance_filter_get_mesh(self._index, index_mesh) + return RegularMesh(index=index_mesh.value) + + @importance.setter + def importance(self, importance): + # Get numpy array as a double* + importance = np.asarray(importance) + importance_p = importance.ctypes.data_as(POINTER(c_double)) + + _dll.openmc_importance_filter_set_importance( + self._index, len(importance), importance_p) + + @mesh.setter + def mesh(self, mesh): + 
_dll.openmc_importance_filter_set_mesh(self._index, mesh._index) + + class LegendreFilter(Filter): filter_type = 'legendre' @@ -657,6 +707,7 @@ class ZernikeRadialFilter(ZernikeFilter): 'energy': EnergyFilter, 'energyout': EnergyoutFilter, 'energyfunction': EnergyFunctionFilter, + 'importance': ImportanceFilter, 'legendre': LegendreFilter, 'material': MaterialFilter, 'materialfrom': MaterialFromFilter, diff --git a/openmc/sensitivity.py b/openmc/sensitivity.py new file mode 100644 index 00000000000..e09f1ae1415 --- /dev/null +++ b/openmc/sensitivity.py @@ -0,0 +1,134 @@ +from numbers import Integral +import numpy as np +import lxml.etree as ET + +import openmc.checkvalue as cv +from .mixin import EqualityMixin, IDManagerMixin +from ._xml import get_text + + +class Sensitivity(EqualityMixin, IDManagerMixin): + """A material perturbation derivative to apply to a tally. + + Parameters + ---------- + derivative_id : int, optional + Unique identifier for the tally derivative. If none is specified, an + identifier will automatically be assigned + variable : str, optional + Accepted values are 'density', 'nuclide_density', and 'temperature' + material : int, optional + The perturbed material ID + nuclide : str, optional + The perturbed nuclide. Only needed for 'nuclide_density' derivatives. + Ex: 'Xe135' + + Attributes + ---------- + id : int + Unique identifier for the tally derivative + variable : str + Accepted values are 'density', 'nuclide_density', and 'temperature' + material : int + The perturubed material ID + nuclide : str + The perturbed nuclide. Only needed for 'nuclide_density' derivatives. + Ex: 'Xe135' + + """ + + next_id = 1 + used_ids = set() + + def __init__(self, sensitivity_id=None, variable=None, + nuclide=None, reaction=None,energy=None): + # Initialize Tally class attributes + self.id = sensitivity_id + self.variable = variable + self.nuclide = nuclide + self.reaction = reaction + self.energy = np.array(energy) + + def __repr__(self): + string = 'Sensitivity\n' + string += '{: <16}=\t{}\n'.format('\tID', self.id) + string += '{: <16}=\t{}\n'.format('\tVariable', self.variable) + string += '{: <16}=\t{}\n'.format('\tNuclide', self.nuclide) + if self.variable == 'cross_section': + string += '{: <16}=\t{}\n'.format('\tReaction', self.reaction) + string += '{: <16}=\t{}\n'.format('\tEnergy', self.energy) + + return string + + @property + def variable(self): + return self._variable + + @property + def nuclide(self): + return self._nuclide + + @property + def reaction(self): + return self._reaction + + @property + def energy(self): + return self._energy + + @variable.setter + def variable(self, var): + if var is not None: + cv.check_type('sensitivity variable', var, str) + cv.check_value('sensitivity variable', var, + ('cross_section', 'multipole', 'curve_fit')) + self._variable = var + + @nuclide.setter + def nuclide(self, nuc): + if nuc is not None: + cv.check_type('sensitivity nuclide', nuc, str) + self._nuclide = nuc + + @reaction.setter + def reaction(self, rxn): + if rxn is not None: + cv.check_type('sensitivity reaction', rxn, str) + self._reaction = rxn + + @energy.setter + def energy(self, ene): + self._energy = np.asarray(ene) + + def to_xml_element(self): + """Return XML representation of the tally sensitivity + + Returns + ------- + element : xml.etree.ElementTree.Element + XML element containing sensitivity data + + """ + + element = ET.Element("sensitivity") + element.set("id", str(self.id)) + + element.set("variable", self.variable) + element.set("nuclide", 
self.nuclide) + + if self.variable == 'cross_section': + element.set("reaction", self.reaction) + + subelement = ET.SubElement(element, 'energy') + subelement.text = ' '.join(str(e) for e in self.energy) + + return element + + @classmethod + def from_xml_element(cls, elem): + sens_id = int(elem.get("id")) + variable = elem.get("variable") + energy = [float(x) for x in get_text(elem, 'energy').split()] + nuclide = elem.get("nuclide") + reaction = elem.get("reaction") if variable == "cross_section" else None + return cls(sens_id, variable, nuclide, reaction, energy) \ No newline at end of file diff --git a/openmc/statepoint.py b/openmc/statepoint.py index a763db3971b..f7524b2d9d9 100644 --- a/openmc/statepoint.py +++ b/openmc/statepoint.py @@ -137,6 +137,7 @@ def __init__(self, filepath, autolink=True): self._filters = {} self._tallies = {} self._derivs = {} + self._senss = {} # Check filetype and version cv.check_filetype_version(self._f, 'statepoint', _VERSION_STATEPOINT) @@ -149,6 +150,7 @@ def __init__(self, filepath, autolink=True): self._global_tallies = None self._sparse = False self._derivs_read = False + self._senss_read = False # Automatically link in a summary file if one exists if autolink: @@ -445,6 +447,11 @@ def tallies(self): deriv_id = group['derivative'][()] tally.derivative = self.tally_derivatives[deriv_id] + # Read sensitivity information. + if 'sensitivity' in group: + sens_id = group['sensitivity'][()] + tally.sensitivity = self.tally_sensitivities[sens_id] + # Read all filters n_filters = group['n_filters'][()] if n_filters > 0: @@ -505,6 +512,33 @@ def tally_derivatives(self): return self._derivs + @property + def tally_sensitivities(self): + if not self._senss_read: + # Populate the dictionary if any sensitivities are present. + if 'sensitivities' in self._f['tallies']: + # Read the sensitivity ids. + base = 'tallies/sensitivities' + sens_ids = [int(k.split(' ')[1]) for k in self._f[base]] + + # Create each sensitivity object and add it to the dictionary. + for d_id in sens_ids: + group = self._f[f'tallies/sensitivities/sensitivity {d_id}'] + sens = openmc.Sensitivity(sensitivity_id=d_id) + sens.variable = group['independent variable'][()].decode() + if sens.variable == 'cross_section': + sens.nuclide = group['nuclide'][()].decode() + sens.reaction = group['reaction'][()].decode() + elif sens.variable == 'multipole': + sens.nuclide = group['nuclide'][()].decode() + elif sens.variable == 'curve_fit': + sens.nuclide = group['nuclide'][()].decode() + self._senss[d_id] = sens + + self._senss_read = True + + return self._senss + @property def version(self): return tuple(self._f.attrs['openmc_version']) diff --git a/openmc/tallies.py b/openmc/tallies.py index 075b1e99117..7e70a9ee4c3 100644 --- a/openmc/tallies.py +++ b/openmc/tallies.py @@ -1387,7 +1387,7 @@ def get_values(self, scores=[], filters=[], filter_bins=[], return data def get_pandas_dataframe(self, filters=True, nuclides=True, scores=True, - derivative=True, paths=True, float_format='{:.2e}'): + derivative=True, sensitivity=True, paths=True, float_format='{:.2e}'): """Build a Pandas DataFrame for the Tally data. 
This method constructs a Pandas DataFrame object for the Tally data @@ -1489,6 +1489,14 @@ def get_pandas_dataframe(self, filters=True, nuclides=True, scores=True, if self.derivative.nuclide is not None: df['d_nuclide'] = self.derivative.nuclide + # Include columns for sensitivity if user requested it + if sensitivity and (self.sensitivity is not None): + df['d_variable'] = self.sensitivity.variable + if self.sensitivity.nuclide is not None: + df['d_nuclide'] = self.sensitivity.nuclide + if self.sensitivity.reaction is not None: + df['d_reaction'] = self.sensitivity.reaction + # Append columns with mean, std. dev. for each tally bin df['mean'] = self.mean.ravel() df['std. dev.'] = self.std_dev.ravel() @@ -3163,6 +3171,111 @@ def diagonalize_filter(self, new_filter, filter_position=-1): return new_tally +class SensitivityTally(Tally): + + def __init__(self, tally_id=None, name=''): + # Initialize Tally class attributes + Tally.__init__(self,tally_id=tally_id, name = name) + self._sensitivity = None + self._gpt = False + + def __repr__(self): + parts = ['Sensitivity Tally'] + parts.append('{: <15}=\t{}'.format('ID', self.id)) + parts.append('{: <15}=\t{}'.format('Name', self.name)) + parts.append('{: <15}=\t{}'.format('Sensitivity ID', self.sensitivity.id)) + filters = ', '.join(type(f).__name__ for f in self.filters) + parts.append('{: <15}=\t{}'.format('Filters', filters)) + nuclides = ' '.join(str(nuclide) for nuclide in self.nuclides) + parts.append('{: <15}=\t{}'.format('Nuclides', nuclides)) + parts.append('{: <15}=\t{}'.format('Scores', self.scores)) + parts.append('{: <15}=\t{}'.format('Estimator', self.estimator)) + parts.append('{: <15}=\t{}'.format('Gen. Pert.', self.gpt)) + return '\n\t'.join(parts) + + @property + def shape(self): + length = len(self._sensitivity.energy) - 1 + return (length, self.num_nuclides, self.num_scores) + + @property + def sensitivity(self): + return self._sensitivity + + @sensitivity.setter + def sensitivity(self, sens): + cv.check_type('sensitivity', sens, openmc.Sensitivity, + none_ok=True) + self._sensitivity = sens + + @property + def gpt(self): + return self._gpt + + @gpt.setter + def gpt(self, value): + cv.check_type('generalized perturbation', value, bool) + self._gpt = value + + def to_xml_element(self): + """Return XML representation of the tally + + Returns + ------- + element : xml.etree.ElementTree.Element + XML element containing tally data + + """ + + element = ET.Element("sensitivity_tally") + + # Tally ID + element.set("id", str(self.id)) + + # Optional Tally name + if self.name != '': + element.set("name", self.name) + + # Multiply by density + if not self.multiply_density: + subelement = ET.SubElement(element, "multiply_density") + subelement.text = str(self.multiply_density).lower() + + # Optional Tally filters + if len(self.filters) > 0: + subelement = ET.SubElement(element, "filters") + subelement.text = ' '.join(str(f.id) for f in self.filters) + + # Optional Nuclides + if self.nuclides: + subelement = ET.SubElement(element, "nuclides") + subelement.text = ' '.join(str(n) for n in self.nuclides) + + # Scores + if len(self.scores) == 0: + msg = f'Unable to get XML for Tally ID="{self.id}" since it does ' \ + 'not contain any scores' + raise ValueError(msg) + + subelement = ET.SubElement(element, "scores") + subelement.text = ' '.join(str(x) for x in self.scores) + + # Optional Triggers + for trigger in self.triggers: + trigger.get_trigger_xml(element) + + # Optional sensitivities + subelement = ET.SubElement(element, "sensitivity") 
+ subelement.text = str(self.sensitivity.id) + + # Gen. Pert. + if self.gpt: + subelement = ET.SubElement(element, "gpt") + subelement.text = str(self.gpt).lower() + + return element + + class Tallies(cv.CheckedList): """Collection of Tallies used for an OpenMC simulation. @@ -3284,7 +3397,7 @@ def _create_mesh_subelements(self, root_element, memo=None): already_written = memo if memo else set() for tally in self: for f in tally.filters: - if isinstance(f, openmc.MeshFilter): + if isinstance(f, openmc.MeshFilter) or isinstance(f, openmc.ImportanceFilter): if f.mesh.id in already_written: continue if len(f.mesh.name) > 0: @@ -3316,6 +3429,19 @@ def _create_derivative_subelements(self, root_element): for d in derivs: root_element.append(d.to_xml_element()) + + def _create_sensitivity_subelements(self, root_element): + # Get a list of all sensitivity referenced in a tally. + senss = [] + for tally in self: + sens = tally.sensitivity + if sens is not None and sens not in senss: + senss.append(sens) + + # Add the sensitivity to the XML tree. + for d in senss: + root_element.append(d.to_xml_element()) + def to_xml_element(self, memo=None): """Creates a 'tallies' element to be written to an XML file. """ @@ -3325,6 +3451,10 @@ def to_xml_element(self, memo=None): self._create_filter_subelements(element) self._create_tally_subelements(element) self._create_derivative_subelements(element) + try: + self._create_sensitivity_subelements(element) + except: + pass # Clean the indentation in the file to be user-readable clean_indentation(element) @@ -3388,11 +3518,17 @@ def from_xml_element(cls, elem, meshes=None): deriv = openmc.TallyDerivative.from_xml_element(e) derivatives[deriv.id] = deriv + # Read sensitivity elements + senss = {} + for e in elem.findall('sensitivity'): + sens = openmc.Sensitivity.from_xml_element(e) + senss[sens.id] = sens + # Read tally elements tallies = [] for e in elem.findall('tally'): tally = openmc.Tally.from_xml_element( - e, filters=filters, derivatives=derivatives + e, filters=filters, derivatives=derivatives, sensitivity=senss ) tallies.append(tally) diff --git a/src/initialize.cpp b/src/initialize.cpp index 36f32611690..c00f49f1916 100644 --- a/src/initialize.cpp +++ b/src/initialize.cpp @@ -157,26 +157,28 @@ void initialize_mpi(MPI_Comm intracomm) // Create bank datatype SourceSite b; - MPI_Aint disp[11]; + MPI_Aint disp[13]; MPI_Get_address(&b.r, &disp[0]); MPI_Get_address(&b.u, &disp[1]); MPI_Get_address(&b.E, &disp[2]); - MPI_Get_address(&b.time, &disp[3]); - MPI_Get_address(&b.wgt, &disp[4]); - MPI_Get_address(&b.delayed_group, &disp[5]); - MPI_Get_address(&b.surf_id, &disp[6]); - MPI_Get_address(&b.particle, &disp[7]); - MPI_Get_address(&b.parent_nuclide, &disp[8]); - MPI_Get_address(&b.parent_id, &disp[9]); - MPI_Get_address(&b.progeny_id, &disp[10]); - for (int i = 10; i >= 0; --i) { + MPI_Get_address(&b.E_parent, &disp[3]); + MPI_Get_address(&b.time, &disp[4]); + MPI_Get_address(&b.wgt, &disp[5]); + MPI_Get_address(&b.delayed_group, &disp[6]); + MPI_Get_address(&b.fission_nuclide, &disp[7]); + MPI_Get_address(&b.surf_id, &disp[8]); + MPI_Get_address(&b.particle, &disp[9]); + MPI_Get_address(&b.parent_nuclide, &disp[10]); + MPI_Get_address(&b.parent_id, &disp[11]); + MPI_Get_address(&b.progeny_id, &disp[12]); + for (int i = 12; i >= 0; --i) { disp[i] -= disp[0]; } - int blocks[] {3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1}; - MPI_Datatype types[] {MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE, - MPI_DOUBLE, MPI_INT, MPI_INT, MPI_INT, MPI_INT, MPI_LONG, MPI_LONG}; - 
MPI_Type_create_struct(11, blocks, disp, types, &mpi::source_site); + int blocks[] {3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; + MPI_Datatype types[] {MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE, + MPI_DOUBLE, MPI_INT, MPI_INT, MPI_INT, MPI_INT, MPI_INT, MPI_LONG, MPI_LONG}; + MPI_Type_create_struct(13, blocks, disp, types, &mpi::source_site); MPI_Type_commit(&mpi::source_site); } #endif // OPENMC_MPI diff --git a/src/output.cpp b/src/output.cpp index 0a14e8843d8..12710040190 100644 --- a/src/output.cpp +++ b/src/output.cpp @@ -37,6 +37,7 @@ #include "openmc/simulation.h" #include "openmc/surface.h" #include "openmc/tallies/derivative.h" +#include "openmc/tallies/sensitivity.h" #include "openmc/tallies/filter.h" #include "openmc/tallies/tally.h" #include "openmc/tallies/tally_scoring.h" @@ -659,6 +660,28 @@ void write_tallies() } } + // Write sensitivity information. + if (tally.sens_ != C_NONE) { + const auto& sens {model::tally_sens[tally.sens_]}; + switch (sens.variable) { + case SensitivityVariable::CROSS_SECTION: + fmt::print(tallies_out, " Cross section sensitivity Nuclide {} Reaction {}\n", + data::nuclides[sens.sens_nuclide]->name_, reaction_name(sens.sens_reaction)); + break; + case SensitivityVariable::MULTIPOLE: + fmt::print(tallies_out, " Multipole sensitivity Nuclide {}\n", + data::nuclides[sens.sens_nuclide]->name_); + break; + case SensitivityVariable::CURVE_FIT: + fmt::print(tallies_out, " Curvefit sensitivity Nuclide {}\n", + data::nuclides[sens.sens_nuclide]->name_); + break; + default: + fatal_error(fmt::format("Sensitivity tally dependent variable for " + "tally {} not defined in output.cpp", tally.id_)); + } + } + // Initialize Filter Matches Object vector filter_matches; // Allocate space for tally filter matches @@ -706,6 +729,17 @@ void write_tallies() std::string score_name = score > 0 ? reaction_name(score) : score_names.at(score); double mean, stdev; + if (tally.sens_ != C_NONE){ + int n_bins = model::tally_sens[tally.sens_].n_bins_; + for (auto filter_idx = 0; filter_idx < n_bins; ++filter_idx){ + std::tie(mean, stdev) = mean_stdev( + &tally.results_(filter_idx, score_index, 0), tally.n_realizations_); + fmt::print(tallies_out, "{0:{1}}{2:<36} {3:.6} +/- {4:.6}\n", + "", indent + 1, score_name, mean, t_value * stdev); + } + score_index += 1; + continue; + } std::tie(mean, stdev) = mean_stdev(&tally.results_(filter_index, score_index, 0), tally.n_realizations_); diff --git a/src/particle.cpp b/src/particle.cpp index 06ccf462a18..e42b77b0717 100644 --- a/src/particle.cpp +++ b/src/particle.cpp @@ -26,6 +26,7 @@ #include "openmc/simulation.h" #include "openmc/source.h" #include "openmc/surface.h" +#include "openmc/tallies/sensitivity.h" #include "openmc/tallies/derivative.h" #include "openmc/tallies/tally.h" #include "openmc/tallies/tally_scoring.h" @@ -86,8 +87,12 @@ bool Particle::create_secondary( bank.r = r(); bank.u = u; bank.E = settings::run_CE ? 
E : g(); + if (settings::run_CE) { + bank.E_parent = this->E(); + } bank.time = time(); bank_second_E() += bank.E; + // if (!model::active_tallies.empty()) score_source_sensitivity(*this); return true; } @@ -120,6 +125,8 @@ void Particle::from_source(const SourceSite* src) n_collision() = 0; fission() = false; zero_flux_derivs(); + + initialize_cumulative_sensitivities(); lifetime() = 0.0; // Copy attributes from source bank site @@ -132,8 +139,10 @@ void Particle::from_source(const SourceSite* src) r_last_current() = src->r; r_last() = src->r; u_last() = src->u; + fission_nuclide() = src->fission_nuclide; if (settings::run_CE) { E() = src->E; + E_parent() = src->E_parent; g() = 0; } else { g() = static_cast(src->E); @@ -273,6 +282,7 @@ void Particle::event_advance() // Score flux derivative accumulators for differential tallies. if (!model::active_tallies.empty()) { score_track_derivative(*this, distance); + score_track_sensitivity(*this, distance); // Score cumulative sensitivity for sensitivity tallies. } // Set particle weight to zero if it hit the time boundary @@ -397,9 +407,12 @@ void Particle::event_collide() } // Score flux derivative accumulators for differential tallies. - if (!model::active_tallies.empty()) + if (!model::active_tallies.empty()) { score_collision_derivative(*this); - + score_collision_sensitivity(*this); // Score cumulative sensitivity for sensitivity tallies. + // score_source_sensitivity(*this); + } + #ifdef OPENMC_DAGMC_ENABLED history().reset(); #endif diff --git a/src/particle_data.cpp b/src/particle_data.cpp index 370ca12e469..ce6a3a2f882 100644 --- a/src/particle_data.cpp +++ b/src/particle_data.cpp @@ -10,6 +10,7 @@ #include "openmc/photon.h" #include "openmc/settings.h" #include "openmc/tallies/derivative.h" +#include "openmc/tallies/sensitivity.h" #include "openmc/tallies/filter.h" #include "openmc/tallies/tally.h" @@ -100,6 +101,10 @@ ParticleData::ParticleData() if (!model::active_tallies.empty() || settings::event_based) { flux_derivs_.resize(model::tally_derivs.size()); zero_flux_derivs(); + + // Every particle starts with no accumulated cumulative sensitivity + cumulative_sensitivities_.resize(model::tally_sens.size()); + initialize_cumulative_sensitivities(); } // Allocate space for tally filter matches diff --git a/src/particle_restart.cpp b/src/particle_restart.cpp index 6b7778211af..eeb07419153 100644 --- a/src/particle_restart.cpp +++ b/src/particle_restart.cpp @@ -12,6 +12,7 @@ #include "openmc/settings.h" #include "openmc/simulation.h" #include "openmc/tallies/derivative.h" +#include "openmc/tallies/sensitivity.h" #include "openmc/tallies/tally.h" #include "openmc/track_output.h" @@ -120,6 +121,23 @@ void run_particle_restart() if (p.write_track()) add_particle_track(p); + // Every particle starts with no accumulated flux derivative. + if (!model::active_tallies.empty()) { + p.resize_flux_derivs(model::tally_derivs.size()); + p.zero_flux_derivs(); + + // This is probably wrong because flux_derivs_ is a vector and + // cumulative_sensitivities_ is a vector of vectors... 
+ // these need to resized to be of size energy_bins or multipole_bins + p.resize_init_cumulative_sensitivities(model::tally_sens.size()); + for (int idx=0; idx< model::tally_sens.size();idx++){ + p.resize_init_cumulative_sensitivities_vec(idx, model::tally_sens[idx].n_bins_); + } + } + + // Allocate space for tally filter matches + p.filter_matches().resize(model::tally_filters.size()); + // Transport neutron transport_history_based_single_particle(p); diff --git a/src/physics.cpp b/src/physics.cpp index e947fecbb9c..921ca17689c 100644 --- a/src/physics.cpp +++ b/src/physics.cpp @@ -210,6 +210,10 @@ void create_fission_sites(Particle& p, int i_nuclide, const Reaction& rx) site.particle = ParticleType::neutron; site.time = p.time(); site.wgt = 1. / weight; + site.E_parent = p.E_last(); + site.fission_nuclide = i_nuclide; + site.parent_id = p.id(); + site.progeny_id = p.n_progeny()++; site.surf_id = 0; // Sample delayed group and angle/energy for fission reaction diff --git a/src/simulation.cpp b/src/simulation.cpp index b9986f47375..8ea52b5be1c 100644 --- a/src/simulation.cpp +++ b/src/simulation.cpp @@ -20,6 +20,7 @@ #include "openmc/source.h" #include "openmc/state_point.h" #include "openmc/tallies/derivative.h" +#include "openmc/tallies/sensitivity.h" #include "openmc/tallies/filter.h" #include "openmc/tallies/tally.h" #include "openmc/tallies/trigger.h" @@ -638,6 +639,29 @@ void initialize_history(Particle& p, int64_t index_source) // Prepare to write out particle track. if (p.write_track()) add_particle_track(p); + + initialize_history_partial(p); +} + +void initialize_history_partial(Particle& p) +{ + // Every particle starts with no accumulated flux derivative and + // no accumulated sensitivities. + // flux_derivs_ is a vector and cumulative_sensitivities_ is a vector of vectors... 
+ // these need to resized to be of size energy_bins or multipole_bins + if (!model::active_tallies.empty()) + { + p.resize_flux_derivs(model::tally_derivs.size()); + p.zero_flux_derivs(); + + p.resize_init_cumulative_sensitivities(model::tally_sens.size()); + for (int idx=0; idx< model::tally_sens.size();idx++){ + p.resize_init_cumulative_sensitivities_vec(idx, model::tally_sens[idx].n_bins_); + } + } + + // Allocate space for tally filter matches + p.resize_alloc_filter_matches(model::tally_filters.size()); } int overall_generation() diff --git a/src/state_point.cpp b/src/state_point.cpp index 8195c486507..a8570b31caa 100644 --- a/src/state_point.cpp +++ b/src/state_point.cpp @@ -24,6 +24,7 @@ #include "openmc/settings.h" #include "openmc/simulation.h" #include "openmc/tallies/derivative.h" +#include "openmc/tallies/sensitivity.h" #include "openmc/tallies/filter.h" #include "openmc/tallies/filter_mesh.h" #include "openmc/tallies/tally.h" @@ -149,6 +150,35 @@ extern "C" int openmc_statepoint_write(const char* filename, bool* write_source) close_group(derivs_group); } + // Write information for sensitivities + if (!model::tally_sens.empty()) { + hid_t senss_group = create_group(tallies_group, "sensitivities"); + for (const auto& sens : model::tally_sens) { + hid_t sens_group = create_group(senss_group, + "sensitivity " + std::to_string(sens.id)); + if (sens.variable == SensitivityVariable::CROSS_SECTION) { + write_dataset(sens_group, "independent variable", "cross_section"); + write_dataset(sens_group, "nuclide", + data::nuclides[sens.sens_nuclide]->name_); + write_dataset(sens_group, "reaction", + reaction_name(sens.sens_reaction)); + } else if (sens.variable == SensitivityVariable::MULTIPOLE) { + write_dataset(sens_group, "independent variable", "multipole"); + write_dataset(sens_group, "nuclide", + data::nuclides[sens.sens_nuclide]->name_); + } else if (sens.variable == SensitivityVariable::CURVE_FIT) { + write_dataset(sens_group, "independent variable", "curve_fit"); + write_dataset(sens_group, "nuclide", + data::nuclides[sens.sens_nuclide]->name_); + } else { + fatal_error("Independent variable for sensitivity " + + std::to_string(sens.id) + " not defined in state_point.cpp"); + } + close_group(sens_group); + } + close_group(senss_group); + } + // Write information for filters hid_t filters_group = create_group(tallies_group, "filters"); write_attribute(filters_group, "n_filters", model::tally_filters.size()); @@ -240,6 +270,9 @@ extern "C" int openmc_statepoint_write(const char* filename, bool* write_source) write_dataset( tally_group, "derivative", model::tally_derivs[tally->deriv_].id); + if (tally->sens_ != C_NONE) write_dataset(tally_group, "sensitivity", + model::tally_sens[tally->sens_].id); + // Write the tally score bins vector scores; for (auto sc : tally->scores_) diff --git a/src/tallies/filter.cpp b/src/tallies/filter.cpp index 79817981db0..b5790d1ba44 100644 --- a/src/tallies/filter.cpp +++ b/src/tallies/filter.cpp @@ -20,6 +20,7 @@ #include "openmc/tallies/filter_distribcell.h" #include "openmc/tallies/filter_energy.h" #include "openmc/tallies/filter_energyfunc.h" +#include "openmc/tallies/filter_importance.h" #include "openmc/tallies/filter_legendre.h" #include "openmc/tallies/filter_material.h" #include "openmc/tallies/filter_materialfrom.h" @@ -164,6 +165,8 @@ Filter* Filter::create(const std::string& type, int32_t id) return Filter::create(id); } else if (type == "zernikeradial") { return Filter::create(id); + } else if (type == "importance") { + return 
Filter::create(id); } else { throw std::runtime_error {fmt::format("Unknown filter type: {}", type)}; } diff --git a/src/tallies/filter_importance.cpp b/src/tallies/filter_importance.cpp new file mode 100644 index 00000000000..eb9198f9739 --- /dev/null +++ b/src/tallies/filter_importance.cpp @@ -0,0 +1,179 @@ +#include "openmc/tallies/filter_importance.h" + +#include + +#include "openmc/capi.h" +#include "openmc/search.h" +#include "openmc/settings.h" +#include "openmc/mesh.h" +#include "openmc/xml_interface.h" + +namespace openmc { + +//============================================================================== +// ImportanceFilter implementation +//============================================================================== + +void ImportanceFilter::from_xml(pugi::xml_node node) +{ + // Set the importance (adjoint flux) + auto importance = get_node_array(node, "importance"); + this->set_importance(importance); + + // Set the importance mesh + auto bins_ = get_node_array(node, "mesh"); + if (bins_.size() != 1) { + fatal_error("Only one mesh can be specified per " + type_str() + + " importance filter."); + } + + auto id = bins_[0]; + auto search = model::mesh_map.find(id); + if (search != model::mesh_map.end()) { + set_mesh(search->second); + } else{ + fatal_error(fmt::format( + "Could not find mesh {} specified on tally filter.", id)); + } +} + +void ImportanceFilter::set_importance(gsl::span importance) +{ + // Clear existing importance + importance_.clear(); + importance_.reserve(importance.size()); + + // Copy importance + for (gsl::index i = 0; i < importance.size(); ++i) { + importance_.push_back(importance[i]); + } + + n_bins_ = 1; // This is probably redundant, I set this in set_mesh() +} + +void ImportanceFilter::get_all_bins( + const Particle& p, TallyEstimator estimator, FilterMatch& match) const +{ + auto bin = model::meshes[mesh_]->get_bin(p.r()); + if (bin >= 0) { + match.bins_.push_back(0); // There is only one bin, (is it 0-indexed?) + match.weights_.push_back(importance_[bin]); // the weight is the importance in the mesh bin + } +} + +void ImportanceFilter::to_statepoint(hid_t filter_group) const +{ + Filter::to_statepoint(filter_group); + write_dataset(filter_group, "importance", importance_); + write_dataset(filter_group, "mesh", model::meshes[mesh_]->id_); +} + +std::string ImportanceFilter::text_label(int bin) const +{ + // returns the label of the importance mesh + auto& mesh = *model::meshes[mesh_]; + return mesh.bin_label(bin); +} + +void ImportanceFilter::set_mesh(int32_t mesh) +{ + mesh_ = mesh; + n_bins_ = 1; // We only have 1 bin in this filter +} + +//============================================================================== +// C-API functions +//============================================================================== + +extern"C" int +openmc_importance_filter_get_importance(int32_t index, const double** importance, size_t* n) +{ + // Make sure this is a valid index to an allocated filter. + if (int err = verify_filter(index)) return err; + + // Get a pointer to the filter and downcast. + const auto& filt_base = model::tally_filters[index].get(); + auto* filt = dynamic_cast(filt_base); + + // Check the filter type. + if (!filt) { + set_errmsg("Tried to get importance bins on a non-importance filter."); + return OPENMC_E_INVALID_TYPE; + } + + // Output the importance. 
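+  // The returned pointer aliases the filter's internal storage and is only
+  // valid until the importance values are next modified.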
+ *importance = filt->importance().data(); + *n = filt->importance().size(); + return 0; +} + +extern "C" int +openmc_importance_filter_set_importance(int32_t index, size_t n, const double* importance) +{ + // Make sure this is a valid index to an allocated filter. + if (int err = verify_filter(index)) return err; + + // Get a pointer to the filter and downcast. + const auto& filt_base = model::tally_filters[index].get(); + auto* filt = dynamic_cast(filt_base); + + // Check the filter type. + if (!filt) { + set_errmsg("Tried to set importance bins on a non-importance filter."); + return OPENMC_E_INVALID_TYPE; + } + + // Update the filter. + filt->set_importance({importance, n}); + return 0; +} + +extern "C" int +openmc_importance_filter_get_mesh(int32_t index, int32_t* index_mesh) +{ + // Make sure this is a valid index to an allocated filter. + if (int err = verify_filter(index)) return err; + + // Get a pointer to the filter and downcast. + const auto& filt_base = model::tally_filters[index].get(); + auto* filt = dynamic_cast(filt_base); + + // Check the filter type. + if (!filt) { + set_errmsg("Tried to get importance mesh on a non-importance filter."); + return OPENMC_E_INVALID_TYPE; + } + + // Output the mesh. + *index_mesh = filt->mesh(); + return 0; +} + +extern "C" int +openmc_importance_filter_set_mesh(int32_t index, int32_t index_mesh) +{ + // Make sure this is a valid index to an allocated filter. + if (int err = verify_filter(index)) return err; + + // Get a pointer to the filter and downcast. + const auto& filt_base = model::tally_filters[index].get(); + auto* filt = dynamic_cast(filt_base); + + // Check the filter type. + if (!filt) { + set_errmsg("Tried to set importance mesh on a non-importance filter."); + return OPENMC_E_INVALID_TYPE; + } + + // Check the mesh index. + if (index_mesh < 0 || index_mesh >= model::meshes.size()) { + set_errmsg("Index in 'meshes' array is out of bounds."); + return OPENMC_E_OUT_OF_BOUNDS; + } + + // Update the filter. 
+ filt->set_mesh(index_mesh); + return 0; +} + +}// namespace openmc \ No newline at end of file diff --git a/src/tallies/sensitivity.cpp b/src/tallies/sensitivity.cpp new file mode 100644 index 00000000000..042971cbf83 --- /dev/null +++ b/src/tallies/sensitivity.cpp @@ -0,0 +1,808 @@ +#include "openmc/tallies/sensitivity.h" + +#include "openmc/error.h" +#include "openmc/material.h" +#include "openmc/nuclide.h" +#include "openmc/settings.h" +#include "openmc/source.h" +#include "openmc/search.h" +#include "openmc/simulation.h" +#include "openmc/tallies/tally.h" +#include "openmc/xml_interface.h" +#include "openmc/message_passing.h" +#include "openmc/tallies/filter_importance.h" + +#include +#include "xtensor/xadapt.hpp" +#include "xtensor/xbuilder.hpp" // for empty_like +#include "xtensor/xview.hpp" + +template class std::vector; + +namespace openmc { + +//============================================================================== +// Global variables +//============================================================================== + +namespace model { + std::vector tally_sens; + std::unordered_map tally_sens_map; +} + +//============================================================================== +// SensitivityTally implementation +//============================================================================== + +SensitivityTally::SensitivityTally(int32_t id) +{ + set_index(model::tallies.size()); // Avoids warning about narrowing + this->set_id(id); + this->set_filters({}); +} + +SensitivityTally::SensitivityTally(pugi::xml_node node) +{ + set_index(model::tallies.size()); // Avoids warning about narrowing + + // Copy and set tally id + if (!check_for_node(node, "id")) { + throw std::runtime_error{"Must specify id for tally in tally XML file."}; + } + int32_t id = std::stoi(get_node_value(node, "id")); + this->set_id(id); + + if (check_for_node(node, "name")) name_ = get_node_value(node, "name"); + + // ======================================================================= + // READ DATA FOR FILTERS + + // Check if user is using old XML format and throw an error if so + if (check_for_node(node, "filter")) { + throw std::runtime_error{"Tally filters must be specified independently of " + "tallies in a element. The element itself should " + "have a list of filters that apply, e.g., 1 2 " + "where 1 and 2 are the IDs of filters specified outside of " + "."}; + } + + // Determine number of filters + std::vector filter_ids; + if (check_for_node(node, "filters")) { + filter_ids = get_node_array(node, "filters"); + } + + // Allocate and store filter user ids + std::vector filters; + for (int filter_id : filter_ids) { + // Determine if filter ID is valid + auto it = model::filter_map.find(filter_id); + if (it == model::filter_map.end()) { + throw std::runtime_error{fmt::format( + "Could not find filter {} specified on tally {}", filter_id, id_)}; + } + + // Store the index of the filter + filters.push_back(model::tally_filters[it->second].get()); + } + + // Set the filters + this->set_filters(filters); + + // ======================================================================= + // READ DATA FOR NUCLIDES + + // Sensitivity only allows total material rates. + + nuclides_.clear(); + + // By default, we tally just the total material rates. 
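+  // A nuclide entry of -1 denotes the total material.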
+ if (!check_for_node(node, "nuclides")) { + nuclides_.push_back(-1); + } else { + this->set_nuclides(node); + } + + // ======================================================================= + // READ DATA FOR SCORES + // Sensitivity only allows nu-fission score + + // Reset state and prepare for the new scores. + if (!check_for_node(node, "scores")){ + scores_.clear(); + simulation::need_depletion_rx = false; + scores_.reserve(1); + + auto score = SCORE_NU_FISSION; + + scores_.push_back(score); + } else { + this->set_scores(node); + } + + // ======================================================================= + // READ DATA FOR SENSITIVITIES + + if (!check_for_node(node, "sensitivity")) { + fatal_error(fmt::format("No sensitivity specified on tally {}.", id_)); + } + + // Check for a tally sensitivity. + if (check_for_node(node, "sensitivity")) { + int sens_id = std::stoi(get_node_value(node, "sensitivity")); + + // Find the sensitivity with the given id, and store it's index. + auto it = model::tally_sens_map.find(sens_id); + if (it == model::tally_sens_map.end()) { + fatal_error(fmt::format( + "Could not find sensitivity {} specified on tally {}", sens_id, id_)); + } + + sens_ = it->second; + //sens_ = it->first; + + // Only analog or collision estimators are supported for sensitivity + // tallies. + if (estimator_ == TallyEstimator::TRACKLENGTH) { + estimator_ = TallyEstimator::COLLISION; + } + } + + + // If settings.xml trigger is turned on, create tally triggers + if (settings::trigger_on) { + this->init_triggers(node); + } + + // check for gpt + if (check_for_node(node, "gpt")) { + gpt_ = true; + } + + // ======================================================================= + // SET TALLY ESTIMATOR + + // Check if user specified estimator + if (check_for_node(node, "estimator")) { + std::string est = get_node_value(node, "estimator"); + if (est == "analog") { + estimator_ = TallyEstimator::ANALOG; + } else if (est == "collision") { + // If the estimator was set to an analog estimator, this means the + // tally needs post-collision information + if (estimator_ == TallyEstimator::ANALOG) { + throw std::runtime_error{fmt::format("Cannot use collision estimator " + "for tally ", id_)}; + } + + // Set estimator to collision estimator + estimator_ = TallyEstimator::COLLISION; + + } else { + throw std::runtime_error{fmt::format( + "Invalid estimator '{}' on tally {}", est, id_)}; + } + } +} + +SensitivityTally::~SensitivityTally() +{ + model::tally_map.erase(id_); +} + +//SensitivityTally* +//SensitivityTally::create(int32_t id) +//{ +// model::tallies.push_back(std::make_unique(id)); +// return model::tallies.back().get(); +//} + +void +SensitivityTally::set_filters(span filters) +{ + // Clear old data. + clearFiltersStrides(); + + // Copy in the given filter indices. + auto n = filters.size(); + if (settings::run_mode == RunMode::EIGENVALUE) { + if (n != 1) { + throw std::runtime_error{fmt::format("Cannot use more than one filter for eigenvalue sensitivity")}; + } + } + reserveFilters(n); + + for (int i = 0; i < n; ++i) { + // Add index to vector of filters + auto& f {filters[i]}; + addFilter(model::filter_map.at(f->id())); + // filters_.push_back(model::filter_map.at(f->id())); + + // Keep track of indices for special filters. + if (settings::run_mode == RunMode::EIGENVALUE) { + if (!dynamic_cast(f)) { + throw std::runtime_error{fmt::format("Must use an importance filter for eigenvalue sensitivity")}; + } + } + } + + // Set the strides. 
Filters are traversed in reverse so that the last filter + // has the shortest stride in memory and the first filter has the longest + // stride. + resize_strides(n); + int stride = 1; + for (int i = n-1; i >= 0; --i) { + setStrides(i, stride); + // stride *= model::tally_filters[filters_[i]]->n_bins(); + updateStrides(i, stride); + } + set_n_filter_bins(stride); +} + +void SensitivityTally::init_results() +{ + int n_scores = 1; + int n_filter_bins = model::tally_sens[sens_].n_bins_; + results_ = xt::empty({n_filter_bins, n_scores, 3}); + previous_results_ = xt::empty({n_filter_bins, n_scores, 1}); +} + +void SensitivityTally::reset() +{ + n_realizations_ = 0; + if (results_.size() != 0) { + xt::view(results_, xt::all()) = 0.0; + xt::view(previous_results_, xt::all()) = 0.0; + } +} + +void SensitivityTally::accumulate() +{ + // Increment number of realizations + n_realizations_ += settings::reduce_tallies ? 1 : mpi::n_procs; + + if (mpi::master || !settings::reduce_tallies) { + // Calculate total source strength for normalization + double total_source = 0.0; + if (settings::run_mode == RunMode::FIXED_SOURCE) { + for (const auto& s : model::external_sources) { + total_source += s->strength(); + } + } else { + total_source = 1.0; + } + + // Account for number of source particles in normalization + if (gpt_) { + double norm = total_source / (settings::n_particles * settings::gen_per_batch); + denominator_ = 1.0 / norm; + } + + // Accumulate each result + // TODO: ignore the first realization + for (int i = 0; i < results_.shape()[0]; ++i) { + for (int j = 0; j < results_.shape()[1]; ++j) { + double val = previous_results_(i, j, SensitivityTallyResult::VALUE) / denominator_; + results_(i, j, SensitivityTallyResult::SUM) += val; + results_(i, j, SensitivityTallyResult::SUM_SQ) += val*val; + } + } + + denominator_ = 0.0; // elements in the tally " + "XML file"); + } + + if (id <= 0) + fatal_error(" IDs must be an integer greater than zero"); + + std::string variable_str = get_node_value(node, "variable"); + + std::string nuclide_name = get_node_value(node, "nuclide"); + bool found = false; + for (auto i = 0; i < data::nuclides.size(); ++i) { + if (data::nuclides[i]->name_ == nuclide_name) { + found = true; + sens_nuclide = i; + } + } + if (!found) { + fatal_error(fmt::format("Could not find the nuclide \"{}\" specified in " + "derivative {} in any material.", nuclide_name, id)); + } + + if (variable_str == "cross_section") { + variable = SensitivityVariable::CROSS_SECTION; + + // ADD LOGIC TO LOOK FOR ENERGY BINS OTHERWISE DEFAULT TO MAX AND MIN NEUTRON ENERGY + auto bins = get_node_array(node, "energy"); + this->set_bins(bins); + + // ADD LOGIC TO SET THE SCORE TYPE + auto reaction = get_node_value(node, "reaction"); + sens_reaction = reaction_type(reaction); + + } else if (variable_str == "multipole") { + variable = SensitivityVariable::MULTIPOLE; + + // check if curvefit sensitivities were asked for, maybe move to a different variable + + // ADD LOGIC TO SET THE BINS TO SIZE OF MULTIPOLE PARAMETERS + // set n_bins_ + const auto& nuc {*data::nuclides[sens_nuclide]}; + n_bins_ = nuc.multipole_->data_.shape()[0] * nuc.multipole_->data_.shape()[1] * 2; + + } else if (variable_str == "curve_fit") { + variable = SensitivityVariable::CURVE_FIT; + + // check if curvefit sensitivities were asked for, maybe move to a different variable + + // ADD LOGIC TO SET THE BINS TO SIZE OF MULTIPOLE PARAMETERS + // set n_bins_ + const auto& nuc {*data::nuclides[sens_nuclide]}; + n_bins_ = 
nuc.multipole_->curvefit_.shape()[0] * nuc.multipole_->curvefit_.shape()[1] * nuc.multipole_->curvefit_.shape()[2]; + + } else { + fatal_error(fmt::format("Unrecognized variable \"{}\" on derivative {}", + variable_str, id)); + } + + // sens_material = std::stoi(get_node_value(node, "material")); + // sens_cell = std::stoi(get_node_value(node, "cell")); + +} + +void +TallySensitivity::set_bins(span bins) +{ + // Clear existing bins + energy_bins_.clear(); + energy_bins_.reserve(bins.size()); + + // Copy bins, ensuring they are valid + for (gsl::index i = 0; i < bins.size(); ++i) { + if (i > 0 && bins[i] <= bins[i-1]) { + throw std::runtime_error{"Energy bins must be monotonically increasing."}; + } + energy_bins_.push_back(bins[i]); + } + + n_bins_ = energy_bins_.size() - 1; +} + +//============================================================================== +// Non-method functions +//============================================================================== + +void +read_tally_sensitivities(pugi::xml_node node) +{ + // Populate the sensitivities array. + for (auto sens_node : node.children("sensitivity")) + model::tally_sens.emplace_back(sens_node); + + // Fill the sensitivity map. + for (auto i = 0; i < model::tally_sens.size(); ++i) { + auto id = model::tally_sens[i].id; + auto search = model::tally_sens_map.find(id); + if (search == model::tally_sens_map.end()) { + model::tally_sens_map[id] = i; + } else { + fatal_error("Two or more sensitivities use the same unique ID: " + + std::to_string(id)); + } + } + + // Make sure sensitivities were not requested for an MG run. + if (!settings::run_CE && !model::tally_sens.empty()) + fatal_error("Sensitivities not supported in multi-group mode"); +} + +double get_nuclide_xs_sens(const Particle& p, int i_nuclide, int score_bin) +{ + const auto& nuc {*data::nuclides[i_nuclide]}; + + // Get reaction object, or return 0 if reaction is not present + auto m = nuc.reaction_index_[score_bin]; + if (m == C_NONE) + return 0.0; + const auto& rx {*nuc.reactions_[m]}; + const auto& micro {p.neutron_xs(i_nuclide)}; + + // In the URR, the (n,gamma) cross section is sampled randomly from + // probability tables. Make sure we use the sampled value (which is equal to + // absorption - fission) rather than the dilute average value + if (micro.use_ptable && score_bin == N_GAMMA) { + return micro.absorption - micro.fission; + } + + auto i_temp = micro.index_temp; + if (i_temp >= 0) { // Can be false due to multipole + // Get index on energy grid and interpolation factor + auto i_grid = micro.index_grid; + auto f = micro.interp_factor; + + // Calculate interpolated cross section + double xs = rx.xs(micro); + + // if (settings::run_mode == RunMode::EIGENVALUE && + // score_bin == HEATING_LOCAL) { + // // Determine kerma for fission as (EFR + EGP + EGD + EB)*sigma_f + // double kerma_fission = + // nuc.fragments_ + // ? ((*nuc.fragments_)(p.E_last()) + (*nuc.betas_)(p.E_last()) + + // (*nuc.prompt_photons_)(p.E_last()) + + // (*nuc.delayed_photons_)(p.E_last())) * + // micro.fission + // : 0.0; + // + // // Determine non-fission kerma as difference + // double kerma_non_fission = xs - kerma_fission; + // + // // Re-weight non-fission kerma by keff to properly balance energy release + // // and deposition. See D. P. Griesheimer, S. J. Douglass, and M. H. + // // Stedry, "Self-consistent energy normalization for quasistatic reactor + // // calculations", Proc. PHYSOR, Cambridge, UK, Mar 29-Apr 2, 2020. 
+ // xs = simulation::keff * kerma_non_fission + kerma_fission; + // } + return xs; + } else { + // For multipole, calculate (n,gamma) from other reactions + return rx.mt_ == N_GAMMA ? micro.absorption - micro.fission : 0.0; + } + return 0.0; +} + +void +score_track_sensitivity(Particle& p, double distance) +{ + // A void material cannot be perturbed so it will not affect sensitivities. + if (p.material() == MATERIAL_VOID) return; + const Material& material {*model::materials[p.material()]}; + + for (auto idx = 0; idx < model::tally_sens.size(); idx++) { + const auto& sens = model::tally_sens[idx]; + auto& cumulative_sensitivities = p.cumulative_sensitivities(idx); + // if (sens.sens_material != material.id_) // this particle location must be inside detector region? confirm + // continue; + + double atom_density = 0.; + if (sens.sens_nuclide >= 0) { + auto j = model::materials[p.material()]->mat_nuclide_index_[sens.sens_nuclide]; + if (j == C_NONE) continue; + atom_density = model::materials[p.material()]->atom_density_(j); + } + + switch (sens.variable) { + + case SensitivityVariable::CROSS_SECTION: + { + // Calculate the sensitivity with respect to the cross section + // at this energy + + // Get the post-collision energy of the particle. + auto E = p.E(); + + // Get the correct cross section + double macro_xs; + switch (sens.sens_reaction) { + case SCORE_TOTAL: + if (sens.sens_nuclide >=0){ + macro_xs = p.neutron_xs(sens.sens_nuclide).total * atom_density; + } else { + macro_xs = p.macro_xs().total; + } + break; + case SCORE_SCATTER: + if (sens.sens_nuclide >=0){ + macro_xs = (p.neutron_xs(sens.sens_nuclide).total + - p.neutron_xs(sens.sens_nuclide).absorption) * atom_density; + } else { + macro_xs = p.macro_xs().total - p.macro_xs().absorption; + } + break; + case ELASTIC: + if (sens.sens_nuclide >= 0) { + if (p.neutron_xs(sens.sens_nuclide).elastic == CACHE_INVALID) + data::nuclides[sens.sens_nuclide]->calculate_elastic_xs(p); + macro_xs = p.neutron_xs(sens.sens_nuclide).elastic * atom_density; + } + break; + case SCORE_ABSORPTION: + if (sens.sens_nuclide >=0){ + macro_xs = p.neutron_xs(sens.sens_nuclide).absorption * atom_density; + } else { + macro_xs = p.macro_xs().absorption; + } + break; + case SCORE_FISSION: + if (p.macro_xs().absorption == 0) continue; + + if (sens.sens_nuclide >= 0) { + macro_xs = p.neutron_xs(sens.sens_nuclide).fission * atom_density; + } else { + macro_xs = p.macro_xs().fission; + } + break; + // for reactions with multiplicities + case N_2N: + case N_2NA: + case N_3N: + case N_3NA: + case N_4N: + case N_2NP: + if (sens.sens_nuclide >= 0) { + macro_xs = p.neutron_xs(sens.sens_nuclide).reaction[sens.sens_reaction] * atom_density; + } + break; + default: + if (sens.sens_nuclide >= 0) { + macro_xs = get_nuclide_xs_sens(p, sens.sens_nuclide, sens.sens_reaction) * atom_density; + } + break; + } + // Bin the contribution. 
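+          // Indirect (track-length) term: d/d(ln Sigma_x) of the path attenuation exp(-Sigma_t * d) is -Sigma_x * d per segment.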
+ if (E >= sens.energy_bins_.front() && E <= sens.energy_bins_.back()) { + auto bin = lower_bound_index(sens.energy_bins_.begin(), sens.energy_bins_.end(), E); + cumulative_sensitivities[bin] -= distance * macro_xs; + } + } + break; + + case SensitivityVariable::MULTIPOLE: + { + // check if in resonance range + const auto& nuc {*data::nuclides[sens.sens_nuclide]}; + if (multipole_in_range(nuc, p.E())){ + // Calculate derivative of the total cross section at p->E_ + auto derivative = nuc.multipole_->evaluate_pole_deriv_total(p.E(), p.sqrtkT()); + + // the score is atom_density * derivative_total * distance + int start = derivative.first; + int size = derivative.second.size(); + + double score = atom_density*distance; + + for (int deriv_idx = start; deriv_idx < start + size ; deriv_idx++){ + cumulative_sensitivities[deriv_idx] -= score*derivative.second[deriv_idx - start]; + } + } + } + break; + + case SensitivityVariable::CURVE_FIT: + { + // check if in resonance range + const auto& nuc {*data::nuclides[sens.sens_nuclide]}; + if (multipole_in_range(nuc, p.E())){ + // Calculate derivative of the total cross section at p->E_ + auto derivative = nuc.multipole_->evaluate_fit_deriv_total(p.E(), p.sqrtkT()); + + // the score is atom_density * derivative_total * distance + int start = derivative.first; + int size = derivative.second.size(); + + double score = atom_density*distance; + + for (int deriv_idx = start; deriv_idx < start + size ; deriv_idx++){ + cumulative_sensitivities[deriv_idx] -= score*derivative.second[deriv_idx - start]; + } + } + } + break; + } + } +} + +void score_collision_sensitivity(Particle& p) +{ + // A void material cannot be perturbed so it will not affect sensitivities. + if (p.material() == MATERIAL_VOID) return; + + // only scattering events effect the cumulative tallies + if (p.event() != TallyEvent::SCATTER) return; + + const Material& material {*model::materials[p.material()]}; + + for (auto idx = 0; idx < model::tally_sens.size(); idx++) { + const auto& sens = model::tally_sens[idx]; + auto& cumulative_sensitivities = p.cumulative_sensitivities(idx); + + // if (sens.sens_material != material.id_) continue; + + if (p.event_nuclide() != sens.sens_nuclide) continue; + // Find the index in this material for the diff_nuclide. + int i; + for (i = 0; i < material.nuclide_.size(); ++i) + if (material.nuclide_[i] == sens.sens_nuclide) break; + // Make sure we found the nuclide. + if (material.nuclide_[i] != sens.sens_nuclide) { + fatal_error(fmt::format( + "Could not find nuclide {} in material {} for tally sensitivity {}", + data::nuclides[sens.sens_nuclide]->name_, material.id_, sens.id)); + } + + switch (sens.variable) { + + case SensitivityVariable::CROSS_SECTION: + { + + // Get the pre-collision energy of the particle. + double E = p.E_last(); + + // Get the correct cross section + double score; + switch (sens.sens_reaction) { + case SCORE_TOTAL: + score = 0.0; + break; + case SCORE_SCATTER: + score = 1.0; + break; + case ELASTIC: + if (p.event_mt() != ELASTIC) continue; + score = 1.0; + break; + case SCORE_ABSORPTION: + score = 0.0; + break; + case SCORE_FISSION: + score = 0.0; + break; + // reactions where the neutron is absorbed, no contribution to collision sensitivity + case N_T: + case N_XT: + case N_GAMMA: + case N_P: + case N_A: + case N_D: + case N_3HE: + score = 0.0; + break; + default: + if (p.event_mt() != sens.sens_reaction) continue; + score = 1.0; + break; + } + + // Bin the contribution. 
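+        // Collision term: each sampled event of the perturbed reaction adds +1, binned at the pre-collision energy.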
+ if (E >= sens.energy_bins_.front() && E <= sens.energy_bins_.back()) { + auto bin = lower_bound_index(sens.energy_bins_.begin(), sens.energy_bins_.end(), E); + cumulative_sensitivities[bin] += score; + } + } + break; + + case SensitivityVariable::MULTIPOLE: + { + // check if in resonance range + const auto& nuc {*data::nuclides[i]}; + if (multipole_in_range(nuc, p.E_last())){ + // Calculate derivative of the scattering cross section at p->E_last_ + const auto& micro_xs {p.neutron_xs(i)}; + auto derivative = nuc.multipole_->evaluate_pole_deriv_scatter(p.E_last(), p.sqrtkT()); + + // sum/bin 1/micro_sigma_scatter * derivative + int start = derivative.first; + int size = derivative.second.size(); + + double scatter = (micro_xs.total - micro_xs.absorption); + + for (int deriv_idx = start; deriv_idx < start + size ; deriv_idx++){ + cumulative_sensitivities[deriv_idx] += derivative.second[deriv_idx - start]/scatter; + } + } + } + break; + + case SensitivityVariable::CURVE_FIT: + { + // check if in resonance range + const auto& nuc {*data::nuclides[i]}; + if (multipole_in_range(nuc, p.E_last())){ + // Calculate derivative of the scattering cross section at p->E_last_ + const auto& micro_xs {p.neutron_xs(i)}; + auto derivative = nuc.multipole_->evaluate_fit_deriv_scatter(p.E_last(), p.sqrtkT()); + + // sum/bin 1/micro_sigma_scatter * derivative + int start = derivative.first; + int size = derivative.second.size(); + + double scatter = (micro_xs.total - micro_xs.absorption); + + for (int deriv_idx = start; deriv_idx < start + size ; deriv_idx++){ + cumulative_sensitivities[deriv_idx] += derivative.second[deriv_idx - start]/scatter; + } + } + } + break; + } + } +} + +void score_source_sensitivity(Particle& p) +{ + // A void material cannot be perturbed so it will not affect sensitivities. + if (p.material() == MATERIAL_VOID) return; + + // only scattering events affect source + if (p.event() != TallyEvent::SCATTER) return; + + const Material& material {*model::materials[p.material()]}; + + for (auto idx = 0; idx < model::tally_sens.size(); idx++) { + const auto& sens = model::tally_sens[idx]; + auto& cumulative_sensitivities = p.cumulative_sensitivities(idx); + + if (p.event_nuclide() != sens.sens_nuclide) continue; + // Find the index in this material for the diff_nuclide. + int i; + for (i = 0; i < material.nuclide_.size(); ++i) + if (material.nuclide_[i] == sens.sens_nuclide) break; + // Make sure we found the nuclide. + if (material.nuclide_[i] != sens.sens_nuclide) { + fatal_error(fmt::format( + "Could not find nuclide {} in material {} for tally sensitivity {}", + data::nuclides[sens.sens_nuclide]->name_, material.id_, sens.id)); + } + + switch (sens.variable) { + + case SensitivityVariable::CROSS_SECTION: + { + + // Get the energy of the secondary particle. + double E = p.E_last(); + + // only scattering events that produce secondary particles + double score; + switch (sens.sens_reaction) { + case N_2N: + case N_2NA: + case N_3N: + case N_3NA: + if (p.event_mt() != sens.sens_reaction) continue; + // fraction of scattering due to (n,xn) reaction + double nxn_fraction; + nxn_fraction = get_nuclide_xs_sens(p, sens.sens_nuclide, sens.sens_reaction) / + (p.neutron_xs(sens.sens_nuclide).total - p.neutron_xs(sens.sens_nuclide).absorption); + score = 1.0 - nxn_fraction; + break; + default: + score = 0.0; + break; + } + + // Bin the energy. 
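+        // Only the (n,xn) reactions handled above contribute; all other events score zero.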
+ if (E >= sens.energy_bins_.front() && E <= sens.energy_bins_.back()) { + auto bin = lower_bound_index(sens.energy_bins_.begin(), sens.energy_bins_.end(), E); + cumulative_sensitivities[bin] += score; + } + } + break; + } + } +} + +}// namespace openmc diff --git a/src/tallies/tally.cpp b/src/tallies/tally.cpp index b9c615ecb5b..4bae65519bd 100644 --- a/src/tallies/tally.cpp +++ b/src/tallies/tally.cpp @@ -17,6 +17,7 @@ #include "openmc/simulation.h" #include "openmc/source.h" #include "openmc/tallies/derivative.h" +#include "openmc/tallies/sensitivity.h" #include "openmc/tallies/filter.h" #include "openmc/tallies/filter_cell.h" #include "openmc/tallies/filter_cellborn.h" @@ -81,6 +82,10 @@ double global_tally_leakage; //============================================================================== // Tally object implementation //============================================================================== +Tally::Tally() +{ + // Empty constructor that does nothing +} Tally::Tally(int32_t id) { @@ -466,6 +471,21 @@ bool Tally::has_filter(FilterType filter_type) const return false; } +// void Tally::set_filters(gsl::span filters) +// { +// // Clear old data. +// filters_.clear(); +// strides_.clear(); +// +// // Copy in the given filter indices. +// auto n = filters.size(); +// filters_.reserve(n); +// +// for (auto* filter : filters) { +// add_filter(filter); +// } +// } + void Tally::set_filters(span filters) { // Clear old data. @@ -476,11 +496,46 @@ void Tally::set_filters(span filters) auto n = filters.size(); filters_.reserve(n); - for (auto* filter : filters) { - add_filter(filter); + for (auto* filter : filters) { + int32_t filter_idx = model::filter_map.at(filter->id()); + // if this filter is already present, do nothing and return + if (std::find(filters_.begin(), filters_.end(), filter_idx) != filters_.end()) + return; + + // Keep track of indices for special filters + if (filter->type() == FilterType::ENERGY_OUT) { + energyout_filter_ = filters_.size(); + } else if (filter->type() == FilterType::DELAYED_GROUP) { + delayedgroup_filter_ = filters_.size(); + } else if (filter->type() == FilterType::CELL) { + cell_filter_ = filters_.size(); + } else if (filter->type() == FilterType::ENERGY) { + energy_filter_ = filters_.size(); + } + filters_.push_back(filter_idx); } } +// void Tally::add_filter(Filter* filter) +// { +// int32_t filter_idx = model::filter_map.at(filter->id()); +// // if this filter is already present, do nothing and return +// if (std::find(filters_.begin(), filters_.end(), filter_idx) != filters_.end()) +// return; +// +// // Keep track of indices for special filters +// if (filter->type() == FilterType::ENERGY_OUT) { +// energyout_filter_ = filters_.size(); +// } else if (filter->type() == FilterType::DELAYED_GROUP) { +// delayedgroup_filter_ = filters_.size(); +// } else if (filter->type() == FilterType::CELL) { +// cell_filter_ = filters_.size(); +// } else if (filter->type() == FilterType::ENERGY) { +// energy_filter_ = filters_.size(); +// } +// filters_.push_back(filter_idx); +// } + void Tally::add_filter(Filter* filter) { int32_t filter_idx = model::filter_map.at(filter->id()); @@ -937,6 +992,9 @@ void read_tallies_xml(pugi::xml_node root) // Read data for tally derivatives read_tally_derivatives(root); + + // Read data for tally sensitivities + read_tally_sensitivities(root); // ========================================================================== // READ FILTER DATA @@ -960,6 +1018,10 @@ void read_tallies_xml(pugi::xml_node root) for (auto 
node_tal : root.children("tally")) { model::tallies.push_back(make_unique(node_tal)); } + + for (auto node_tal : root.children("sensitivity_tally")) { + model::tallies.push_back(make_unique(node_tal)); + } } #ifdef OPENMC_MPI @@ -1165,6 +1227,9 @@ void free_memory_tally() { model::tally_derivs.clear(); model::tally_deriv_map.clear(); + + model::tally_sens.clear(); + model::tally_sens_map.clear(); model::tally_filters.clear(); model::filter_map.clear(); diff --git a/src/tallies/tally_scoring.cpp b/src/tallies/tally_scoring.cpp index 67e851644a9..f387aa85429 100644 --- a/src/tallies/tally_scoring.cpp +++ b/src/tallies/tally_scoring.cpp @@ -15,6 +15,7 @@ #include "openmc/simulation.h" #include "openmc/string_utils.h" #include "openmc/tallies/derivative.h" +#include "openmc/tallies/sensitivity.h" #include "openmc/tallies/filter.h" #include "openmc/tallies/filter_cell.h" #include "openmc/tallies/filter_delayedgroup.h" @@ -2595,6 +2596,14 @@ void score_collision_tally(Particle& p) } } + // Add a check to see if tally is a sensitivity tally, if so use special function + // check if sens_ != C_NONE + if (tally.sens_ != C_NONE) { + score_collision_sensitivity_tally(p, i_tally, i*tally.scores_.size(), filter_index, + filter_weight, i_nuclide, atom_density, flux); + continue; + } + // TODO: consider replacing this "if" with pointers or templates if (settings::run_CE) { score_general_ce_nonanalog(p, i_tally, i * tally.scores_.size(), @@ -2619,6 +2628,1020 @@ void score_collision_tally(Particle& p) match.bins_present_ = false; } +void score_collision_sensitivity_tally(Particle& p, int i_tally, int start_index, int filter_index, + double filter_weight, int i_nuclide, double atom_density, double flux) +{ + SensitivityTally& tally = dynamic_cast(*model::tallies[i_tally]); + + // Add check to see if filter is an importance filter, if not return + + // Get the pre-collision energy of the particle. + auto E = p.E_last(); + + // Determine how much weight was absorbed due to survival biasing + double wgt_absorb = 0.0; + if (settings::survival_biasing) { + wgt_absorb = p.wgt_last() * p.neutron_xs(i_nuclide).absorption / + p.neutron_xs(i_nuclide).total; + flux = (p.wgt_last() - wgt_absorb) / p.macro_xs().total; + } + + for (auto i = 0; i < tally.scores_.size(); ++i) { + auto score_bin = tally.scores_[i]; + auto score_index = start_index + i; + double score = 0.0; + + switch (score_bin) { + case SCORE_FLUX: + if (tally.estimator_ == TallyEstimator::ANALOG) { + // All events score to a flux bin. We actually use a collision estimator + // in place of an analog one since there is no way to count 'events' + // exactly for the flux + if (settings::survival_biasing) { + // We need to account for the fact that some weight was already + // absorbed + score = p.wgt_last() - wgt_absorb; + } else { + score = p.wgt_last(); + } + + if (p.type() == ParticleType::neutron || + p.type() == ParticleType::photon) { + score *= flux / p.macro_xs().total; + } else { + score = 0.; + } + } else { + score = flux; + } + break; + + + case SCORE_TOTAL: + if (tally.estimator_ == TallyEstimator::ANALOG) { + // All events will score to the total reaction rate. 
We can just use + // use the weight of the particle entering the collision as the score + if (settings::survival_biasing) { + // We need to account for the fact that some weight was already + // absorbed + score = (p.wgt_last() - wgt_absorb) * flux; + } else { + score = p.wgt_last() * flux; + } + + } else { + if (i_nuclide >= 0) { + if (p.type() == ParticleType::neutron) { + score = p.neutron_xs(i_nuclide).total * atom_density * flux; + } else if (p.type() == ParticleType::photon) { + score = p.photon_xs(i_nuclide).total * atom_density * flux; + } + } else { + score = p.macro_xs().total * flux; + } + } + break; + + + case SCORE_INVERSE_VELOCITY: + if (tally.estimator_ == TallyEstimator::ANALOG) { + // All events score to an inverse velocity bin. We actually use a + // collision estimator in place of an analog one since there is no way + // to count 'events' exactly for the inverse velocity + if (settings::survival_biasing) { + // We need to account for the fact that some weight was already + // absorbed + score = p.wgt_last() - wgt_absorb; + } else { + score = p.wgt_last(); + } + score *= flux / p.macro_xs().total; + } else { + score = flux; + } + // Score inverse velocity in units of s/cm. + score /= std::sqrt(2. * E / MASS_NEUTRON_EV) * C_LIGHT * 100.; + break; + + + case SCORE_SCATTER: + if (tally.estimator_ == TallyEstimator::ANALOG) { + // Skip any event where the particle didn't scatter + if (p.event() != TallyEvent::SCATTER) continue; + // Since only scattering events make it here, again we can use the + // weight entering the collision as the estimator for the reaction rate + score = p.wgt_last() * flux; + } else { + if (i_nuclide >= 0) { + score = (p.neutron_xs(i_nuclide).total + - p.neutron_xs(i_nuclide).absorption) * atom_density * flux; + } else { + score = (p.macro_xs().total + - p.macro_xs().absorption) * flux; + } + } + break; + + + case SCORE_NU_SCATTER: + // Only analog estimators are available. + // Skip any event where the particle didn't scatter + if (p.event() != TallyEvent::SCATTER) continue; + // For scattering production, we need to use the pre-collision weight + // times the yield as the estimate for the number of neutrons exiting a + // reaction with neutrons in the exit channel + if (p.event_mt() == ELASTIC || p.event_mt() == N_LEVEL || + (p.event_mt() >= N_N1 && p.event_mt() <= N_NC)) { + // Don't waste time on very common reactions we know have + // multiplicities of one. 
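+        // (ELASTIC, N_LEVEL, and the N_N1 through N_NC levels each emit exactly one neutron)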
+ score = p.wgt_last() * flux; + } else { + // Get yield and apply to score + auto m = data::nuclides[p.event_nuclide()]->reaction_index_[p.event_mt()]; + const auto& rxn {*data::nuclides[p.event_nuclide()]->reactions_[m]}; + score = p.wgt_last() * flux * (*rxn.products_[0].yield_)(E); + } + break; + + + case SCORE_ABSORPTION: + if (tally.estimator_ == TallyEstimator::ANALOG) { + if (settings::survival_biasing) { + // No absorption events actually occur if survival biasing is on -- + // just use weight absorbed in survival biasing + score = wgt_absorb * flux; + } else { + // Skip any event where the particle wasn't absorbed + if (p.event() == TallyEvent::SCATTER) continue; + // All fission and absorption events will contribute here, so we + // can just use the particle's weight entering the collision + score = p.wgt_last() * flux; + } + } else { + if (i_nuclide >= 0) { + score = p.neutron_xs(i_nuclide).absorption * atom_density + * flux; + } else { + score = p.macro_xs().absorption * flux; + } + } + break; + + + case SCORE_FISSION: + if (p.macro_xs().absorption == 0) continue; + if (tally.estimator_ == TallyEstimator::ANALOG) { + if (settings::survival_biasing) { + // No fission events occur if survival biasing is on -- need to + // calculate fraction of absorptions that would have resulted in + // fission + if (p.neutron_xs(p.event_nuclide()).absorption > 0) { + score = wgt_absorb + * p.neutron_xs(p.event_nuclide()).fission + / p.neutron_xs(p.event_nuclide()).absorption * flux; + } else { + score = 0.; + } + } else { + // Skip any non-absorption events + if (p.event() == TallyEvent::SCATTER) continue; + // All fission events will contribute, so again we can use particle's + // weight entering the collision as the estimate for the fission + // reaction rate + score = p.wgt_last() + * p.neutron_xs(p.event_nuclide()).fission + / p.neutron_xs(p.event_nuclide()).absorption * flux; + } + } else { + if (i_nuclide >= 0) { + score = p.neutron_xs(i_nuclide).fission * atom_density * flux; + } else { + score = p.macro_xs().fission * flux; + } + } + break; + + + case SCORE_NU_FISSION: + if (p.macro_xs().absorption == 0) continue; + if (tally.estimator_ == TallyEstimator::ANALOG) { + if (settings::survival_biasing || p.fission()) { + if (tally.energyout_filter_ != C_NONE) { + // Fission has multiple outgoing neutrons so this helper function + // is used to handle scoring the multiple filter bins. + score_fission_eout(p, i_tally, score_index, score_bin); + continue; + } + } + if (settings::survival_biasing) { + // No fission events occur if survival biasing is on -- need to + // calculate fraction of absorptions that would have resulted in + // nu-fission + if (p.neutron_xs(p.event_nuclide()).absorption > 0) { + score = wgt_absorb + * p.neutron_xs(p.event_nuclide()).nu_fission + / p.neutron_xs(p.event_nuclide()).absorption * flux; + } else { + score = 0.; + } + } else { + // Skip any non-fission events + if (!p.fission()) continue; + // If there is no outgoing energy filter, than we only need to score + // to one bin. For the score to be 'analog', we need to score the + // number of particles that were banked in the fission bank. Since + // this was weighted by 1/keff, we multiply by keff to get the proper + // score. 
+ score = simulation::keff * p.wgt_bank() * flux; + } + } else { + if (i_nuclide >= 0) { + score = p.neutron_xs(i_nuclide).nu_fission * atom_density + * flux; + } else { + score = p.macro_xs().nu_fission * flux; + } + } + break; + + + case SCORE_PROMPT_NU_FISSION: + if (p.macro_xs().absorption == 0) continue; + if (tally.estimator_ == TallyEstimator::ANALOG) { + if (settings::survival_biasing || p.fission()) { + if (tally.energyout_filter_ != C_NONE) { + // Fission has multiple outgoing neutrons so this helper function + // is used to handle scoring the multiple filter bins. + score_fission_eout(p, i_tally, score_index, score_bin); + continue; + } + } + if (settings::survival_biasing) { + // No fission events occur if survival biasing is on -- need to + // calculate fraction of absorptions that would have resulted in + // prompt-nu-fission + if (p.neutron_xs(p.event_nuclide()).absorption > 0) { + score = wgt_absorb + * p.neutron_xs(p.event_nuclide()).fission + * data::nuclides[p.event_nuclide()] + ->nu(E, ReactionProduct::EmissionMode::prompt) + / p.neutron_xs(p.event_nuclide()).absorption * flux; + } else { + score = 0.; + } + } else { + // Skip any non-fission events + if (!p.fission()) continue; + // If there is no outgoing energy filter, than we only need to score + // to one bin. For the score to be 'analog', we need to score the + // number of particles that were banked in the fission bank. Since + // this was weighted by 1/keff, we multiply by keff to get the proper + // score. + auto n_delayed = std::accumulate(p.n_delayed_bank(), + p.n_delayed_bank()+MAX_DELAYED_GROUPS, 0); + auto prompt_frac = 1. - n_delayed / static_cast(p.n_bank()); + score = simulation::keff * p.wgt_bank() * prompt_frac * flux; + } + } else { + if (i_nuclide >= 0) { + score = p.neutron_xs(i_nuclide).fission + * data::nuclides[i_nuclide] + ->nu(E, ReactionProduct::EmissionMode::prompt) + * atom_density * flux; + } else { + score = 0.; + // Add up contributions from each nuclide in the material. + if (p.material() != MATERIAL_VOID) { + const Material& material {*model::materials[p.material()]}; + for (auto i = 0; i < material.nuclide_.size(); ++i) { + auto j_nuclide = material.nuclide_[i]; + auto atom_density = material.atom_density_(i); + score += p.neutron_xs(j_nuclide).fission + * data::nuclides[j_nuclide] + ->nu(E, ReactionProduct::EmissionMode::prompt) + * atom_density * flux; + } + } + } + } + break; + + + case SCORE_DELAYED_NU_FISSION: + if (p.macro_xs().absorption == 0) continue; + if (tally.estimator_ == TallyEstimator::ANALOG) { + if (settings::survival_biasing || p.fission()) { + if (tally.energyout_filter_ != C_NONE) { + // Fission has multiple outgoing neutrons so this helper function + // is used to handle scoring the multiple filter bins. 
+ score_fission_eout(p, i_tally, score_index, score_bin); + continue; + } + } + if (settings::survival_biasing) { + // No fission events occur if survival biasing is on -- need to + // calculate fraction of absorptions that would have resulted in + // delayed-nu-fission + if (p.neutron_xs(p.event_nuclide()).absorption > 0 + && data::nuclides[p.event_nuclide()]->fissionable_) { + if (tally.delayedgroup_filter_ != C_NONE) { + auto i_dg_filt = tally.filters()[tally.delayedgroup_filter_]; + const DelayedGroupFilter& filt + {*dynamic_cast( + model::tally_filters[i_dg_filt].get())}; + // Tally each delayed group bin individually + for (auto d_bin = 0; d_bin < filt.n_bins(); ++d_bin) { + auto dg = filt.groups()[d_bin]; + auto yield = data::nuclides[p.event_nuclide()] + ->nu(E, ReactionProduct::EmissionMode::delayed, dg); + score = wgt_absorb * yield + * p.neutron_xs(p.event_nuclide()).fission + / p.neutron_xs(p.event_nuclide()).absorption * flux; + score_fission_delayed_dg(i_tally, d_bin, score, + score_index, p.filter_matches()); + } + continue; + } else { + // If the delayed group filter is not present, compute the score + // by multiplying the absorbed weight by the fraction of the + // delayed-nu-fission xs to the absorption xs + score = wgt_absorb + * p.neutron_xs(p.event_nuclide()).fission + * data::nuclides[p.event_nuclide()] + ->nu(E, ReactionProduct::EmissionMode::delayed) + / p.neutron_xs(p.event_nuclide()).absorption *flux; + } + } + } else { + // Skip any non-fission events + if (!p.fission()) continue; + // If there is no outgoing energy filter, than we only need to score + // to one bin. For the score to be 'analog', we need to score the + // number of particles that were banked in the fission bank. Since + // this was weighted by 1/keff, we multiply by keff to get the proper + // score. Loop over the neutrons produced from fission and check which + // ones are delayed. If a delayed neutron is encountered, add its + // contribution to the fission bank to the score. 
+ if (tally.delayedgroup_filter_ != C_NONE) { + auto i_dg_filt = tally.filters()[tally.delayedgroup_filter_]; + const DelayedGroupFilter& filt + {*dynamic_cast( + model::tally_filters[i_dg_filt].get())}; + // Tally each delayed group bin individually + for (auto d_bin = 0; d_bin < filt.n_bins(); ++d_bin) { + auto d = filt.groups()[d_bin]; + score = simulation::keff * p.wgt_bank() / p.n_bank() + * p.n_delayed_bank(d-1) * flux; + score_fission_delayed_dg(i_tally, d_bin, score, score_index, p.filter_matches()); + } + continue; + } else { + // Add the contribution from all delayed groups + auto n_delayed = std::accumulate(p.n_delayed_bank(), + p.n_delayed_bank()+MAX_DELAYED_GROUPS, 0); + score = simulation::keff * p.wgt_bank() / p.n_bank() * n_delayed + * flux; + } + } + } else { + if (i_nuclide >= 0) { + if (tally.delayedgroup_filter_ != C_NONE) { + auto i_dg_filt = tally.filters()[tally.delayedgroup_filter_]; + const DelayedGroupFilter& filt + {*dynamic_cast( + model::tally_filters[i_dg_filt].get())}; + // Tally each delayed group bin individually + for (auto d_bin = 0; d_bin < filt.n_bins(); ++d_bin) { + auto d = filt.groups()[d_bin]; + auto yield = data::nuclides[i_nuclide] + ->nu(E, ReactionProduct::EmissionMode::delayed, d); + score = p.neutron_xs(i_nuclide).fission * yield + * atom_density * flux; + score_fission_delayed_dg(i_tally, d_bin, score, score_index, p.filter_matches()); + } + continue; + } else { + // If the delayed group filter is not present, compute the score + // by multiplying the delayed-nu-fission macro xs by the flux + score = p.neutron_xs(i_nuclide).fission + * data::nuclides[i_nuclide] + ->nu(E, ReactionProduct::EmissionMode::delayed) + * atom_density * flux; + } + } else { + if (tally.delayedgroup_filter_ != C_NONE) { + auto i_dg_filt = tally.filters()[tally.delayedgroup_filter_]; + const DelayedGroupFilter& filt + {*dynamic_cast( + model::tally_filters[i_dg_filt].get())}; + if (p.material() != MATERIAL_VOID) { + const Material& material {*model::materials[p.material()]}; + for (auto i = 0; i < material.nuclide_.size(); ++i) { + auto j_nuclide = material.nuclide_[i]; + auto atom_density = material.atom_density_(i); + // Tally each delayed group bin individually + for (auto d_bin = 0; d_bin < filt.n_bins(); ++d_bin) { + auto d = filt.groups()[d_bin]; + auto yield = data::nuclides[j_nuclide] + ->nu(E, ReactionProduct::EmissionMode::delayed, d); + score = p.neutron_xs(j_nuclide).fission * yield + * atom_density * flux; + score_fission_delayed_dg(i_tally, d_bin, score, + score_index, p.filter_matches()); + } + } + } + continue; + } else { + score = 0.; + if (p.material() != MATERIAL_VOID) { + const Material& material {*model::materials[p.material()]}; + for (auto i = 0; i < material.nuclide_.size(); ++i) { + auto j_nuclide = material.nuclide_[i]; + auto atom_density = material.atom_density_(i); + score += p.neutron_xs(j_nuclide).fission + * data::nuclides[j_nuclide] + ->nu(E, ReactionProduct::EmissionMode::delayed) + * atom_density * flux; + } + } + } + } + } + break; + + + case SCORE_DECAY_RATE: + if (p.macro_xs().absorption == 0) continue; + if (tally.estimator_ == TallyEstimator::ANALOG) { + if (settings::survival_biasing) { + // No fission events occur if survival biasing is on -- need to + // calculate fraction of absorptions that would have resulted in + // delayed-nu-fission + const auto& nuc {*data::nuclides[p.event_nuclide()]}; + if (p.neutron_xs(p.event_nuclide()).absorption > 0 + && nuc.fissionable_) { + const auto& rxn {*nuc.fission_rx_[0]}; + if 
(tally.delayedgroup_filter_ != C_NONE) { + auto i_dg_filt = tally.filters()[tally.delayedgroup_filter_]; + const DelayedGroupFilter& filt + {*dynamic_cast( + model::tally_filters[i_dg_filt].get())}; + // Tally each delayed group bin individually + for (auto d_bin = 0; d_bin < filt.n_bins(); ++d_bin) { + auto d = filt.groups()[d_bin]; + auto yield + = nuc.nu(E, ReactionProduct::EmissionMode::delayed, d); + auto rate = rxn.products_[d].decay_rate_; + score = wgt_absorb * yield + * p.neutron_xs(p.event_nuclide()).fission + / p.neutron_xs(p.event_nuclide()).absorption + * rate * flux; + score_fission_delayed_dg(i_tally, d_bin, score, + score_index, p.filter_matches()); + } + continue; + } else { + // If the delayed group filter is not present, compute the score + // by multiplying the absorbed weight by the fraction of the + // delayed-nu-fission xs to the absorption xs for all delayed + // groups + score = 0.; + // We need to be careful not to overshoot the number of + // delayed groups since this could cause the range of the + // rxn.products_ array to be exceeded. Hence, we use the size + // of this array and not the MAX_DELAYED_GROUPS constant for + // this loop. + for (auto d = 0; d < rxn.products_.size() - 2; ++d) { + auto yield + = nuc.nu(E, ReactionProduct::EmissionMode::delayed, d+1); + auto rate = rxn.products_[d+1].decay_rate_; + score += rate * wgt_absorb + * p.neutron_xs(p.event_nuclide()).fission * yield + / p.neutron_xs(p.event_nuclide()).absorption * flux; + } + } + } + } else { + // Skip any non-fission events + if (!p.fission()) continue; + // If there is no outgoing energy filter, than we only need to score + // to one bin. For the score to be 'analog', we need to score the + // number of particles that were banked in the fission bank. Since + // this was weighted by 1/keff, we multiply by keff to get the proper + // score. Loop over the neutrons produced from fission and check which + // ones are delayed. If a delayed neutron is encountered, add its + // contribution to the fission bank to the score. 
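+          // Weight each delayed neutron in the bank by the decay rate of its delayed group.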
+ score = 0.; + for (auto i = 0; i < p.n_bank(); ++i) { + const auto& bank = p.nu_bank(i); + auto g = bank.delayed_group; + if (g != 0) { + const auto& nuc {*data::nuclides[p.event_nuclide()]}; + const auto& rxn {*nuc.fission_rx_[0]}; + auto rate = rxn.products_[g].decay_rate_; + score += simulation::keff * bank.wgt * rate * flux; + if (tally.delayedgroup_filter_ != C_NONE) { + auto i_dg_filt = tally.filters()[tally.delayedgroup_filter_]; + const DelayedGroupFilter& filt + {*dynamic_cast( + model::tally_filters[i_dg_filt].get())}; + // Find the corresponding filter bin and then score + for (auto d_bin = 0; d_bin < filt.n_bins(); ++d_bin) { + auto d = filt.groups()[d_bin]; + if (d == g) + score_fission_delayed_dg(i_tally, d_bin, score, + score_index, p.filter_matches()); + } + score = 0.; + } + } + } + } + } else { + if (i_nuclide >= 0) { + const auto& nuc {*data::nuclides[i_nuclide]}; + if (!nuc.fissionable_) continue; + const auto& rxn {*nuc.fission_rx_[0]}; + if (tally.delayedgroup_filter_ != C_NONE) { + auto i_dg_filt = tally.filters()[tally.delayedgroup_filter_]; + const DelayedGroupFilter& filt + {*dynamic_cast( + model::tally_filters[i_dg_filt].get())}; + // Tally each delayed group bin individually + for (auto d_bin = 0; d_bin < filt.n_bins(); ++d_bin) { + auto d = filt.groups()[d_bin]; + auto yield + = nuc.nu(E, ReactionProduct::EmissionMode::delayed, d); + auto rate = rxn.products_[d].decay_rate_; + score = p.neutron_xs(i_nuclide).fission * yield * flux + * atom_density * rate; + score_fission_delayed_dg(i_tally, d_bin, score, score_index, p.filter_matches()); + } + continue; + } else { + score = 0.; + // We need to be careful not to overshoot the number of + // delayed groups since this could cause the range of the + // rxn.products_ array to be exceeded. Hence, we use the size + // of this array and not the MAX_DELAYED_GROUPS constant for + // this loop. 
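+            // Delayed-group indices passed to nu() are 1-based, hence the d+1 below.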
+ for (auto d = 0; d < rxn.products_.size() - 2; ++d) { + auto yield + = nuc.nu(E, ReactionProduct::EmissionMode::delayed, d+1); + auto rate = rxn.products_[d+1].decay_rate_; + score += p.neutron_xs(i_nuclide).fission * flux + * yield * atom_density * rate; + } + } + } else { + if (tally.delayedgroup_filter_ != C_NONE) { + auto i_dg_filt = tally.filters()[tally.delayedgroup_filter_]; + const DelayedGroupFilter& filt + {*dynamic_cast( + model::tally_filters[i_dg_filt].get())}; + if (p.material() != MATERIAL_VOID) { + const Material& material {*model::materials[p.material()]}; + for (auto i = 0; i < material.nuclide_.size(); ++i) { + auto j_nuclide = material.nuclide_[i]; + auto atom_density = material.atom_density_(i); + const auto& nuc {*data::nuclides[j_nuclide]}; + if (nuc.fissionable_) { + const auto& rxn {*nuc.fission_rx_[0]}; + // Tally each delayed group bin individually + for (auto d_bin = 0; d_bin < filt.n_bins(); ++d_bin) { + auto d = filt.groups()[d_bin]; + auto yield + = nuc.nu(E, ReactionProduct::EmissionMode::delayed, d); + auto rate = rxn.products_[d].decay_rate_; + score = p.neutron_xs(j_nuclide).fission * yield + * flux * atom_density * rate; + score_fission_delayed_dg(i_tally, d_bin, score, + score_index, p.filter_matches()); + } + } + } + } + continue; + } else { + score = 0.; + if (p.material() != MATERIAL_VOID) { + const Material& material {*model::materials[p.material()]}; + for (auto i = 0; i < material.nuclide_.size(); ++i) { + auto j_nuclide = material.nuclide_[i]; + auto atom_density = material.atom_density_(i); + const auto& nuc {*data::nuclides[j_nuclide]}; + if (nuc.fissionable_) { + const auto& rxn {*nuc.fission_rx_[0]}; + // We need to be careful not to overshoot the number of + // delayed groups since this could cause the range of the + // rxn.products_ array to be exceeded. Hence, we use the size + // of this array and not the MAX_DELAYED_GROUPS constant for + // this loop. + for (auto d = 0; d < rxn.products_.size() - 2; ++d) { + auto yield + = nuc.nu(E, ReactionProduct::EmissionMode::delayed, d+1); + auto rate = rxn.products_[d+1].decay_rate_; + score += p.neutron_xs(j_nuclide).fission + * yield * atom_density * flux * rate; + } + } + } + } + } + } + } + break; + + + case SCORE_KAPPA_FISSION: + if (p.macro_xs().absorption == 0.) continue; + score = 0.; + // Kappa-fission values are determined from the Q-value listed for the + // fission cross section. 
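+      // The estimator structure mirrors SCORE_FISSION, with each contribution weighted by the fission Q-value.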
+ if (tally.estimator_ == TallyEstimator::ANALOG) { + if (settings::survival_biasing) { + // No fission events occur if survival biasing is on -- need to + // calculate fraction of absorptions that would have resulted in + // fission scaled by the Q-value + const auto& nuc {*data::nuclides[p.event_nuclide()]}; + if (p.neutron_xs(p.event_nuclide()).absorption > 0 + && nuc.fissionable_) { + const auto& rxn {*nuc.fission_rx_[0]}; + score = wgt_absorb * rxn.q_value_ + * p.neutron_xs(p.event_nuclide()).fission + / p.neutron_xs(p.event_nuclide()).absorption * flux; + } + } else { + // Skip any non-absorption events + if (p.event() == TallyEvent::SCATTER) continue; + // All fission events will contribute, so again we can use particle's + // weight entering the collision as the estimate for the fission + // reaction rate + const auto& nuc {*data::nuclides[p.event_nuclide()]}; + if (p.neutron_xs(p.event_nuclide()).absorption > 0 + && nuc.fissionable_) { + const auto& rxn {*nuc.fission_rx_[0]}; + score = p.wgt_last() * rxn.q_value_ + * p.neutron_xs(p.event_nuclide()).fission + / p.neutron_xs(p.event_nuclide()).absorption * flux; + } + } + } else { + if (i_nuclide >= 0) { + const auto& nuc {*data::nuclides[i_nuclide]}; + if (nuc.fissionable_) { + const auto& rxn {*nuc.fission_rx_[0]}; + score = rxn.q_value_ * p.neutron_xs(i_nuclide).fission + * atom_density * flux; + } + } else if (p.material() != MATERIAL_VOID) { + const Material& material {*model::materials[p.material()]}; + for (auto i = 0; i < material.nuclide_.size(); ++i) { + auto j_nuclide = material.nuclide_[i]; + auto atom_density = material.atom_density_(i); + const auto& nuc {*data::nuclides[j_nuclide]}; + if (nuc.fissionable_) { + const auto& rxn {*nuc.fission_rx_[0]}; + score += rxn.q_value_ * p.neutron_xs(j_nuclide).fission + * atom_density * flux; + } + } + } + } + break; + + + case SCORE_EVENTS: + // Simply count the number of scoring events + #pragma omp atomic + tally.results_(filter_index, score_index, TallyResult::VALUE) += 1.0; + continue; + + + case ELASTIC: + if (tally.estimator_ == TallyEstimator::ANALOG) { + // Check if event MT matches + if (p.event_mt() != ELASTIC) continue; + score = p.wgt_last() * flux; + } else { + if (i_nuclide >= 0) { + if (p.neutron_xs(i_nuclide).elastic == CACHE_INVALID) + data::nuclides[i_nuclide]->calculate_elastic_xs(p); + score = p.neutron_xs(i_nuclide).elastic * atom_density * flux; + } else { + score = 0.; + if (p.material() != MATERIAL_VOID) { + const Material& material {*model::materials[p.material()]}; + for (auto i = 0; i < material.nuclide_.size(); ++i) { + auto j_nuclide = material.nuclide_[i]; + auto atom_density = material.atom_density_(i); + if (p.neutron_xs(j_nuclide).elastic == CACHE_INVALID) + data::nuclides[j_nuclide]->calculate_elastic_xs(p); + score += p.neutron_xs(j_nuclide).elastic * atom_density + * flux; + } + } + } + } + break; + + case SCORE_FISS_Q_PROMPT: + case SCORE_FISS_Q_RECOV: + if (p.macro_xs().absorption == 0.) 
continue; + score = score_fission_q(p, score_bin, tally, flux, i_nuclide, atom_density); + break; + + + case N_2N: + case N_3N: + case N_4N: + case N_GAMMA: + case N_P: + case N_A: + if (tally.estimator_ == TallyEstimator::ANALOG) { + // Check if the event MT matches + if (p.event_mt() != score_bin) continue; + score = p.wgt_last() * flux; + } else { + int m; + switch (score_bin) { + case N_GAMMA: m = 0; break; + case N_P: m = 1; break; + case N_A: m = 2; break; + case N_2N: m = 3; break; + case N_3N: m = 4; break; + case N_4N: m = 5; break; + } + if (i_nuclide >= 0) { + score = p.neutron_xs(i_nuclide).reaction[m] * atom_density + * flux; + } else { + score = 0.; + if (p.material() != MATERIAL_VOID) { + const Material& material {*model::materials[p.material()]}; + for (auto i = 0; i < material.nuclide_.size(); ++i) { + auto j_nuclide = material.nuclide_[i]; + auto atom_density = material.atom_density_(i); + score += p.neutron_xs(j_nuclide).reaction[m] + * atom_density * flux; + } + } + } + } + break; + + + case HEATING: + if (p.type() == ParticleType::neutron) { + score = score_neutron_heating(p, tally, flux, HEATING, + i_nuclide, atom_density); + } else { + // The energy deposited is the difference between the pre-collision and + // post-collision energy... + score = E - p.E(); + + // ...less the energy of any secondary particles since they will be + // transported individually later + const auto& bank = p.secondary_bank(); + for (auto it = bank.end() - p.bank_second_E(); it < bank.end(); ++it) { + score -= it->E; + } + + score *= p.wgt_last(); + } + break; + + default: + + // The default block is really only meant for redundant neutron reactions + // (e.g. 444, 901) + if (p.type() != ParticleType::neutron) continue; + + if (tally.estimator_ == TallyEstimator::ANALOG) { + + // Any other score is assumed to be a MT number. 
Thus, we just need + // to check if it matches the MT number of the event + if (p.event_mt() != score_bin) continue; + score = p.wgt_last()*flux; + } else { + // Any other cross section has to be calculated on-the-fly + if (score_bin < 2) fatal_error("Invalid score type on tally " + + std::to_string(tally.id_)); + score = 0.; + if (i_nuclide >= 0) { + const auto& nuc {*data::nuclides[i_nuclide]}; + auto m = nuc.reaction_index_[score_bin]; + if (m == C_NONE) continue; + const auto& rxn {*nuc.reactions_[m]}; + auto i_temp = p.neutron_xs(i_nuclide).index_temp; + if (i_temp >= 0) { // Can be false due to multipole + auto i_grid = p.neutron_xs(i_nuclide).index_grid; + auto f = p.neutron_xs(i_nuclide).interp_factor; + const auto& xs {rxn.xs_[i_temp]}; + if (i_grid >= xs.threshold) { + score = ((1.0 - f) * xs.value[i_grid-xs.threshold] + + f * xs.value[i_grid-xs.threshold+1]) * atom_density * flux; + } + } + } else if (p.material() != MATERIAL_VOID) { + const Material& material {*model::materials[p.material()]}; + for (auto i = 0; i < material.nuclide_.size(); ++i) { + auto j_nuclide = material.nuclide_[i]; + auto atom_density = material.atom_density_(i); + const auto& nuc {*data::nuclides[j_nuclide]}; + auto m = nuc.reaction_index_[score_bin]; + if (m == C_NONE) continue; + const auto& rxn {*nuc.reactions_[m]}; + auto i_temp = p.neutron_xs(j_nuclide).index_temp; + if (i_temp >= 0) { // Can be false due to multipole + auto i_grid = p.neutron_xs(j_nuclide).index_grid; + auto f = p.neutron_xs(j_nuclide).interp_factor; + const auto& xs {rxn.xs_[i_temp]}; + if (i_grid >= xs.threshold) { + score += ((1.0 - f) * xs.value[i_grid-xs.threshold] + + f * xs.value[i_grid-xs.threshold+1]) * atom_density + * flux; + } + } + } + } + } + } + + // Add sensitivity information on score for sensitivity tallies. + // Retrieve particle's cumulative sensitivity + + if (score == 0.0) return; + + const auto& sens {model::tally_sens[tally.sens_]}; + const auto cumulative_sensitivities = p.cumulative_sensitivities(tally.sens_); + + if (settings::run_mode == RunMode::EIGENVALUE) { + + #pragma omp atomic + tally.denominator_ += score*filter_weight; //add an if statement if we want denominator? ; + + // Update tally results + for (auto idx = 0; idx < cumulative_sensitivities.size(); idx++){ + #pragma omp atomic + tally.results_(idx, score_index, SensitivityTallyResult::VALUE) += cumulative_sensitivities[idx]*score*filter_weight; + } + + if (sens.sens_nuclide == p.fission_nuclide()){ + switch (sens.variable) { + + case SensitivityVariable::CROSS_SECTION: + { + if (sens.sens_reaction == SCORE_FISSION ){ + // Get the energy of the parent particle. + auto E = p.E_parent(); + // Bin the energy. 
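+                  // The contribution is kept only if the parent energy falls
+                  // inside the sensitivity energy grid; lower_bound_index
+                  // then gives the index of the bin containing E.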
+ if (E >= sens.energy_bins_.front() && E <= sens.energy_bins_.back()) { + auto bin = lower_bound_index(sens.energy_bins_.begin(), sens.energy_bins_.end(), E); + #pragma omp atomic + tally.previous_results_(bin, score_index, SensitivityTallyResult::VALUE) += score*filter_weight; + } + } + } + break; + case SensitivityVariable::MULTIPOLE: + { + // check if in resonance range + const auto& nuc {*data::nuclides[sens.sens_nuclide]}; + if (multipole_in_range(nuc, p.E_parent())){ + // Calculate derivative of the fission cross section at p->E_parent() + double sig_s, sig_a, sig_f; + std::tie(sig_s, sig_a, sig_f) + = nuc.multipole_->evaluate(p.E_parent(), p.sqrtkT()); + auto derivative = nuc.multipole_->evaluate_pole_deriv_fission(p.E_parent(), p.sqrtkT()); //This actually needs to be parent kT + // sum/bin 1/micro_sigma_scatter * derivative + int start = derivative.first; + int size = derivative.second.size(); + double sen_score = score*filter_weight/sig_f; + for (int deriv_idx = start; deriv_idx < start + size ; deriv_idx++){ + #pragma omp atomic + tally.previous_results_(deriv_idx, score_index, SensitivityTallyResult::VALUE) += sen_score*derivative.second[deriv_idx - start]; + } + } + // multiply score by derivative of fission cross section wrt multipole parameters / fission cross section + // at parent energy. so I need to also evaluate the fission derivative.. + } + break; + + case SensitivityVariable::CURVE_FIT: + { + // check if in resonance range + const auto& nuc {*data::nuclides[sens.sens_nuclide]}; + if (multipole_in_range(nuc, p.E_parent())){ + // Calculate derivative of the fission cross section at p->E_parent_ + double sig_s, sig_a, sig_f; + std::tie(sig_s, sig_a, sig_f) + = nuc.multipole_->evaluate(p.E_parent(), p.sqrtkT()); + auto derivative = nuc.multipole_->evaluate_fit_deriv_fission(p.E_parent(), p.sqrtkT()); //This actually needs to be parent kT + // sum/bin 1/micro_sigma_scatter * derivative + int start = derivative.first; + int size = derivative.second.size(); + double sen_score = score*filter_weight/sig_f; + for (int deriv_idx = start; deriv_idx < start + size ; deriv_idx++){ + #pragma omp atomic + tally.previous_results_(deriv_idx, score_index, SensitivityTallyResult::VALUE) += sen_score*derivative.second[deriv_idx - start]; + } + } + // multiply score by derivative of fission cross section wrt multipole parameters / fission cross section + // at parent energy. so I need to also evaluate the fission derivative.. + } + break; + } + } + } else { + + #pragma omp atomic + tally.denominator_ += score*filter_weight; + + for (int idx = 0; idx < cumulative_sensitivities.size(); idx++){ + #pragma omp atomic + tally.results_(idx, score_index, SensitivityTallyResult::VALUE) += cumulative_sensitivities[idx]*score*filter_weight; + } + + // // direct effect??? + // double atom_density = 0.; + // if (sens.sens_nuclide >= 0) { + // auto j = model::materials[p.material()]->mat_nuclide_index_[sens.sens_nuclide]; + // if (j == C_NONE) continue; + // atom_density = model::materials[p.material()]->atom_density_(j); + // } + // + // switch (sens.variable) { + // + // case SensitivityVariable::CROSS_SECTION: + // { + // // Calculate the sensitivity with respect to the cross section + // // at this energy + // + // // Get the post-collision energy of the particle. 
+ // auto E = p.E(); + // + // // Get the correct cross section + // double macro_xs; + // switch (sens.sens_reaction) { + // case SCORE_TOTAL: + // if (sens.sens_nuclide >=0){ + // macro_xs = p.neutron_xs(sens.sens_nuclide).total * atom_density; + // } else { + // macro_xs = p.macro_xs().total; + // } + // break; + // case SCORE_SCATTER: + // if (sens.sens_nuclide >=0){ + // macro_xs = (p.neutron_xs(sens.sens_nuclide).total + // - p.neutron_xs(sens.sens_nuclide).absorption) * atom_density; + // } else { + // macro_xs = p.macro_xs().total - p.macro_xs().absorption; + // } + // break; + // case ELASTIC: + // if (sens.sens_nuclide >= 0) { + // if (p.neutron_xs(sens.sens_nuclide).elastic == CACHE_INVALID) + // data::nuclides[sens.sens_nuclide]->calculate_elastic_xs(p); + // macro_xs = p.neutron_xs(sens.sens_nuclide).elastic * atom_density; + // } + // break; + // case SCORE_ABSORPTION: + // if (sens.sens_nuclide >=0){ + // macro_xs = p.neutron_xs(sens.sens_nuclide).absorption * atom_density; + // } else { + // macro_xs = p.macro_xs().absorption; + // } + // break; + // case SCORE_FISSION: + // if (p.macro_xs().absorption == 0) continue; + // + // if (sens.sens_nuclide >= 0) { + // macro_xs = p.neutron_xs(sens.sens_nuclide).fission * atom_density; + // } else { + // macro_xs = p.macro_xs().fission; + // } + // break; + // default: + // if (sens.sens_nuclide >= 0) { + // macro_xs = get_nuclide_xs(p, sens.sens_nuclide, sens.sens_reaction) * atom_density; + // } + // break; + // } + // // Bin the contribution. + // if (E >= sens.energy_bins_.front() && E <= sens.energy_bins_.back()) { + // auto bin = lower_bound_index(sens.energy_bins_.begin(), sens.energy_bins_.end(), E); + // tally.previous_results_(bin, score_index, SensitivityTallyResult::VALUE) += flux * macro_xs; + // } + // } + // break; + // } + //} + } + } +} + void score_surface_tally(Particle& p, const vector& tallies) { double current = p.wgt_last(); diff --git a/src/wmp.cpp b/src/wmp.cpp index e9c4fb5e3e6..e1bbc1c3ed0 100644 --- a/src/wmp.cpp +++ b/src/wmp.cpp @@ -217,6 +217,349 @@ std::tuple WindowedMultipole::evaluate_deriv( return std::make_tuple(sig_s, sig_a, sig_f); } +//======================================================================== +// WindowedMultipole parameter derivatives +//======================================================================== + +std::pair> +WindowedMultipole::evaluate_pole_deriv_total(double E, double sqrtkT) +{ + using namespace std::complex_literals; + + // ========================================================================== + // Bookkeeping + + // Define some frequently used variables. + double sqrtE = std::sqrt(E); + double invE = 1.0 / E; + + // Locate window containing energy + int i_window = std::min(window_info_.size() - 1, + static_cast((sqrtE - std::sqrt(E_min_)) * inv_spacing_)); + const auto& window {window_info_[i_window]}; + + // Calculate size of vector and initialize + int size = 2*(window.index_end - window.index_start + 1)*data_.shape()[1]; + std::vector derivative(size, 0.0); + int vector_start = window.index_start * 2 * data_.shape()[1]; + + // ========================================================================== + // Add the contribution from the poles in this window. + + if (sqrtkT == 0.0) { + // If at 0K, use asymptotic form. 
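+  // Layout note: the derivative vector holds 2*data_.shape()[1] entries per
+  // pole in this window -- one pair of entries (real and imaginary parts) for
+  // the pole energy and one pair for each residue. vector_start is the offset
+  // of this window's first pole within the nuclide-wide parameter vector.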
+ for (int i_pole = window.index_start; i_pole <= window.index_end; ++i_pole) { + int start_idx = (i_pole - window.index_start) * 2 * data_.shape()[1]; + std::complex psi_chi = 1.0 / (data_(i_pole, MP_EA) - sqrtE); + std::complex c_temp = psi_chi / E; + + derivative[start_idx] = (-1.0 * (data_(i_pole, MP_RS) + data_(i_pole, MP_RA)) * psi_chi * c_temp).imag(); + derivative[start_idx+1] = (-1.0 * (data_(i_pole, MP_RS) + data_(i_pole, MP_RA)) * psi_chi * c_temp).real(); + derivative[start_idx+2] = (c_temp).imag(); + derivative[start_idx+3] = (c_temp).real(); + derivative[start_idx+4] = (c_temp).imag(); + derivative[start_idx+5] = (c_temp).real(); + // Total = Scatter + Absorption, therefore changing r_f does not affect Total Cross Section + //if (fissionable_){ + // derivative[start_idx] += (-1.0 * data_(i_pole, MP_RF) * psi_chi * c_temp).imag(); + // derivative[start_idx+1] += (-1.0 * data_(i_pole, MP_RF) * psi_chi * c_temp).real(); + // derivative[start_idx+6] = (c_temp).imag(); + // derivative[start_idx+7] = (c_temp).real(); + //} + } + } else { + // At temperature, use Faddeeva function-based form. + double dopp = sqrt_awr_ / sqrtkT; + if (window.index_end >= window.index_start) { + for (int i_pole = window.index_start; i_pole <= window.index_end; ++i_pole) { + int start_idx = (i_pole - window.index_start) * 2 * data_.shape()[1]; + std::complex z = (sqrtE - data_(i_pole, MP_EA)) * dopp; + std::complex w_val = faddeeva(z) * dopp * invE * SQRT_PI; + std::complex dw_val = w_derivative(z,1) * dopp * invE * SQRT_PI; + + derivative[start_idx] = (-1.0 * dopp * (data_(i_pole, MP_RS) + data_(i_pole, MP_RA)) * dw_val).real(); + derivative[start_idx+1] = (-1.0i * dopp * (data_(i_pole, MP_RS) + data_(i_pole, MP_RA)) * dw_val).real(); + derivative[start_idx+2] = (w_val).real(); + derivative[start_idx+3] = (1.0i * w_val).real(); + derivative[start_idx+4] = (w_val).real(); + derivative[start_idx+5] = (1.0i * w_val).real(); + // Total = Scatter + Absorption, therefore changing r_f does not affect Total Cross Section + //if (fissionable_){ + // derivative[start_idx] += (-1.0 * data_(i_pole, MP_RF) * dopp * dw_val).real(); + // derivative[start_idx+1] += (-1.0i * data_(i_pole, MP_RF) * dopp * dw_val).real(); + // derivative[start_idx+6] = (w_val).real(); + // derivative[start_idx+7] = (1.0i * w_val).real(); + //} + } + } + } + + return std::make_pair(vector_start, derivative); +} + +std::pair> +WindowedMultipole::evaluate_pole_deriv_scatter(double E, double sqrtkT) +{ + using namespace std::complex_literals; + + // ========================================================================== + // Bookkeeping + + // Define some frequently used variables. + double sqrtE = std::sqrt(E); + double invE = 1.0 / E; + + // Locate window containing energy + int i_window = std::min(window_info_.size() - 1, + static_cast((sqrtE - std::sqrt(E_min_)) * inv_spacing_)); + const auto& window {window_info_[i_window]}; + + // Calculate size of vector and initialize + int size = 2*(window.index_end - window.index_start + 1)*data_.shape()[1]; + std::vector derivative(size, 0.0); + int vector_start = window.index_start * 2 * data_.shape()[1]; + + // ========================================================================== + // Add the contribution from the poles in this window. + + if (sqrtkT == 0.0) { + // If at 0K, use asymptotic form. 
+ for (int i_pole = window.index_start; i_pole <= window.index_end; ++i_pole) { + int start_idx = (i_pole - window.index_start) * 2 * data_.shape()[1]; + std::complex psi_chi = 1.0 / (data_(i_pole, MP_EA) - sqrtE); + std::complex c_temp = psi_chi / E; + + derivative[start_idx] = (-1.0 * data_(i_pole, MP_RS) * psi_chi * c_temp).imag(); + derivative[start_idx+1] = (-1.0 * data_(i_pole, MP_RS) * psi_chi * c_temp).real(); + derivative[start_idx+2] = (c_temp).imag(); + derivative[start_idx+3] = (c_temp).real(); + } + } else { + // At temperature, use Faddeeva function-based form. + double dopp = sqrt_awr_ / sqrtkT; + if (window.index_end >= window.index_start) { + for (int i_pole = window.index_start; i_pole <= window.index_end; ++i_pole) { + int start_idx = (i_pole - window.index_start) * 2 * data_.shape()[1]; + std::complex z = (sqrtE - data_(i_pole, MP_EA)) * dopp; + std::complex w_val = faddeeva(z) * dopp * invE * SQRT_PI; + std::complex dw_val = w_derivative(z,1) * dopp * invE * SQRT_PI; + + derivative[start_idx] = (-1.0 * dopp * data_(i_pole, MP_RS) * dw_val).real(); + derivative[start_idx+1] = (-1.0i * dopp * data_(i_pole, MP_RS) * dw_val).real(); + derivative[start_idx+2] = (w_val).real(); + derivative[start_idx+3] = (1.0i * w_val).real(); + } + } + } + + return std::make_pair(vector_start, derivative); +} + +std::pair> +WindowedMultipole::evaluate_pole_deriv_fission(double E, double sqrtkT) +{ + using namespace std::complex_literals; + + // ========================================================================== + // Bookkeeping + + // Define some frequently used variables. + double sqrtE = std::sqrt(E); + double invE = 1.0 / E; + + // Locate window containing energy + int i_window = std::min(window_info_.size() - 1, + static_cast((sqrtE - std::sqrt(E_min_)) * inv_spacing_)); + const auto& window {window_info_[i_window]}; + + // Calculate size of vector and initialize + int size = 2*(window.index_end - window.index_start + 1)*data_.shape()[1]; + std::vector derivative(size, 0.0); + int vector_start = window.index_start * 2 * data_.shape()[1]; + + // ========================================================================== + // Add the contribution from the poles in this window. + + if (sqrtkT == 0.0) { + // If at 0K, use asymptotic form. + for (int i_pole = window.index_start; i_pole <= window.index_end; ++i_pole) { + int start_idx = (i_pole - window.index_start) * 2 * data_.shape()[1]; + std::complex psi_chi = 1.0 / (data_(i_pole, MP_EA) - sqrtE); + std::complex c_temp = psi_chi / E; + + derivative[start_idx] = (-1.0 * data_(i_pole, MP_RF) * psi_chi * c_temp).imag(); + derivative[start_idx+1] = (-1.0 * data_(i_pole, MP_RF) * psi_chi * c_temp).real(); + derivative[start_idx+6] = (c_temp).imag(); + derivative[start_idx+7] = (c_temp).real(); + } + } else { + // At temperature, use Faddeeva function-based form. 
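+    // dopp = sqrt(A/kT); z is the Doppler-broadened argument of the Faddeeva
+    // function w(z) and w_derivative(z, 1) its first derivative, so each entry
+    // filled below is the broadened derivative of sigma_f with respect to one
+    // pole parameter.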
+ double dopp = sqrt_awr_ / sqrtkT; + if (window.index_end >= window.index_start) { + for (int i_pole = window.index_start; i_pole <= window.index_end; ++i_pole) { + int start_idx = (i_pole - window.index_start) * 2 * data_.shape()[1]; + std::complex z = (sqrtE - data_(i_pole, MP_EA)) * dopp; + std::complex w_val = faddeeva(z) * dopp * invE * SQRT_PI; + std::complex dw_val = w_derivative(z,1) * dopp * invE * SQRT_PI; + + derivative[start_idx] = (-1.0 * data_(i_pole, MP_RF) * dopp * dw_val).real(); + derivative[start_idx+1] = (-1.0i * data_(i_pole, MP_RF) * dopp * dw_val).real(); + derivative[start_idx+6] = (w_val).real(); + derivative[start_idx+7] = (1.0i * w_val).real(); + } + } + } + + return std::make_pair(vector_start, derivative); +} + +std::pair> +WindowedMultipole::evaluate_fit_deriv_total(double E, double sqrtkT) +{ + using namespace std::complex_literals; + + // ========================================================================== + // Bookkeeping + + // Define some frequently used variables. + double sqrtE = std::sqrt(E); + double invE = 1.0 / E; + + // Locate window containing energy + int i_window = std::min(window_info_.size() - 1, + static_cast((sqrtE - std::sqrt(E_min_)) * inv_spacing_)); + const auto& window {window_info_[i_window]}; + + // Calculate size of vector and initialize + int size = curvefit_.shape()[1]*curvefit_.shape()[2]; + std::vector derivative(size, 0.0); + int vector_start = i_window*curvefit_.shape()[1]*curvefit_.shape()[2]; + + // ========================================================================== + // Add the contribution from the curvefit polynomial. + + if (sqrtkT > 0.0 && window.broaden_poly) { + // Broaden the curvefit. + double dopp = sqrt_awr_ / sqrtkT; + std::vector broadened_polynomials(fit_order_ + 1); + broaden_wmp_polynomials(E, dopp, fit_order_ + 1, broadened_polynomials.data()); + for (int i_poly = 0; i_poly < fit_order_ + 1; ++i_poly) { + derivative[i_poly] = broadened_polynomials[i_poly]; + derivative[i_poly + FIT_A*curvefit_.shape()[1]] = broadened_polynomials[i_poly]; + // Total = Scatter + Absorption, therefore changing r_f does not affect Total Cross Section + //if (fissionable_) { + // derivative[i_poly + FIT_F*curvefit_.shape()[1]] = broadened_polynomials[i_poly]; + //} + } + } else { + // Evaluate as if it were a polynomial + double temp = invE; + for (int i_poly = 0; i_poly < fit_order_ + 1; ++i_poly) { + derivative[i_poly] = temp; + derivative[i_poly + FIT_A*curvefit_.shape()[1]] = temp; + // Total = Scatter + Absorption, therefore changing r_f does not affect Total Cross Section + //if (fissionable_) { + // derivative[i_poly + FIT_F*curvefit_.shape()[1]] = temp; + //} + temp *= sqrtE; + } + } + + return std::make_pair(vector_start, derivative); +} + +std::pair> +WindowedMultipole::evaluate_fit_deriv_scatter(double E, double sqrtkT) +{ + using namespace std::complex_literals; + + // ========================================================================== + // Bookkeeping + + // Define some frequently used variables. 
+ double sqrtE = std::sqrt(E); + double invE = 1.0 / E; + + // Locate window containing energy + int i_window = std::min(window_info_.size() - 1, + static_cast((sqrtE - std::sqrt(E_min_)) * inv_spacing_)); + const auto& window {window_info_[i_window]}; + + // Calculate size of vector and initialize + int size = curvefit_.shape()[1]; + std::vector derivative(size, 0.0); + int vector_start = i_window*curvefit_.shape()[1]*curvefit_.shape()[2] + FIT_S*curvefit_.shape()[1]; + + // ========================================================================== + // Add the contribution from the curvefit polynomial. + + if (sqrtkT > 0.0 && window.broaden_poly) { + // Broaden the curvefit. + double dopp = sqrt_awr_ / sqrtkT; + std::vector broadened_polynomials(fit_order_ + 1); + broaden_wmp_polynomials(E, dopp, fit_order_ + 1, broadened_polynomials.data()); + for (int i_poly = 0; i_poly < fit_order_ + 1; ++i_poly) { + derivative[i_poly] = broadened_polynomials[i_poly]; + } + } else { + // Evaluate as if it were a polynomial + double temp = invE; + for (int i_poly = 0; i_poly < fit_order_ + 1; ++i_poly) { + derivative[i_poly] = temp; + temp *= sqrtE; + } + } + + return std::make_pair(vector_start, derivative); +} + +std::pair> +WindowedMultipole::evaluate_fit_deriv_fission(double E, double sqrtkT) +{ + using namespace std::complex_literals; + + // ========================================================================== + // Bookkeeping + + // Define some frequently used variables. + double sqrtE = std::sqrt(E); + double invE = 1.0 / E; + + // Locate window containing energy + // int i_window = (sqrtE - std::sqrt(E_min_)) * inv_spacing_; + + int i_window = std::min(window_info_.size() - 1, + static_cast((sqrtE - std::sqrt(E_min_)) * inv_spacing_)); + const auto& window {window_info_[i_window]}; + + // Calculate size of vector and initialize + int size = curvefit_.shape()[1]; + std::vector derivative(size, 0.0); + int vector_start = i_window*curvefit_.shape()[1]*curvefit_.shape()[2] + FIT_F*curvefit_.shape()[1]; + + // ========================================================================== + // Add the contribution from the curvefit polynomial. + + if (sqrtkT > 0.0 && window.broaden_poly) { + // Broaden the curvefit. + double dopp = sqrt_awr_ / sqrtkT; + std::vector broadened_polynomials(fit_order_ + 1); + broaden_wmp_polynomials(E, dopp, fit_order_ + 1, broadened_polynomials.data()); + for (int i_poly = 0; i_poly < fit_order_ + 1; ++i_poly) { + derivative[i_poly] = broadened_polynomials[i_poly]; + } + } else { + // Evaluate as if it were a polynomial + double temp = invE; + for (int i_poly = 0; i_poly < fit_order_ + 1; ++i_poly) { + derivative[i_poly] = temp; + temp *= sqrtE; + } + } + + return std::make_pair(vector_start, derivative); +} + //======================================================================== // Non-member functions //========================================================================
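
For orientation only (not part of the patch): a minimal sketch of how the (vector_start, derivative) pair returned by these routines is intended to be consumed, mirroring the accumulation loop in the sensitivity scoring code above. The helper name, the bins array, and the include path are illustrative assumptions; in the patch itself the accumulation targets tally.previous_results_ under #pragma omp atomic, and the score is additionally divided by the fission cross section evaluated at the parent energy.

#include <cstddef>
#include <vector>

#include "openmc/wmp.h"  // assumed header for WindowedMultipole

// Hypothetical helper: scatter one window's derivative block into a dense
// per-parameter accumulator.
void accumulate_fission_pole_derivatives(openmc::WindowedMultipole& nuc,
  double E_parent, double sqrtkT, double score, std::vector<double>& bins)
{
  // deriv.first  = offset of this window's first parameter in the
  //                nuclide-wide parameter ordering
  // deriv.second = d(sigma_f)/d(parameter) for every parameter in the window
  auto deriv = nuc.evaluate_pole_deriv_fission(E_parent, sqrtkT);
  int start = deriv.first;
  const std::vector<double>& values = deriv.second;
  for (std::size_t i = 0; i < values.size(); ++i) {
    bins[start + i] += score * values[i];
  }
}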