VEC-2: Nonlinear elastic
Note
Prerequisites of this tutorial include VEC-0: Linear elasticity and SCL-4: Nonlinear Poisson's equation


Note
Intended learning outcomes:
  • solving a nonlinear vector-valued problem in MoFEM
  • extending linear elasticity to large strains with the Hencky material (summarized below)
  • developing code that can be compiled for 2D or 3D cases
  • using matrix functions
  • implementing the tangent stiffness matrix and verifying it with PETSc
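The constitutive law used throughout this tutorial is the Hencky (logarithmic strain) hyperelastic material. As a brief recap of the relations implemented in HenckyOps.hpp (notation follows the code; the isotropic elasticity tensor \(\mathbb{D}\) is built from the block's Young's modulus and Poisson's ratio):

\[
\mathbf{F} = \mathbf{I} + \nabla\mathbf{u}, \qquad
\mathbf{C} = \mathbf{F}^{\mathsf{T}}\mathbf{F}, \qquad
\mathbf{H} = \tfrac{1}{2}\log\mathbf{C},
\]

\[
\mathbf{T} = \mathbb{D} : \mathbf{H}, \qquad
\mathbf{S} = \mathbf{T} : \frac{\partial \log\mathbf{C}}{\partial \mathbf{C}}, \qquad
\mathbf{P} = \mathbf{F}\,\mathbf{S},
\]

where \(\mathbf{H}\) is the logarithmic (Hencky) strain obtained as a matrix function of the right Cauchy-Green tensor \(\mathbf{C}\), \(\mathbf{T}\) is the Hencky stress, \(\mathbf{S}\) the second Piola-Kirchhoff stress and \(\mathbf{P}\) the first Piola-Kirchhoff stress assembled into the residual. The consistent tangent \(\partial\mathbf{P}/\partial\mathbf{F}\) implemented in OpHenckyTangentImpl can be cross-checked against a finite-difference Jacobian, for example with the PETSc option -snes_test_jacobian.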
/**
* \file nonlinear_elastic.cpp
* \example nonlinear_elastic.cpp
*
* Nonlinear large-strain elastic problem with Hencky material (2D/3D)
*
*/
#include <MoFEM.hpp>
using namespace MoFEM;
constexpr int SPACE_DIM =
    EXECUTABLE_DIMENSION; //< Space dimension of problem, mesh
using DomainEle = ElementsAndOps<SPACE_DIM>::DomainEle;
using BoundaryEle = ElementsAndOps<SPACE_DIM>::BoundaryEle;
using DomainEleOp = DomainEle::UserDataOperator;
using BoundaryEleOp = BoundaryEle::UserDataOperator;
using DomainNaturalBC =
    NaturalBC<DomainEleOp>::Assembly<PETSC>::LinearForm<GAUSS>;
using OpBodyForce =
    DomainNaturalBC::OpFlux<NaturalMeshsetType<BLOCKSET>, 1, SPACE_DIM>;
using BoundaryNaturalBC =
    NaturalBC<BoundaryEleOp>::Assembly<PETSC>::LinearForm<GAUSS>;
using OpForce = BoundaryNaturalBC::OpFlux<NaturalForceMeshsets, 1, SPACE_DIM>;
template <int DIM> struct PostProcEleByDim;
template <> struct PostProcEleByDim<2> {
};
template <> struct PostProcEleByDim<3> {
};
#include <HenckyOps.hpp>
using namespace HenckyOps;
struct Example {
Example(MoFEM::Interface &m_field) : mField(m_field) {}
private:
};
//! [Run problem]
}
//! [Run problem]
//! [Read mesh]
char meshFileName[255];
CHKERR PetscOptionsGetString(PETSC_NULL, PETSC_NULL, "-file_name",
meshFileName, 255, PETSC_NULL);
CHKERR simple->loadFile("", meshFileName,
}
//! [Read mesh]
//! [Set up problem]
enum bases { AINSWORTH, DEMKOWICZ, LASBASETOPT };
const char *list_bases[LASBASETOPT] = {"ainsworth", "demkowicz"};
PetscInt choice_base_value = AINSWORTH;
CHKERR PetscOptionsGetEList(PETSC_NULL, NULL, "-base", list_bases,
LASBASETOPT, &choice_base_value, PETSC_NULL);
FieldApproximationBase base;
switch (choice_base_value) {
case AINSWORTH:
  base = AINSWORTH_LEGENDRE_BASE;
  MOFEM_LOG("WORLD", Sev::inform)
      << "Set AINSWORTH_LEGENDRE_BASE for displacements";
  break;
case DEMKOWICZ:
  base = DEMKOWICZ_JACOBI_BASE;
  MOFEM_LOG("WORLD", Sev::inform)
      << "Set DEMKOWICZ_JACOBI_BASE for displacements";
  break;
default:
  base = LASTBASE;
  break;
}
// Add field
CHKERR simple->addDomainField("U", H1, base, SPACE_DIM);
CHKERR simple->addBoundaryField("U", H1, base, SPACE_DIM);
int order = 2;
CHKERR PetscOptionsGetInt(PETSC_NULL, "", "-order", &order, PETSC_NULL);
CHKERR simple->setFieldOrder("U", order);
CHKERR simple->setUp();
}
//! [Set up problem]
//! [Boundary condition]
auto *pipeline_mng = mField.getInterface<PipelineManager>();
auto bc_mng = mField.getInterface<BcManager>();
auto time_scale = boost::make_shared<TimeScale>();
auto integration_rule = [](int, int, int approx_order) {
return 2 * (approx_order - 1);
};
CHKERR pipeline_mng->setDomainRhsIntegrationRule(integration_rule);
CHKERR pipeline_mng->setDomainLhsIntegrationRule(integration_rule);
CHKERR pipeline_mng->setBoundaryRhsIntegrationRule(integration_rule);
CHKERR BoundaryNaturalBC::AddFluxToPipeline<OpForce>::add(
pipeline_mng->getOpBoundaryRhsPipeline(), mField, "U", {time_scale},
"FORCE", Sev::inform);
//! [Define gravity vector]
CHKERR DomainNaturalBC::AddFluxToPipeline<OpBodyForce>::add(
pipeline_mng->getOpDomainRhsPipeline(), mField, "U", {time_scale},
"BODY_FORCE", Sev::inform);
// Essential BC
CHKERR bc_mng->removeBlockDOFsOnEntities(simple->getProblemName(), "REMOVE_X",
"U", 0, 0);
CHKERR bc_mng->removeBlockDOFsOnEntities(simple->getProblemName(), "REMOVE_Y",
"U", 1, 1);
CHKERR bc_mng->removeBlockDOFsOnEntities(simple->getProblemName(), "REMOVE_Z",
"U", 2, 2);
CHKERR bc_mng->pushMarkDOFsOnEntities<DisplacementCubitBcData>(
simple->getProblemName(), "U");
// Essential MPCs BC
CHKERR bc_mng->addBlockDOFsToMPCs(simple->getProblemName(), "U");
}
//! [Boundary condition]
//! [Push operators to pipeline]
auto *pipeline_mng = mField.getInterface<PipelineManager>();
auto add_domain_ops_lhs = [&](auto &pip) {
CHKERR HenckyOps::opFactoryDomainLhs<SPACE_DIM, PETSC, GAUSS, DomainEleOp>(
mField, pip, "U", "MAT_ELASTIC", Sev::inform);
};
auto add_domain_ops_rhs = [&](auto &pip) {
CHKERR HenckyOps::opFactoryDomainRhs<SPACE_DIM, PETSC, GAUSS, DomainEleOp>(
mField, pip, "U", "MAT_ELASTIC", Sev::inform);
};
CHKERR add_domain_ops_lhs(pipeline_mng->getOpDomainLhsPipeline());
CHKERR add_domain_ops_rhs(pipeline_mng->getOpDomainRhsPipeline());
}
//! [Push operators to pipeline]
/**
* @brief Monitor solution
*
* This function is called by the TS solver at the end of each time step. It is
* used to write results to disk.
*/
struct Monitor : public FEMethod {
Monitor(SmartPetscObj<DM> dm, boost::shared_ptr<PostProcEleBdy> post_proc)
: dM(dm), postProc(post_proc){};
constexpr int save_every_nth_step = 1;
postProc->mField, postProc->getPostProcMesh(), {"U"});
CHKERR postProc->writeFile(
"out_step_" + boost::lexical_cast<std::string>(ts_step) + ".h5m");
}
}
private:
boost::shared_ptr<PostProcEleBdy> postProc;
};
//! [Solve]
auto *pipeline_mng = mField.getInterface<PipelineManager>();
auto dm = simple->getDM();
auto ts = pipeline_mng->createTSIM();
// Setup postprocessing
auto create_post_proc_fe = [dm, this, simple]() {
auto post_proc_ele_domain = [dm, this](auto &pip_domain) {
auto common_ptr = commonDataFactory<SPACE_DIM, GAUSS, DomainEleOp>(
mField, pip_domain, "U", "MAT_ELASTIC", Sev::inform);
return common_ptr;
};
auto post_proc_fe_bdy = boost::make_shared<PostProcEleBdy>(mField);
auto u_ptr = boost::make_shared<MatrixDouble>();
post_proc_fe_bdy->getOpPtrVector().push_back(
    new OpCalculateVectorFieldValues<SPACE_DIM>("U", u_ptr));
auto op_loop_side =
new OpLoopSide<SideEle>(mField, simple->getDomainFEName(), SPACE_DIM);
auto common_ptr = post_proc_ele_domain(op_loop_side->getOpPtrVector());
post_proc_fe_bdy->getOpPtrVector().push_back(op_loop_side);
post_proc_fe_bdy->getOpPtrVector().push_back(
new OpPPMap(
post_proc_fe_bdy->getPostProcMesh(),
post_proc_fe_bdy->getMapGaussPts(),
{},
{{"U", u_ptr}},
{{"GRAD", common_ptr->matGradPtr},
{"FIRST_PIOLA", common_ptr->getMatFirstPiolaStress()}},
{}
)
);
return post_proc_fe_bdy;
};
auto add_extra_finite_elements_to_ksp_solver_pipelines = [&]() {
auto pre_proc_ptr = boost::make_shared<FEMethod>();
auto post_proc_rhs_ptr = boost::make_shared<FEMethod>();
auto post_proc_lhs_ptr = boost::make_shared<FEMethod>();
auto time_scale = boost::make_shared<TimeScale>();
auto get_bc_hook_rhs = [this, pre_proc_ptr, time_scale]() {
CHKERR EssentialPreProc<DisplacementCubitBcData>(mField, pre_proc_ptr,
                                                 {time_scale}, false)();
};
pre_proc_ptr->preProcessHook = get_bc_hook_rhs;
auto get_post_proc_hook_rhs = [this, post_proc_rhs_ptr]() {
CHKERR EssentialPreProcReaction<DisplacementCubitBcData>(
    mField, post_proc_rhs_ptr, nullptr, Sev::verbose)();
CHKERR EssentialPostProcRhs<DisplacementCubitBcData>(
    mField, post_proc_rhs_ptr, 1.)();
CHKERR EssentialPostProcRhs<MPCsType>(mField, post_proc_rhs_ptr)();
};
auto get_post_proc_hook_lhs = [this, post_proc_lhs_ptr]() {
CHKERR EssentialPostProcLhs<DisplacementCubitBcData>(
    mField, post_proc_lhs_ptr, 1.)();
CHKERR EssentialPostProcLhs<MPCsType>(mField, post_proc_lhs_ptr)();
};
post_proc_rhs_ptr->postProcessHook = get_post_proc_hook_rhs;
post_proc_lhs_ptr->postProcessHook = get_post_proc_hook_lhs;
// This is low-level pushing of finite elements (pipelines) to the solver
auto ts_ctx_ptr = getDMTsCtx(simple->getDM());
ts_ctx_ptr->getPreProcessIFunction().push_front(pre_proc_ptr);
ts_ctx_ptr->getPreProcessIJacobian().push_front(pre_proc_ptr);
ts_ctx_ptr->getPostProcessIFunction().push_back(post_proc_rhs_ptr);
ts_ctx_ptr->getPostProcessIJacobian().push_back(post_proc_lhs_ptr);
};
// Add extra finite elements to SNES solver pipelines to resolve essential
// boundary conditions
CHKERR add_extra_finite_elements_to_ksp_solver_pipelines();
auto create_monitor_fe = [dm](auto &&post_proc_fe) {
return boost::make_shared<Monitor>(dm, post_proc_fe);
};
// Set monitor which post-processes results and saves them to the hard drive
boost::shared_ptr<FEMethod> null_fe;
auto monitor_ptr = create_monitor_fe(create_post_proc_fe());
CHKERR DMMoFEMTSSetMonitor(dm, ts, simple->getDomainFEName(), null_fe,
null_fe, monitor_ptr);
// Set time solver
double ftime = 1;
CHKERR TSSetDuration(ts, PETSC_DEFAULT, ftime);
CHKERR TSSetExactFinalTime(ts, TS_EXACTFINALTIME_MATCHSTEP);
auto D = createDMVector(simple->getDM());
CHKERR TSSetSolution(ts, D);
CHKERR TSSetFromOptions(ts);
CHKERR TSSolve(ts, NULL);
CHKERR TSGetTime(ts, &ftime);
PetscInt steps, snesfails, rejects, nonlinits, linits;
CHKERR TSGetStepNumber(ts, &steps);
CHKERR TSGetSNESFailures(ts, &snesfails);
CHKERR TSGetStepRejections(ts, &rejects);
CHKERR TSGetSNESIterations(ts, &nonlinits);
CHKERR TSGetKSPIterations(ts, &linits);
MOFEM_LOG_C("EXAMPLE", Sev::inform,
"steps %d (%d rejected, %d SNES fails), ftime %g, nonlinits "
"%d, linits %d",
steps, rejects, snesfails, ftime, nonlinits, linits);
}
//! [Solve]
//! [Getting norms]
auto dm = simple->getDM();
auto T = createDMVector(simple->getDM());
CHKERR DMoFEMMeshToLocalVector(simple->getDM(), T, INSERT_VALUES,
SCATTER_FORWARD);
double nrm2;
CHKERR VecNorm(T, NORM_2, &nrm2);
MOFEM_LOG("EXAMPLE", Sev::inform) << "Solution norm " << nrm2;
auto post_proc_norm_fe = boost::make_shared<DomainEle>(mField);
auto post_proc_norm_rule_hook = [](int, int, int p) -> int { return 2 * p; };
post_proc_norm_fe->getRuleHook = post_proc_norm_rule_hook;
post_proc_norm_fe->getOpPtrVector(), {H1});
enum NORMS { U_NORM_L2 = 0, PIOLA_NORM, LAST_NORM };
auto norms_vec =
    createVectorMPI(mField.get_comm(),
                    (mField.get_comm_rank() == 0) ? LAST_NORM : 0, LAST_NORM);
CHKERR VecZeroEntries(norms_vec);
auto u_ptr = boost::make_shared<MatrixDouble>();
post_proc_norm_fe->getOpPtrVector().push_back(
    new OpCalculateVectorFieldValues<SPACE_DIM>("U", u_ptr));
post_proc_norm_fe->getOpPtrVector().push_back(
new OpCalcNormL2Tensor1<SPACE_DIM>(u_ptr, norms_vec, U_NORM_L2));
auto common_ptr = commonDataFactory<SPACE_DIM, GAUSS, DomainEleOp>(
mField, post_proc_norm_fe->getOpPtrVector(), "U", "MAT_ELASTIC",
Sev::inform);
post_proc_norm_fe->getOpPtrVector().push_back(
    new OpCalcNormL2Tensor2<SPACE_DIM, SPACE_DIM>(
        common_ptr->getMatFirstPiolaStress(), norms_vec, PIOLA_NORM));
CHKERR DMoFEMLoopFiniteElements(dm, simple->getDomainFEName(),
post_proc_norm_fe);
CHKERR VecAssemblyBegin(norms_vec);
CHKERR VecAssemblyEnd(norms_vec);
MOFEM_LOG_CHANNEL("SELF"); // Clear channel from old tags
if (mField.get_comm_rank() == 0) {
const double *norms;
CHKERR VecGetArrayRead(norms_vec, &norms);
MOFEM_TAG_AND_LOG("SELF", Sev::inform, "example")
<< "norm_u: " << std::scientific << std::sqrt(norms[U_NORM_L2]);
MOFEM_TAG_AND_LOG("SELF", Sev::inform, "example")
<< "norm_piola: " << std::scientific << std::sqrt(norms[PIOLA_NORM]);
CHKERR VecRestoreArrayRead(norms_vec, &norms);
}
}
//! [Getting norms]
//! [Postprocessing results]
PetscInt test_nb = 0;
PetscBool test_flg = PETSC_FALSE;
CHKERR PetscOptionsGetInt(PETSC_NULL, "", "-test", &test_nb, &test_flg);
if (test_flg) {
auto T = createDMVector(simple->getDM());
CHKERR DMoFEMMeshToLocalVector(simple->getDM(), T, INSERT_VALUES,
SCATTER_FORWARD);
double nrm2;
CHKERR VecNorm(T, NORM_2, &nrm2);
MOFEM_LOG("EXAMPLE", Sev::verbose) << "Regression norm " << nrm2;
double regression_value = 0;
switch (test_nb) {
case 1:
regression_value = 1.02789;
break;
case 2:
regression_value = 1.8841e+00;
break;
case 3:
regression_value = 1.8841e+00;
break;
case 4: // just links
regression_value = 4.9625e+00;
break;
case 5: // link master
regression_value = 6.6394e+00;
break;
case 6: // link master swap
regression_value = 4.98764e+00;
break;
case 7: // link Y
regression_value = 4.9473e+00;
break;
case 8: // link_3D_repr
regression_value = 2.5749e-01;
break;
default:
SETERRQ(PETSC_COMM_WORLD, MOFEM_ATOM_TEST_INVALID, "Wrong test number.");
break;
}
if (fabs(nrm2 - regression_value) > 1e-2)
SETERRQ2(PETSC_COMM_WORLD, MOFEM_ATOM_TEST_INVALID,
"Regression test field; wrong norm value. %6.4e != %6.4e", nrm2,
regression_value);
}
}
//! [Postprocessing results]
//! [Check]
}
//! [Check]
static char help[] = "...\n\n";
int main(int argc, char *argv[]) {
// Initialisation of MoFEM/PETSc and MOAB data structures
const char param_file[] = "param_file.petsc";
MoFEM::Core::Initialize(&argc, &argv, param_file, help);
// Add logging channel for example
auto core_log = logging::core::get();
core_log->add_sink(
LogManager::createSink(LogManager::getStrmWorld(), "EXAMPLE"));
LogManager::setLog("EXAMPLE");
MOFEM_LOG_TAG("EXAMPLE", "example");
try {
//! [Register MoFEM discrete manager in PETSc]
DMType dm_name = "DMMOFEM";
CHKERR DMRegister_MoFEM(dm_name);
//! [Register MoFEM discrete manager in PETSc]
//! [Create MoAB]
moab::Core mb_instance; ///< mesh database
moab::Interface &moab = mb_instance; ///< mesh database interface
//! [Create MoAB]
//! [Create MoFEM]
MoFEM::Core core(moab); ///< finite element database
MoFEM::Interface &m_field = core; ///< finite element database interface
//! [Create MoFEM]
//! [Example]
Example ex(m_field);
CHKERR ex.runProblem();
//! [Example]
}
CATCH_ERRORS;
CHKERR MoFEM::Core::Finalize();
}
/**
* @file NonlinearElasticExample.hpp
* @example mofem/tutorials/vec-2/src/NonlinearElasticExample.hpp
* @date 2025-10-5
*
* @copyright Anonymous authors (c) 2025 under the MIT license (see LICENSE for
* details)
*/
struct Example {
  Example(MoFEM::Interface &m_field) : mField(m_field) {}

  MoFEMErrorCode runProblem();
protected:
  MoFEM::Interface &mField;

  boost::shared_ptr<DomainEle> reactionFe;
MoFEMErrorCode readMesh();
MoFEMErrorCode setupProblem();
MoFEMErrorCode boundaryCondition();
MoFEMErrorCode assembleSystem();
MoFEMErrorCode TsSolve();
MoFEMErrorCode gettingNorms();
MoFEMErrorCode outputResults();
MoFEMErrorCode checkResults();
};
//! [Run problem]
}
//! [Run problem]
//! [Read mesh]
auto simple = mField.getInterface<Simple>();
CHKERR simple->getOptions();
CHKERR simple->loadFile();
}
//! [Read mesh]
//! [Set up problem]
Simple *simple = mField.getInterface<Simple>();
enum bases { AINSWORTH, DEMKOWICZ, LASBASETOPT };
const char *list_bases[LASBASETOPT] = {"ainsworth", "demkowicz"};
PetscInt choice_base_value = AINSWORTH;
CHKERR PetscOptionsGetEList(PETSC_NULLPTR, NULL, "-base", list_bases,
LASBASETOPT, &choice_base_value, PETSC_NULLPTR);
FieldApproximationBase base;
switch (choice_base_value) {
case AINSWORTH:
  base = AINSWORTH_LEGENDRE_BASE;
  MOFEM_LOG("WORLD", Sev::inform)
      << "Set AINSWORTH_LEGENDRE_BASE for displacements";
  break;
case DEMKOWICZ:
  base = DEMKOWICZ_JACOBI_BASE;
  MOFEM_LOG("WORLD", Sev::inform)
      << "Set DEMKOWICZ_JACOBI_BASE for displacements";
  break;
default:
  base = LASTBASE;
  break;
}
// Add field
CHKERR simple->addDomainField("U", H1, base, SPACE_DIM);
CHKERR simple->addBoundaryField("U", H1, base, SPACE_DIM);
CHKERR simple->addDataField("GEOMETRY", H1, base, SPACE_DIM);
int order = 2;
CHKERR PetscOptionsGetInt(PETSC_NULLPTR, "", "-order", &order, PETSC_NULLPTR);
CHKERR simple->setFieldOrder("U", order);
CHKERR simple->setFieldOrder("GEOMETRY", 2);
CHKERR simple->setUp();
auto project_ho_geometry = [&]() {
Projection10NodeCoordsOnField ent_method(mField, "GEOMETRY");
return mField.loop_dofs("GEOMETRY", ent_method);
};
CHKERR project_ho_geometry();
}
//! [Set up problem]
//! [Boundary condition]
auto *pipeline_mng = mField.getInterface<PipelineManager>();
auto simple = mField.getInterface<Simple>();
auto bc_mng = mField.getInterface<BcManager>();
auto time_scale = boost::make_shared<ExampleTimeScale>();
auto integration_rule = [](int, int, int approx_order) {
return 2 * (approx_order - 1) + 1;
};
CHKERR pipeline_mng->setDomainRhsIntegrationRule(integration_rule);
CHKERR pipeline_mng->setDomainLhsIntegrationRule(integration_rule);
CHKERR pipeline_mng->setBoundaryRhsIntegrationRule(integration_rule);
reactionFe = boost::make_shared<DomainEle>(mField);
reactionFe->getRuleHook = integration_rule;
CHKERR BoundaryNaturalBC::AddFluxToPipeline<OpForce>::add(
pipeline_mng->getOpBoundaryRhsPipeline(), mField, "U", {time_scale},
"FORCE", Sev::inform);
//! [Define gravity vector]
CHKERR DomainNaturalBC::AddFluxToPipeline<OpBodyForce>::add(
pipeline_mng->getOpDomainRhsPipeline(), mField, "U", {time_scale},
"BODY_FORCE", Sev::inform);
// Essential BC
CHKERR bc_mng->removeBlockDOFsOnEntities(simple->getProblemName(), "REMOVE_X",
"U", 0, 0);
CHKERR bc_mng->removeBlockDOFsOnEntities(simple->getProblemName(), "REMOVE_Y",
"U", 1, 1);
CHKERR bc_mng->removeBlockDOFsOnEntities(simple->getProblemName(), "REMOVE_Z",
"U", 2, 2);
CHKERR bc_mng->pushMarkDOFsOnEntities<DisplacementCubitBcData>(
simple->getProblemName(), "U");
}
//! [Boundary condition]
//! [Push operators to pipeline]
auto *pipeline_mng = mField.getInterface<PipelineManager>();
auto add_domain_ops_lhs = [&](auto &pip) {
"GEOMETRY");
CHKERR HenckyOps::opFactoryDomainLhs<SPACE_DIM, PETSC, GAUSS, DomainEleOp>(
mField, pip, "U", "MAT_ELASTIC", Sev::inform);
};
auto add_domain_ops_rhs = [&](auto &pip) {
"GEOMETRY");
CHKERR HenckyOps::opFactoryDomainRhs<SPACE_DIM, PETSC, GAUSS, DomainEleOp>(
mField, pip, "U", "MAT_ELASTIC", Sev::inform);
};
CHKERR add_domain_ops_lhs(pipeline_mng->getOpDomainLhsPipeline());
CHKERR add_domain_ops_rhs(pipeline_mng->getOpDomainRhsPipeline());
// push operators to reaction pipeline
CHKERR add_domain_ops_rhs(reactionFe->getOpPtrVector());
reactionFe->postProcessHook =
EssentialPreProcReaction<DisplacementCubitBcData>(mField, reactionFe);
}
//! [Push operators to pipeline]
//! [TS Solve]
auto *simple = mField.getInterface<Simple>();
auto *pipeline_mng = mField.getInterface<PipelineManager>();
auto dm = simple->getDM();
auto ts = pipeline_mng->createTSIM();
PetscBool post_proc_vol;
PetscBool post_proc_skin;
if constexpr (SPACE_DIM == 2) {
post_proc_vol = PETSC_TRUE;
post_proc_skin = PETSC_FALSE;
} else {
post_proc_vol = PETSC_FALSE;
post_proc_skin = PETSC_TRUE;
}
CHKERR PetscOptionsGetBool(PETSC_NULLPTR, "", "-post_proc_vol",
&post_proc_vol, PETSC_NULLPTR);
CHKERR PetscOptionsGetBool(PETSC_NULLPTR, "", "-post_proc_skin",
&post_proc_skin, PETSC_NULLPTR);
// Setup postprocessing
auto create_post_proc_fe = [dm, this, simple, post_proc_vol,
post_proc_skin]() {
auto post_proc_ele_domain = [dm, this](auto &pip_domain) {
"GEOMETRY");
auto common_ptr = commonDataFactory<SPACE_DIM, GAUSS, DomainEleOp>(
mField, pip_domain, "U", "MAT_ELASTIC", Sev::inform);
return common_ptr;
};
auto post_proc_map = [this](auto &pip, auto u_ptr, auto common_ptr) {
using OpPPMap = OpPostProcMapInMoab<SPACE_DIM, SPACE_DIM>;
pip->getOpPtrVector().push_back(
new OpPPMap(pip->getPostProcMesh(), pip->getMapGaussPts(), {},
{{"U", u_ptr}},
{{"GRAD", common_ptr->matGradPtr},
{"FIRST_PIOLA", common_ptr->getMatFirstPiolaStress()}},
{}));
};
auto push_post_proc_bdy = [dm, this, simple, post_proc_skin,
&post_proc_ele_domain,
&post_proc_map](auto &pip_bdy) {
if (post_proc_skin == PETSC_FALSE)
return boost::shared_ptr<PostProcEleBdy>();
auto u_ptr = boost::make_shared<MatrixDouble>();
pip_bdy->getOpPtrVector().push_back(
new OpCalculateVectorFieldValues<SPACE_DIM>("U", u_ptr));
auto op_loop_side =
new OpLoopSide<SideEle>(mField, simple->getDomainFEName(), SPACE_DIM);
auto common_ptr = post_proc_ele_domain(op_loop_side->getOpPtrVector());
pip_bdy->getOpPtrVector().push_back(op_loop_side);
CHKERR post_proc_map(pip_bdy, u_ptr, common_ptr);
return pip_bdy;
};
auto push_post_proc_domain = [dm, this, simple, post_proc_vol,
&post_proc_ele_domain,
&post_proc_map](auto &pip_domain) {
if (post_proc_vol == PETSC_FALSE)
return boost::shared_ptr<PostProcEleDomain>();
auto u_ptr = boost::make_shared<MatrixDouble>();
pip_domain->getOpPtrVector().push_back(
new OpCalculateVectorFieldValues<SPACE_DIM>("U", u_ptr));
auto common_ptr = post_proc_ele_domain(pip_domain->getOpPtrVector());
CHKERR post_proc_map(pip_domain, u_ptr, common_ptr);
return pip_domain;
};
auto post_proc_fe_domain = boost::make_shared<PostProcEleDomain>(mField);
auto post_proc_fe_bdy = boost::make_shared<PostProcEleBdy>(mField);
return std::make_pair(push_post_proc_domain(post_proc_fe_domain),
push_post_proc_bdy(post_proc_fe_bdy));
};
auto add_extra_finite_elements_to_solver_pipelines = [&]() {
auto pre_proc_ptr = boost::make_shared<FEMethod>();
auto post_proc_rhs_ptr = boost::make_shared<FEMethod>();
auto post_proc_lhs_ptr = boost::make_shared<FEMethod>();
auto time_scale = boost::make_shared<ExampleTimeScale>();
auto get_bc_hook_rhs = [this, pre_proc_ptr, time_scale]() {
CHKERR EssentialPreProc<DisplacementCubitBcData>(mField, pre_proc_ptr,
{time_scale}, false)();
};
pre_proc_ptr->preProcessHook = get_bc_hook_rhs;
auto get_post_proc_hook_rhs = [this, post_proc_rhs_ptr]() {
CHKERR EssentialPreProcReaction<DisplacementCubitBcData>(
mField, post_proc_rhs_ptr, nullptr, Sev::verbose)();
CHKERR EssentialPostProcRhs<DisplacementCubitBcData>(
mField, post_proc_rhs_ptr, 1.)();
};
auto get_post_proc_hook_lhs = [this, post_proc_lhs_ptr]() {
CHKERR EssentialPostProcLhs<DisplacementCubitBcData>(
mField, post_proc_lhs_ptr, 1.)();
};
post_proc_rhs_ptr->postProcessHook = get_post_proc_hook_rhs;
post_proc_lhs_ptr->postProcessHook = get_post_proc_hook_lhs;
// This is low-level pushing of finite elements (pipelines) to the solver
auto ts_ctx_ptr = getDMTsCtx(simple->getDM());
ts_ctx_ptr->getPreProcessIFunction().push_front(pre_proc_ptr);
ts_ctx_ptr->getPreProcessIJacobian().push_front(pre_proc_ptr);
ts_ctx_ptr->getPostProcessIFunction().push_back(post_proc_rhs_ptr);
ts_ctx_ptr->getPostProcessIJacobian().push_back(post_proc_lhs_ptr);
};
// Add extra finite elements to SNES solver pipelines to resolve essential
// boundary conditions
CHKERR add_extra_finite_elements_to_solver_pipelines();
auto create_monitor_fe = [dm](auto &&post_proc_fe, auto &&reactionFe) {
return boost::make_shared<Monitor>(dm, post_proc_fe, reactionFe);
};
// Set monitor which post-processes results and saves them to the hard drive
boost::shared_ptr<FEMethod> null_fe;
auto monitor_ptr = create_monitor_fe(create_post_proc_fe(), reactionFe);
CHKERR DMMoFEMTSSetMonitor(dm, ts, simple->getDomainFEName(), null_fe,
null_fe, monitor_ptr);
// Set time solver
double ftime = 1;
CHKERR TSSetMaxTime(ts, ftime);
CHKERR TSSetExactFinalTime(ts, TS_EXACTFINALTIME_MATCHSTEP);
auto B = createDMMatrix(dm);
CHKERR TSSetI2Jacobian(ts, B, B, PETSC_NULLPTR, PETSC_NULLPTR);
auto D = createDMVector(simple->getDM());
CHKERR TSSetSolution(ts, D);
CHKERR TSSetFromOptions(ts);
CHKERR TSSolve(ts, NULL);
CHKERR TSGetTime(ts, &ftime);
PetscInt steps, snesfails, rejects, nonlinits, linits;
CHKERR TSGetStepNumber(ts, &steps);
CHKERR TSGetSNESFailures(ts, &snesfails);
CHKERR TSGetStepRejections(ts, &rejects);
CHKERR TSGetSNESIterations(ts, &nonlinits);
CHKERR TSGetKSPIterations(ts, &linits);
MOFEM_LOG_C("EXAMPLE", Sev::inform,
"steps %d (%d rejected, %d SNES fails), ftime %g, nonlinits "
"%d, linits %d",
steps, rejects, snesfails, ftime, nonlinits, linits);
}
//! [TS Solve]
//! [Getting norms]
auto simple = mField.getInterface<Simple>();
auto dm = simple->getDM();
auto T = createDMVector(simple->getDM());
CHKERR DMoFEMMeshToLocalVector(simple->getDM(), T, INSERT_VALUES,
SCATTER_FORWARD);
double nrm2;
CHKERR VecNorm(T, NORM_2, &nrm2);
MOFEM_LOG("EXAMPLE", Sev::inform) << "Solution norm " << nrm2;
auto post_proc_norm_fe = boost::make_shared<DomainEle>(mField);
auto post_proc_norm_rule_hook = [](int, int, int p) -> int { return 2 * p; };
post_proc_norm_fe->getRuleHook = post_proc_norm_rule_hook;
post_proc_norm_fe->getOpPtrVector(), {H1}, "GEOMETRY");
enum NORMS { U_NORM_L2 = 0, PIOLA_NORM, LAST_NORM };
auto norms_vec =
    createVectorMPI(mField.get_comm(),
                    (mField.get_comm_rank() == 0) ? LAST_NORM : 0, LAST_NORM);
CHKERR VecZeroEntries(norms_vec);
auto u_ptr = boost::make_shared<MatrixDouble>();
post_proc_norm_fe->getOpPtrVector().push_back(
new OpCalculateVectorFieldValues<SPACE_DIM>("U", u_ptr));
post_proc_norm_fe->getOpPtrVector().push_back(
new OpCalcNormL2Tensor1<SPACE_DIM>(u_ptr, norms_vec, U_NORM_L2));
auto common_ptr = commonDataFactory<SPACE_DIM, GAUSS, DomainEleOp>(
mField, post_proc_norm_fe->getOpPtrVector(), "U", "MAT_ELASTIC",
Sev::inform);
post_proc_norm_fe->getOpPtrVector().push_back(
new OpCalcNormL2Tensor2<SPACE_DIM, SPACE_DIM>(
common_ptr->getMatFirstPiolaStress(), norms_vec, PIOLA_NORM));
CHKERR DMoFEMLoopFiniteElements(dm, simple->getDomainFEName(),
post_proc_norm_fe);
CHKERR VecAssemblyBegin(norms_vec);
CHKERR VecAssemblyEnd(norms_vec);
MOFEM_LOG_CHANNEL("SELF"); // Clear channel from old tags
if (mField.get_comm_rank() == 0) {
const double *norms;
CHKERR VecGetArrayRead(norms_vec, &norms);
MOFEM_TAG_AND_LOG("SELF", Sev::inform, "example")
<< "norm_u: " << std::scientific << std::sqrt(norms[U_NORM_L2]);
MOFEM_TAG_AND_LOG("SELF", Sev::inform, "example")
<< "norm_piola: " << std::scientific << std::sqrt(norms[PIOLA_NORM]);
CHKERR VecRestoreArrayRead(norms_vec, &norms);
}
}
//! [Getting norms]
//! [Postprocessing results]
}
//! [Postprocessing results]
//! [Check]
PetscInt test_nb = 0;
PetscBool test_flg = PETSC_FALSE;
CHKERR PetscOptionsGetInt(PETSC_NULLPTR, "", "-test", &test_nb, &test_flg);
if (test_flg) {
auto simple = mField.getInterface<Simple>();
auto T = createDMVector(simple->getDM());
CHKERR DMoFEMMeshToLocalVector(simple->getDM(), T, INSERT_VALUES,
SCATTER_FORWARD);
double nrm2;
CHKERR VecNorm(T, NORM_2, &nrm2);
MOFEM_LOG("EXAMPLE", Sev::verbose) << "Regression norm " << nrm2;
double regression_value = 0;
switch (test_nb) {
case 1:
regression_value = 1.02789;
break;
case 2:
regression_value = 1.8841e+00;
break;
case 3:
regression_value = 1.8841e+00;
break;
default:
SETERRQ(PETSC_COMM_WORLD, MOFEM_ATOM_TEST_INVALID, "Wrong test number.");
break;
}
if (fabs(nrm2 - regression_value) > 1e-2)
SETERRQ(PETSC_COMM_WORLD, MOFEM_ATOM_TEST_INVALID,
"Regression test field; wrong norm value. %6.4e != %6.4e", nrm2,
regression_value);
}
}
//! [Check]
/**
* \file HenckyOps.hpp
* \example HenckyOps.hpp
*
* @copyright Copyright (c) 2023
*/
#ifndef __HENCKY_OPS_HPP__
#define __HENCKY_OPS_HPP__
namespace HenckyOps {
constexpr double eps = std::numeric_limits<float>::epsilon();
auto f = [](double v) { return 0.5 * std::log(v); };
auto d_f = [](double v) { return 0.5 / v; };
auto dd_f = [](double v) { return -0.5 / (v * v); };
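// Scalar kernels for the matrix functions used below: f builds the
// logarithmic strain (1/2 log applied to the eigenvalues of C), while d_f and
// dd_f are its first and second derivatives, needed for the stress and for
// the consistent tangent, respectively.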
struct isEq {
static inline auto check(const double &a, const double &b) {
return std::abs(a - b) / absMax < eps;
}
static double absMax;
};
double isEq::absMax = 1;
inline auto is_eq(const double &a, const double &b) {
return isEq::check(a, b);
};
template <int DIM> inline auto get_uniq_nb(double *ptr) {
std::array<double, DIM> tmp;
std::copy(ptr, &ptr[DIM], tmp.begin());
std::sort(tmp.begin(), tmp.end());
isEq::absMax = std::max(std::abs(tmp[0]), std::abs(tmp[DIM - 1]));
return std::distance(tmp.begin(), std::unique(tmp.begin(), tmp.end(), is_eq));
};
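// In 3D the case of (nearly) repeated eigenvalues of C has to be detected:
// get_uniq_nb counts the distinct eigenvalues and sort_eigen_vals below
// reorders the eigenpairs into the layout expected by the matrix-function
// routines when only two values are distinct.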
template <int DIM>
inline auto sort_eigen_vals(FTensor::Tensor1<double, DIM> &eig,
                            FTensor::Tensor2<double, DIM, DIM> &eigen_vec) {
  isEq::absMax =
      std::max(std::max(std::abs(eig(0)), std::abs(eig(1))), std::abs(eig(2)));
int i = 0, j = 1, k = 2;
if (is_eq(eig(0), eig(1))) {
i = 0;
j = 2;
k = 1;
} else if (is_eq(eig(0), eig(2))) {
i = 0;
j = 1;
k = 2;
} else if (is_eq(eig(1), eig(2))) {
i = 1;
j = 0;
k = 2;
}
FTensor::Tensor2<double, 3, 3> eigen_vec_c{
    eigen_vec(i, 0), eigen_vec(i, 1), eigen_vec(i, 2),
eigen_vec(j, 0), eigen_vec(j, 1), eigen_vec(j, 2),
eigen_vec(k, 0), eigen_vec(k, 1), eigen_vec(k, 2)};
FTensor::Tensor1<double, 3> eig_c{eig(i), eig(j), eig(k)};
{
FTensor::Index<'i', DIM> i;
FTensor::Index<'j', DIM> j;
eig(i) = eig_c(i);
eigen_vec(i, j) = eigen_vec_c(i, j);
}
};
struct CommonData : public boost::enable_shared_from_this<CommonData> {
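  // Data shared between the Hencky operators at each integration point:
  // displacement gradient, elasticity tensor, eigenvalues/eigenvectors of C,
  // log(C) and its derivative, and the resulting Hencky and Piola stresses.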
boost::shared_ptr<MatrixDouble> matGradPtr;
boost::shared_ptr<MatrixDouble> matDPtr;
boost::shared_ptr<MatrixDouble> matLogCPlastic;
inline auto getMatFirstPiolaStress() {
return boost::shared_ptr<MatrixDouble>(shared_from_this(),
                                       &matFirstPiolaStress);
}
inline auto getMatHenckyStress() {
return boost::shared_ptr<MatrixDouble>(shared_from_this(),
                                       &matHenckyStress);
}
inline auto getMatLogC() {
return boost::shared_ptr<MatrixDouble>(shared_from_this(), &matLogC);
}
inline auto getMatTangent() {
return boost::shared_ptr<MatrixDouble>(shared_from_this(), &matTangent);
}
};
template <int DIM, IntegrationType I, typename DomainEleOp>
struct OpCalculateEigenValsImpl;
template <int DIM, IntegrationType I, typename DomainEleOp>
struct OpCalculateLogCImpl;
template <int DIM, IntegrationType I, typename DomainEleOp>
struct OpCalculateLogC_dCImpl;
template <int DIM, IntegrationType I, typename DomainEleOp, int S>
struct OpCalculateHenckyStressImpl;
template <int DIM, IntegrationType I, typename DomainEleOp, int S>
struct OpCalculateHenckyThermalStressImpl;
template <int DIM, IntegrationType I, typename DomainEleOp, int S>
struct OpCalculateHenckyPlasticStressImpl;
template <int DIM, IntegrationType I, typename DomainEleOp, int S>
struct OpCalculateHenckyThermoPlasticStressImpl;
template <int DIM, IntegrationType I, typename DomainEleOp, int S>
struct OpCalculatePiolaStressImpl;
template <int DIM, IntegrationType I, typename DomainEleOp, int S>
struct OpHenckyTangentImpl;
template <int DIM, IntegrationType I, typename DomainEleOp, int S>
struct OpCalculateHenckyThermalStressdTImpl;
template <int DIM, typename DomainEleOp>
struct OpCalculateEigenValsImpl<DIM, GAUSS, DomainEleOp> : public DomainEleOp {
OpCalculateEigenValsImpl(const std::string field_name,
boost::shared_ptr<CommonData> common_data)
    : DomainEleOp(field_name, DomainEleOp::OPROW), commonDataPtr(common_data) {
std::fill(&DomainEleOp::doEntities[MBEDGE],
&DomainEleOp::doEntities[MBMAXTYPE], false);
}
MoFEMErrorCode doWork(int side, EntityType type, EntData &data) {
FTensor::Index<'i', DIM> i;
FTensor::Index<'j', DIM> j;
FTensor::Index<'k', DIM> k;
// const size_t nb_gauss_pts = matGradPtr->size2();
const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
auto t_grad = getFTensor2FromMat<DIM, DIM>(*(commonDataPtr->matGradPtr));
commonDataPtr->matEigVal.resize(DIM, nb_gauss_pts, false);
commonDataPtr->matEigVec.resize(DIM * DIM, nb_gauss_pts, false);
auto t_eig_val = getFTensor1FromMat<DIM>(commonDataPtr->matEigVal);
auto t_eig_vec = getFTensor2FromMat<DIM, DIM>(commonDataPtr->matEigVec);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
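// Deformation gradient F = I + grad(u) and right Cauchy-Green tensor
// C = F^T F; the spectral decomposition of C computed below is reused by all
// subsequent Hencky operators.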
F(i, j) = t_grad(i, j) + t_kd(i, j);
C(i, j) = F(k, i) ^ F(k, j);
for (int ii = 0; ii != DIM; ii++)
for (int jj = 0; jj != DIM; jj++)
eigen_vec(ii, jj) = C(ii, jj);
for (auto ii = 0; ii != DIM; ++ii)
eig(ii) = std::max(std::numeric_limits<double>::epsilon(), eig(ii));
// rare case when two eigen values are equal
auto nb_uniq = get_uniq_nb<DIM>(&eig(0));
if constexpr (DIM == 3) {
if (nb_uniq == 2) {
sort_eigen_vals<DIM>(eig, eigen_vec);
}
}
t_eig_val(i) = eig(i);
t_eig_vec(i, j) = eigen_vec(i, j);
++t_grad;
++t_eig_val;
++t_eig_vec;
}
}
private:
boost::shared_ptr<CommonData> commonDataPtr;
};
template <int DIM, typename DomainEleOp>
struct OpCalculateLogCImpl<DIM, GAUSS, DomainEleOp> : public DomainEleOp {
OpCalculateLogCImpl(const std::string field_name,
boost::shared_ptr<CommonData> common_data)
    : DomainEleOp(field_name, DomainEleOp::OPROW), commonDataPtr(common_data) {
std::fill(&DomainEleOp::doEntities[MBEDGE],
&DomainEleOp::doEntities[MBMAXTYPE], false);
}
MoFEMErrorCode doWork(int side, EntityType type, EntData &data) {
FTensor::Index<'i', DIM> i;
FTensor::Index<'j', DIM> j;
// const size_t nb_gauss_pts = matGradPtr->size2();
const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
commonDataPtr->matLogC.resize(size_symm, nb_gauss_pts, false);
auto t_eig_val = getFTensor1FromMat<DIM>(commonDataPtr->matEigVal);
auto t_eig_vec = getFTensor2FromMat<DIM, DIM>(commonDataPtr->matEigVec);
auto t_logC = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matLogC);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
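// Hencky (logarithmic) strain H = 1/2 log(C), evaluated as a matrix function
// of C from its eigenvalues and eigenvectors (f(lambda) = 1/2 log(lambda)).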
t_logC(i, j) = EigenMatrix::getMat(t_eig_val, t_eig_vec, f)(i, j);
++t_eig_val;
++t_eig_vec;
++t_logC;
}
}
private:
boost::shared_ptr<CommonData> commonDataPtr;
};
template <int DIM, typename DomainEleOp>
struct OpCalculateLogC_dCImpl<DIM, GAUSS, DomainEleOp> : public DomainEleOp {
OpCalculateLogC_dCImpl(const std::string field_name,
boost::shared_ptr<CommonData> common_data)
    : DomainEleOp(field_name, DomainEleOp::OPROW), commonDataPtr(common_data) {
std::fill(&DomainEleOp::doEntities[MBEDGE],
&DomainEleOp::doEntities[MBMAXTYPE], false);
}
MoFEMErrorCode doWork(int side, EntityType type, EntData &data) {
FTensor::Index<'i', DIM> i;
FTensor::Index<'j', DIM> j;
FTensor::Index<'k', DIM> k;
FTensor::Index<'l', DIM> l;
const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
commonDataPtr->matLogCdC.resize(size_symm * size_symm, nb_gauss_pts, false);
auto t_logC_dC = getFTensor4DdgFromMat<DIM, DIM>(commonDataPtr->matLogCdC);
auto t_eig_val = getFTensor1FromMat<DIM>(commonDataPtr->matEigVal);
auto t_eig_vec = getFTensor2FromMat<DIM, DIM>(commonDataPtr->matEigVec);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
// rare case when two eigen values are equal
auto nb_uniq = get_uniq_nb<DIM>(&t_eig_val(0));
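// Derivative d(log C)/dC obtained by differentiating the matrix function;
// the factor 2 compensates for f returning 1/2 log of the eigenvalues.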
t_logC_dC(i, j, k, l) =
2 * EigenMatrix::getDiffMat(t_eig_val, t_eig_vec, f, d_f,
nb_uniq)(i, j, k, l);
++t_logC_dC;
++t_eig_val;
++t_eig_vec;
}
}
private:
boost::shared_ptr<CommonData> commonDataPtr;
};
template <int DIM, typename DomainEleOp, int S>
struct OpCalculateHenckyStressImpl<DIM, GAUSS, DomainEleOp, S>
: public DomainEleOp {
OpCalculateHenckyStressImpl(const std::string field_name,
boost::shared_ptr<CommonData> common_data)
    : DomainEleOp(field_name, DomainEleOp::OPROW), commonDataPtr(common_data) {
std::fill(&DomainEleOp::doEntities[MBEDGE],
&DomainEleOp::doEntities[MBMAXTYPE], false);
}
MoFEMErrorCode doWork(int side, EntityType type, EntData &data) {
FTensor::Index<'i', DIM> i;
FTensor::Index<'j', DIM> j;
FTensor::Index<'k', DIM> k;
FTensor::Index<'l', DIM> l;
// const size_t nb_gauss_pts = matGradPtr->size2();
const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
auto t_D = getFTensor4DdgFromMat<DIM, DIM, S>(*commonDataPtr->matDPtr);
auto t_logC = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matLogC);
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
commonDataPtr->matHenckyStress.resize(size_symm, nb_gauss_pts, false);
auto t_T = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matHenckyStress);
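// Hencky stress, work-conjugate to the logarithmic strain: T = D : H.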
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
t_T(i, j) = t_D(i, j, k, l) * t_logC(k, l);
++t_logC;
++t_T;
++t_D;
}
}
private:
boost::shared_ptr<CommonData> commonDataPtr;
};
template <int DIM, typename DomainEleOp, int S>
struct OpCalculateHenckyThermalStressImpl<DIM, GAUSS, DomainEleOp, S>
: public DomainEleOp {
OpCalculateHenckyThermalStressImpl(
const std::string field_name, boost::shared_ptr<VectorDouble> temperature,
boost::shared_ptr<CommonData> common_data,
boost::shared_ptr<VectorDouble> coeff_expansion_ptr,
boost::shared_ptr<double> ref_temp_ptr)
: DomainEleOp(field_name, DomainEleOp::OPROW), tempPtr(temperature),
commonDataPtr(common_data), coeffExpansionPtr(coeff_expansion_ptr),
refTempPtr(ref_temp_ptr) {
std::fill(&DomainEleOp::doEntities[MBEDGE],
&DomainEleOp::doEntities[MBMAXTYPE], false);
}
MoFEMErrorCode doWork(int side, EntityType type, EntData &data) {
FTensor::Index<'i', DIM> i;
FTensor::Index<'j', DIM> j;
FTensor::Index<'k', DIM> k;
FTensor::Index<'l', DIM> l;
// const size_t nb_gauss_pts = matGradPtr->size2();
const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
auto t_D = getFTensor4DdgFromMat<DIM, DIM, S>(*commonDataPtr->matDPtr);
auto t_logC = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matLogC);
auto t_logC_dC = getFTensor4DdgFromMat<DIM, DIM>(commonDataPtr->matLogCdC);
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
commonDataPtr->matHenckyStress.resize(size_symm, nb_gauss_pts, false);
commonDataPtr->matFirstPiolaStress.resize(DIM * DIM, nb_gauss_pts, false);
commonDataPtr->matSecondPiolaStress.resize(size_symm, nb_gauss_pts, false);
auto t_T = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matHenckyStress);
auto t_P = getFTensor2FromMat<DIM, DIM>(commonDataPtr->matFirstPiolaStress);
auto t_S =
getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matSecondPiolaStress);
auto t_grad = getFTensor2FromMat<DIM, DIM>(*(commonDataPtr->matGradPtr));
auto t_temp = getFTensor0FromVec(*tempPtr);
t_coeff_exp(i, j) = 0;
for (auto d = 0; d != SPACE_DIM; ++d) {
t_coeff_exp(d, d) = (*coeffExpansionPtr)[d];
}
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
#ifdef HENCKY_SMALL_STRAIN
t_P(i, j) = t_D(i, j, k, l) *
(t_grad(k, l) - t_coeff_exp(k, l) * (t_temp - (*refTempPtr)));
#else
t_T(i, j) = t_D(i, j, k, l) *
(t_logC(k, l) - t_coeff_exp(k, l) * (t_temp - (*refTempPtr)));
t_F(i, j) = t_grad(i, j) + t_kd(i, j);
t_S(k, l) = t_T(i, j) * t_logC_dC(i, j, k, l);
t_P(i, l) = t_F(i, k) * t_S(k, l);
#endif
++t_grad;
++t_logC;
++t_logC_dC;
++t_P;
++t_T;
++t_S;
++t_D;
++t_temp;
}
}
private:
boost::shared_ptr<CommonData> commonDataPtr;
boost::shared_ptr<VectorDouble> tempPtr;
boost::shared_ptr<VectorDouble> coeffExpansionPtr;
boost::shared_ptr<double> refTempPtr;
};
template <int DIM, typename DomainEleOp, int S>
struct OpCalculateHenckyPlasticStressImpl<DIM, GAUSS, DomainEleOp, S>
: public DomainEleOp {
OpCalculateHenckyPlasticStressImpl(const std::string field_name,
boost::shared_ptr<CommonData> common_data,
boost::shared_ptr<MatrixDouble> mat_D_ptr,
const double scale = 1)
: DomainEleOp(field_name, DomainEleOp::OPROW), commonDataPtr(common_data),
scaleStress(scale), matDPtr(mat_D_ptr) {
std::fill(&DomainEleOp::doEntities[MBEDGE],
&DomainEleOp::doEntities[MBMAXTYPE], false);
matLogCPlastic = commonDataPtr->matLogCPlastic;
}
MoFEMErrorCode doWork(int side, EntityType type, EntData &data) {
FTensor::Index<'i', DIM> i;
FTensor::Index<'j', DIM> j;
FTensor::Index<'k', DIM> k;
FTensor::Index<'l', DIM> l;
// const size_t nb_gauss_pts = matGradPtr->size2();
const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
auto t_D = getFTensor4DdgFromMat<DIM, DIM, S>(*matDPtr);
auto t_logC = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matLogC);
auto t_logCPlastic = getFTensor2SymmetricFromMat<DIM>(*matLogCPlastic);
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
commonDataPtr->matHenckyStress.resize(size_symm, nb_gauss_pts, false);
auto t_T = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matHenckyStress);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
t_T(i, j) = t_D(i, j, k, l) * (t_logC(k, l) - t_logCPlastic(k, l));
t_T(i, j) /= scaleStress;
++t_logC;
++t_T;
++t_D;
++t_logCPlastic;
}
}
private:
boost::shared_ptr<CommonData> commonDataPtr;
boost::shared_ptr<MatrixDouble> matDPtr;
boost::shared_ptr<MatrixDouble> matLogCPlastic;
const double scaleStress;
};
template <int DIM, typename DomainEleOp, int S>
struct OpCalculateHenckyThermoPlasticStressImpl<DIM, GAUSS, DomainEleOp, S>
: public DomainEleOp {
OpCalculateHenckyThermoPlasticStressImpl(
const std::string field_name, boost::shared_ptr<VectorDouble> temperature,
boost::shared_ptr<CommonData> common_data,
boost::shared_ptr<VectorDouble> coeff_expansion_ptr,
boost::shared_ptr<double> ref_temp_ptr)
: DomainEleOp(field_name, DomainEleOp::OPROW), tempPtr(temperature),
commonDataPtr(common_data), coeffExpansionPtr(coeff_expansion_ptr),
refTempPtr(ref_temp_ptr) {
std::fill(&DomainEleOp::doEntities[MBEDGE],
&DomainEleOp::doEntities[MBMAXTYPE], false);
matLogCPlastic = commonDataPtr->matLogCPlastic;
}
MoFEMErrorCode doWork(int side, EntityType type, EntData &data) {
FTensor::Index<'i', DIM> i;
FTensor::Index<'j', DIM> j;
FTensor::Index<'k', DIM> k;
FTensor::Index<'l', DIM> l;
// const size_t nb_gauss_pts = matGradPtr->size2();
const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
auto t_D = getFTensor4DdgFromMat<DIM, DIM, S>(*commonDataPtr->matDPtr);
auto t_logC = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matLogC);
auto t_logC_dC = getFTensor4DdgFromMat<DIM, DIM>(commonDataPtr->matLogCdC);
auto t_logCPlastic = getFTensor2SymmetricFromMat<DIM>(*matLogCPlastic);
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
commonDataPtr->matHenckyStress.resize(size_symm, nb_gauss_pts, false);
commonDataPtr->matFirstPiolaStress.resize(DIM * DIM, nb_gauss_pts, false);
commonDataPtr->matSecondPiolaStress.resize(size_symm, nb_gauss_pts, false);
auto t_T = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matHenckyStress);
auto t_P = getFTensor2FromMat<DIM, DIM>(commonDataPtr->matFirstPiolaStress);
auto t_S =
getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matSecondPiolaStress);
auto t_grad = getFTensor2FromMat<DIM, DIM>(*(commonDataPtr->matGradPtr));
auto t_temp = getFTensor0FromVec(*tempPtr);
t_coeff_exp(i, j) = 0;
for (auto d = 0; d != SPACE_DIM; ++d) {
t_coeff_exp(d, d) = (*coeffExpansionPtr)[d];
}
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
#ifdef HENCKY_SMALL_STRAIN
t_P(i, j) =
t_D(i, j, k, l) * (t_grad(k, l) - t_logCPlastic(k, l) -
t_coeff_exp(k, l) * (t_temp - (*refTempPtr)));
#else
t_T(i, j) =
t_D(i, j, k, l) * (t_logC(k, l) - t_logCPlastic(k, l) -
t_coeff_exp(k, l) * (t_temp - (*refTempPtr)));
t_F(i, j) = t_grad(i, j) + t_kd(i, j);
t_S(k, l) = t_T(i, j) * t_logC_dC(i, j, k, l);
t_P(i, l) = t_F(i, k) * t_S(k, l);
#endif
++t_grad;
++t_logC;
++t_P;
++t_T;
++t_S;
++t_D;
++t_temp;
++t_logCPlastic;
}
}
private:
boost::shared_ptr<CommonData> commonDataPtr;
boost::shared_ptr<VectorDouble> tempPtr;
boost::shared_ptr<MatrixDouble> matLogCPlastic;
boost::shared_ptr<VectorDouble> coeffExpansionPtr;
boost::shared_ptr<double> refTempPtr;
};
template <int DIM, typename DomainEleOp, int S>
struct OpCalculatePiolaStressImpl<DIM, GAUSS, DomainEleOp, S>
: public DomainEleOp {
OpCalculatePiolaStressImpl(const std::string field_name,
boost::shared_ptr<CommonData> common_data)
    : DomainEleOp(field_name, DomainEleOp::OPROW), commonDataPtr(common_data) {
std::fill(&DomainEleOp::doEntities[MBEDGE],
&DomainEleOp::doEntities[MBMAXTYPE], false);
}
MoFEMErrorCode doWork(int side, EntityType type, EntData &data) {
FTensor::Index<'i', DIM> i;
FTensor::Index<'j', DIM> j;
FTensor::Index<'k', DIM> k;
FTensor::Index<'l', DIM> l;
constexpr auto t_kd = FTensor::Kronecker_Delta<int>();
// const size_t nb_gauss_pts = matGradPtr->size2();
const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
#ifdef HENCKY_SMALL_STRAIN
auto t_D = getFTensor4DdgFromMat<DIM, DIM, S>(*commonDataPtr->matDPtr);
#endif
auto t_logC = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matLogC);
auto t_logC_dC = getFTensor4DdgFromMat<DIM, DIM>(commonDataPtr->matLogCdC);
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
commonDataPtr->matFirstPiolaStress.resize(DIM * DIM, nb_gauss_pts, false);
commonDataPtr->matSecondPiolaStress.resize(size_symm, nb_gauss_pts, false);
auto t_P = getFTensor2FromMat<DIM, DIM>(commonDataPtr->matFirstPiolaStress);
auto t_T = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matHenckyStress);
auto t_S =
getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matSecondPiolaStress);
auto t_grad = getFTensor2FromMat<DIM, DIM>(*(commonDataPtr->matGradPtr));
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
#ifdef HENCKY_SMALL_STRAIN
t_P(i, j) = t_D(i, j, k, l) * t_grad(k, l);
#else
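    // Large-strain branch: F = I + grad(u), second Piola-Kirchhoff stress
    // S = T : d(log C)/dC, and first Piola-Kirchhoff stress P = F S.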
t_F(i, j) = t_grad(i, j) + t_kd(i, j);
t_S(k, l) = t_T(i, j) * t_logC_dC(i, j, k, l);
t_P(i, l) = t_F(i, k) * t_S(k, l);
#endif
++t_grad;
++t_logC;
++t_logC_dC;
++t_P;
++t_T;
++t_S;
#ifdef HENCKY_SMALL_STRAIN
++t_D;
#endif
}
}
private:
boost::shared_ptr<CommonData> commonDataPtr;
};
template <int DIM, typename DomainEleOp, int S>
struct OpHenckyTangentImpl<DIM, GAUSS, DomainEleOp, S> : public DomainEleOp {
OpHenckyTangentImpl(const std::string field_name,
boost::shared_ptr<CommonData> common_data,
boost::shared_ptr<MatrixDouble> mat_D_ptr = nullptr)
    : DomainEleOp(field_name, DomainEleOp::OPROW), commonDataPtr(common_data) {
std::fill(&DomainEleOp::doEntities[MBEDGE],
&DomainEleOp::doEntities[MBMAXTYPE], false);
if (mat_D_ptr)
matDPtr = mat_D_ptr;
else
matDPtr = commonDataPtr->matDPtr;
}
MoFEMErrorCode doWork(int side, EntityType type, EntData &data) {
FTensor::Index<'i', DIM> i;
FTensor::Index<'j', DIM> j;
FTensor::Index<'k', DIM> k;
FTensor::Index<'l', DIM> l;
FTensor::Index<'m', DIM> m;
FTensor::Index<'n', DIM> n;
FTensor::Index<'o', DIM> o;
FTensor::Index<'p', DIM> p;
// const size_t nb_gauss_pts = matGradPtr->size2();
const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
commonDataPtr->matTangent.resize(DIM * DIM * DIM * DIM, nb_gauss_pts);
auto dP_dF =
getFTensor4FromMat<DIM, DIM, DIM, DIM, 1>(commonDataPtr->matTangent);
auto t_D = getFTensor4DdgFromMat<DIM, DIM, S>(*matDPtr);
auto t_eig_val = getFTensor1FromMat<DIM>(commonDataPtr->matEigVal);
auto t_eig_vec = getFTensor2FromMat<DIM, DIM>(commonDataPtr->matEigVec);
auto t_T = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matHenckyStress);
auto t_S =
getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matSecondPiolaStress);
auto t_grad = getFTensor2FromMat<DIM, DIM>(*(commonDataPtr->matGradPtr));
auto t_logC_dC = getFTensor4DdgFromMat<DIM, DIM>(commonDataPtr->matLogCdC);
commonDataPtr->matCdF.resize(DIM * DIM * DIM * DIM, nb_gauss_pts);
auto dC_dF =
getFTensor4FromMat<DIM, DIM, DIM, DIM, 1>(commonDataPtr->matCdF);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
#ifdef HENCKY_SMALL_STRAIN
dP_dF(i, j, k, l) = t_D(i, j, k, l);
#else
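    // Large-strain consistent tangent dP/dF. It combines a geometric part
    // (Kronecker delta contracted with the second Piola stress S) with a
    // material part F * (dS/dC : dC/dF), where dS/dC is built from D mapped
    // through d(log C)/dC plus the second derivative of the matrix function
    // (term TL).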
t_F(i, j) = t_grad(i, j) + t_kd(i, j);
// rare case when two eigen values are equal
auto nb_uniq = get_uniq_nb<DIM>(&t_eig_val(0));
dC_dF(i, j, k, l) = (t_kd(i, l) * t_F(k, j)) + (t_kd(j, l) * t_F(k, i));
auto TL = EigenMatrix::getDiffDiffMat(t_eig_val, t_eig_vec, f, d_f, dd_f,
t_T, nb_uniq);
TL(i, j, k, l) *= 4;
P_D_P_plus_TL(i, j, k, l) =
TL(i, j, k, l) +
(t_logC_dC(i, j, o, p) * t_D(o, p, m, n)) * t_logC_dC(m, n, k, l);
P_D_P_plus_TL(i, j, k, l) *= 0.5;
dP_dF(i, j, m, n) = t_kd(i, m) * (t_kd(k, n) * t_S(k, j));
dP_dF(i, j, m, n) +=
t_F(i, k) * (P_D_P_plus_TL(k, j, o, p) * dC_dF(o, p, m, n));
#endif
++dP_dF;
++t_grad;
++t_eig_val;
++t_eig_vec;
++t_logC_dC;
++t_S;
++t_T;
++t_D;
++dC_dF;
}
}
private:
boost::shared_ptr<CommonData> commonDataPtr;
boost::shared_ptr<MatrixDouble> matDPtr;
};
template <int DIM, typename AssemblyDomainEleOp, int S>
struct OpCalculateHenckyThermalStressdTImpl<DIM, GAUSS, AssemblyDomainEleOp, S>
    : public AssemblyDomainEleOp {
OpCalculateHenckyThermalStressdTImpl(
const std::string row_field_name, const std::string col_field_name,
boost::shared_ptr<CommonData> elastic_common_data_ptr,
boost::shared_ptr<VectorDouble> coeff_expansion_ptr);
MoFEMErrorCode iNtegrate(EntitiesFieldData::EntData &row_data,
EntitiesFieldData::EntData &col_data);
private:
boost::shared_ptr<CommonData> elasticCommonDataPtr;
boost::shared_ptr<VectorDouble> coeffExpansionPtr;
};
template <int DIM, typename AssemblyDomainEleOp, int S>
OpCalculateHenckyThermalStressdTImpl<DIM, GAUSS, AssemblyDomainEleOp, S>::
OpCalculateHenckyThermalStressdTImpl(
const std::string row_field_name, const std::string col_field_name,
boost::shared_ptr<CommonData> elastic_common_data_ptr,
boost::shared_ptr<VectorDouble> coeff_expansion_ptr)
: AssemblyDomainEleOp(row_field_name, col_field_name,
AssemblyDomainEleOp::OPROWCOL),
elasticCommonDataPtr(elastic_common_data_ptr),
coeffExpansionPtr(coeff_expansion_ptr) {
this->sYmm = false;
}
template <int DIM, typename AssemblyDomainEleOp, int S>
MoFEMErrorCode
OpCalculateHenckyThermalStressdTImpl<DIM, GAUSS, AssemblyDomainEleOp, S>::
iNtegrate(EntitiesFieldData::EntData &row_data,
EntitiesFieldData::EntData &col_data) {
auto &locMat = AssemblyDomainEleOp::locMat;
const auto nb_integration_pts = row_data.getN().size1();
const auto nb_row_base_functions = row_data.getN().size2();
auto t_w = this->getFTensor0IntegrationWeight();
auto t_row_diff_base = row_data.getFTensor1DiffN<DIM>();
auto t_D = getFTensor4DdgFromMat<DIM, DIM, S>(*elasticCommonDataPtr->matDPtr);
auto t_grad =
getFTensor2FromMat<DIM, DIM>(*(elasticCommonDataPtr->matGradPtr));
auto t_logC_dC =
getFTensor4DdgFromMat<DIM, DIM>(elasticCommonDataPtr->matLogCdC);
FTensor::Index<'i', DIM> i;
FTensor::Index<'j', DIM> j;
FTensor::Index<'k', DIM> k;
FTensor::Index<'l', DIM> l;
FTensor::Index<'m', DIM> m;
FTensor::Index<'n', DIM> n;
FTensor::Index<'o', DIM> o;
t_coeff_exp(i, j) = 0;
for (auto d = 0; d != SPACE_DIM; ++d) {
t_coeff_exp(d, d) = (*coeffExpansionPtr)[d];
}
t_eigen_strain(i, j) = t_D(i, j, k, l) * t_coeff_exp(k, l);
for (auto gg = 0; gg != nb_integration_pts; ++gg) {
t_F(i, j) = t_grad(i, j) + t_kd(i, j);
double alpha = this->getMeasure() * t_w;
auto rr = 0;
for (; rr != AssemblyDomainEleOp::nbRows / DIM; ++rr) {
auto t_mat = getFTensor1FromMat<DIM, 1>(locMat, rr * DIM);
auto t_col_base = col_data.getFTensor0N(gg, 0);
for (auto cc = 0; cc != AssemblyDomainEleOp::nbCols; cc++) {
#ifdef HENCKY_SMALL_STRAIN
t_mat(i) -=
(t_row_diff_base(j) * t_eigen_strain(i, j)) * (t_col_base * alpha);
#else
t_mat(i) -= (t_row_diff_base(j) *
(t_F(i, o) * ((t_D(m, n, k, l) * t_coeff_exp(k, l)) *
t_logC_dC(m, n, o, j)))) *
(t_col_base * alpha);
#endif
++t_mat;
++t_col_base;
}
++t_row_diff_base;
}
for (; rr != nb_row_base_functions; ++rr)
++t_row_diff_base;
++t_w;
++t_grad;
++t_logC_dC;
++t_D;
}
}
template <typename DomainEleOp> struct HenckyIntegrators {
template <int DIM, IntegrationType I>
using OpCalculateEigenVals = OpCalculateEigenValsImpl<DIM, I, DomainEleOp>;
template <int DIM, IntegrationType I>
using OpCalculateLogC = OpCalculateLogCImpl<DIM, I, DomainEleOp>;
template <int DIM, IntegrationType I>
using OpCalculateLogC_dC = OpCalculateLogC_dCImpl<DIM, I, DomainEleOp>;
template <int DIM, IntegrationType I, int S>
using OpCalculateHenckyStress =
OpCalculateHenckyStressImpl<DIM, I, DomainEleOp, S>;
template <int DIM, IntegrationType I, int S>
using OpCalculateHenckyThermalStress =
OpCalculateHenckyThermalStressImpl<DIM, I, DomainEleOp, S>;
template <int DIM, IntegrationType I, int S>
using OpCalculateHenckyPlasticStress =
OpCalculateHenckyPlasticStressImpl<DIM, I, DomainEleOp, S>;
template <int DIM, IntegrationType I, int S>
using OpCalculateHenckyThermoPlasticStress =
OpCalculateHenckyThermoPlasticStressImpl<DIM, I, DomainEleOp, S>;
template <int DIM, IntegrationType I, int S>
using OpCalculatePiolaStress =
OpCalculatePiolaStressImpl<DIM, I, DomainEleOp, S>;
template <int DIM, IntegrationType I, int S>
using OpHenckyTangent = OpHenckyTangentImpl<DIM, GAUSS, DomainEleOp, S>;
template <int DIM, IntegrationType I, typename AssemblyDomainEleOp, int S>
using OpCalculateHenckyThermalStressdT =
OpCalculateHenckyThermalStressdTImpl<DIM, I, AssemblyDomainEleOp, S>;
};
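// The helper below pushes an operator that assembles the elasticity tensor D
// from blockset attributes (Young's modulus and Poisson's ratio), scaled if
// requested, with an optional -plane_strain switch for 2D problems.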
template <int DIM>
boost::ptr_deque<ForcesAndSourcesCore::UserDataOperator> &pip,
std::string block_name,
boost::shared_ptr<MatrixDouble> mat_D_Ptr, Sev sev,
double scale = 1) {
PetscBool plane_strain_flag = PETSC_FALSE;
CHKERR PetscOptionsGetBool(PETSC_NULLPTR, "", "-plane_strain",
&plane_strain_flag, PETSC_NULLPTR);
struct OpMatBlocks : public DomainEleOp {
OpMatBlocks(boost::shared_ptr<MatrixDouble> m, double bulk_modulus_K,
double shear_modulus_G, MoFEM::Interface &m_field, Sev sev,
std::vector<const CubitMeshSets *> meshset_vec_ptr,
double scale, PetscBool plane_strain_flag)
: DomainEleOp(NOSPACE, DomainEleOp::OPSPACE), matDPtr(m),
bulkModulusKDefault(bulk_modulus_K),
shearModulusGDefault(shear_modulus_G), scaleYoungModulus(scale),
planeStrainFlag(plane_strain_flag) {
CHK_THROW_MESSAGE(extractBlockData(m_field, meshset_vec_ptr, sev),
"Can not get data from block");
}
MoFEMErrorCode doWork(int side, EntityType type,
EntitiesFieldData::EntData &data) {
MoFEMFunctionBegin;
// If the element belongs to one of the material blocks, use that block's
// parameters and return early.
for (auto &b : blockData) {
if (b.blockEnts.find(getFEEntityHandle()) != b.blockEnts.end()) {
CHKERR getMatDPtr(matDPtr, b.bulkModulusK * scaleYoungModulus,
b.shearModulusG * scaleYoungModulus,
planeStrainFlag);
MoFEMFunctionReturnHot(0);
}
}
// Otherwise fall back to the default (command line) material parameters.
CHKERR getMatDPtr(matDPtr, bulkModulusKDefault * scaleYoungModulus,
shearModulusGDefault * scaleYoungModulus,
planeStrainFlag);
MoFEMFunctionReturn(0);
}
private:
boost::shared_ptr<MatrixDouble> matDPtr;
const double scaleYoungModulus;
const PetscBool planeStrainFlag;
struct BlockData {
double bulkModulusK;
double shearModulusG;
Range blockEnts;
};
double bulkModulusKDefault;
double shearModulusGDefault;
std::vector<BlockData> blockData;
MoFEMErrorCode extractBlockData(MoFEM::Interface &m_field,
std::vector<const CubitMeshSets *> meshset_vec_ptr,
Sev sev) {
MoFEMFunctionBegin;
for (auto m : meshset_vec_ptr) {
MOFEM_TAG_AND_LOG("WORLD", sev, "MatBlock") << *m;
std::vector<double> block_data;
CHKERR m->getAttributes(block_data);
if (block_data.size() < 2) {
SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
"Expected that block has two attribute");
}
auto get_block_ents = [&]() {
Range ents;
CHKERR m_field.get_moab().get_entities_by_handle(m->meshset, ents, true);
return ents;
};
double young_modulus = block_data[0];
double poisson_ratio = block_data[1];
double bulk_modulus_K = young_modulus / (3 * (1 - 2 * poisson_ratio));
double shear_modulus_G = young_modulus / (2 * (1 + poisson_ratio));
MOFEM_TAG_AND_LOG("WORLD", sev, "MatBlock")
<< "E = " << young_modulus << " nu = " << poisson_ratio;
blockData.push_back(
{bulk_modulus_K, shear_modulus_G, get_block_ents()});
}
MoFEMFunctionReturn(0);
}
MoFEMErrorCode getMatDPtr(boost::shared_ptr<MatrixDouble> mat_D_ptr,
double bulk_modulus_K, double shear_modulus_G,
PetscBool is_plane_strain) {
MoFEMFunctionBegin;
//! [Calculate elasticity tensor]
auto set_material_stiffness = [&]() {
FTensor::Index<'i', DIM> i;
FTensor::Index<'j', DIM> j;
FTensor::Index<'k', DIM> k;
FTensor::Index<'l', DIM> l;
double A = (SPACE_DIM == 2 && !is_plane_strain)
? 2 * shear_modulus_G /
(bulk_modulus_K + (4. / 3.) * shear_modulus_G)
: 1;
auto t_D = getFTensor4DdgFromMat<DIM, DIM, 0>(*mat_D_ptr);
t_D(i, j, k, l) =
2 * shear_modulus_G * ((t_kd(i, k) ^ t_kd(j, l)) / 4.) +
A * (bulk_modulus_K - (2. / 3.) * shear_modulus_G) * t_kd(i, j) *
t_kd(k, l);
};
//! [Calculate elasticity tensor]
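// The lambda above fills the isotropic elasticity tensor
//   D_ijkl = 2 G sym(d_ik d_jl) + A (K - 2G/3) d_ij d_kl,
// with bulk modulus K, shear modulus G and Kronecker delta d. The factor
// A = 2G / (K + 4G/3) converts the volumetric term to plane stress in 2D;
// A = 1 in 3D and for plane strain.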
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
mat_D_ptr->resize(size_symm * size_symm, 1);
set_material_stiffness();
MoFEMFunctionReturn(0);
}
};
double E = young_modulus;
double nu = poisson_ratio;
PetscOptionsBegin(PETSC_COMM_WORLD, "", "", "none");
CHKERR PetscOptionsScalar("-young_modulus", "Young modulus", "", E, &E,
PETSC_NULLPTR);
CHKERR PetscOptionsScalar("-poisson_ratio", "poisson ratio", "", nu, &nu,
PETSC_NULLPTR);
PetscOptionsEnd();
double bulk_modulus_K = E / (3 * (1 - 2 * nu));
double shear_modulus_G = E / (2 * (1 + nu));
pip.push_back(new OpMatBlocks(
mat_D_Ptr, bulk_modulus_K, shear_modulus_G, m_field, sev,
// Get blockset using regular expression
m_field.getInterface<MeshsetsManager>()->getCubitMeshsetPtr(std::regex(
(boost::format("%s(.*)") % block_name).str()
)),
scale, plane_strain_flag
));
MoFEMFunctionReturn(0);
}
template <int DIM, IntegrationType I, typename DomainEleOp>
auto commonDataFactory(
MoFEM::Interface &m_field,
boost::ptr_deque<ForcesAndSourcesCore::UserDataOperator> &pip,
std::string field_name, std::string block_name, Sev sev, double scale = 1) {
auto common_ptr = boost::make_shared<HenckyOps::CommonData>();
common_ptr->matDPtr = boost::make_shared<MatrixDouble>();
common_ptr->matGradPtr = boost::make_shared<MatrixDouble>();
CHK_THROW_MESSAGE(addMatBlockOps<DIM>(m_field, pip, block_name,
common_ptr->matDPtr, sev, scale),
"addMatBlockOps");
using H = HenckyIntegrators<DomainEleOp>;
pip.push_back(new OpCalculateVectorFieldGradient<DIM, DIM>(
field_name, common_ptr->matGradPtr));
pip.push_back(new typename H::template OpCalculateEigenVals<DIM, I>(
field_name, common_ptr));
pip.push_back(
new typename H::template OpCalculateLogC<DIM, I>(field_name, common_ptr));
pip.push_back(new typename H::template OpCalculateLogC_dC<DIM, I>(
field_name, common_ptr));
// Assumes constant D matrix per entity
pip.push_back(new typename H::template OpCalculateHenckyStress<DIM, I, 0>(
field_name, common_ptr));
pip.push_back(new typename H::template OpCalculatePiolaStress<DIM, I, 0>(
field_name, common_ptr));
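// Note that the order of the operators pushed above matters: each stores its
// result in CommonData (matGradPtr, matEigVal/matEigVec, matLogC, matLogCdC,
// matHenckyStress, matFirstPiolaStress) and the next one reads it, so the
// pipeline must run gradient -> eigenvalues -> log(C) -> d log(C)/dC ->
// Hencky stress -> first Piola-Kirchhoff stress.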
return common_ptr;
}
template <int DIM, AssemblyType A, IntegrationType I, typename DomainEleOp>
MoFEMErrorCode opFactoryDomainRhs(
MoFEM::Interface &m_field,
boost::ptr_deque<ForcesAndSourcesCore::UserDataOperator> &pip,
std::string field_name, boost::shared_ptr<HenckyOps::CommonData> common_ptr,
Sev sev) {
MoFEMFunctionBegin;
using B = typename FormsIntegrators<DomainEleOp>::template Assembly<
A>::template LinearForm<I>;
// Internal force vector: f_int = \int grad(N) : P dV, with P the first
// Piola-Kirchhoff stress evaluated by the common-data operators.
using OpInternalForcePiola =
typename B::template OpGradTimesTensor<1, DIM, DIM>;
pip.push_back(
new OpInternalForcePiola("U", common_ptr->getMatFirstPiolaStress()));
MoFEMFunctionReturn(0);
}
template <int DIM, AssemblyType A, IntegrationType I, typename DomainEleOp>
MoFEMErrorCode opFactoryDomainRhs(
MoFEM::Interface &m_field,
boost::ptr_deque<ForcesAndSourcesCore::UserDataOperator> &pip,
std::string field_name, std::string block_name, Sev sev, double scale = 1) {
MoFEMFunctionBegin;
auto common_ptr = commonDataFactory<DIM, I, DomainEleOp>(
m_field, pip, field_name, block_name, sev, scale);
CHKERR opFactoryDomainRhs<DIM, A, I, DomainEleOp>(m_field, pip, field_name,
common_ptr, sev);
MoFEMFunctionReturn(0);
}
template <int DIM, AssemblyType A, IntegrationType I, typename DomainEleOp>
MoFEMErrorCode opFactoryDomainLhs(
MoFEM::Interface &m_field,
boost::ptr_deque<ForcesAndSourcesCore::UserDataOperator> &pip,
std::string field_name, boost::shared_ptr<HenckyOps::CommonData> common_ptr,
Sev sev) {
MoFEMFunctionBegin;
using B = typename FormsIntegrators<DomainEleOp>::template Assembly<
A>::template BiLinearForm<I>;
using OpKPiola = typename B::template OpGradTensorGrad<1, DIM, DIM, 1>;
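// OpKPiola assembles the material and geometric tangent
//   K = \int grad(N_row) : (dP/dF) : grad(N_col) dV,
// with dP/dF evaluated at each integration point by OpHenckyTangent pushed
// below.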
using H = HenckyIntegrators<DomainEleOp>;
// Assumes constant D matrix per entity
pip.push_back(new typename H::template OpHenckyTangent<DIM, I, 0>(
field_name, common_ptr));
pip.push_back(
new OpKPiola(field_name, field_name, common_ptr->getMatTangent()));
MoFEMFunctionReturn(0);
}
template <int DIM, AssemblyType A, IntegrationType I, typename DomainEleOp>
MoFEMErrorCode opFactoryDomainLhs(
MoFEM::Interface &m_field,
boost::ptr_deque<ForcesAndSourcesCore::UserDataOperator> &pip,
std::string field_name, std::string block_name, Sev sev, double scale = 1) {
MoFEMFunctionBegin;
auto common_ptr = commonDataFactory<DIM, I, DomainEleOp>(
m_field, pip, field_name, block_name, sev, scale);
CHKERR opFactoryDomainLhs<DIM, A, I, DomainEleOp>(m_field, pip, field_name,
common_ptr, sev);
MoFEMFunctionReturn(0);
}
} // namespace HenckyOps
#endif // __HENCKY_OPS_HPP__
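As a usage note, the sketch below shows how the driver in nonlinear_elastic.cpp can push these factories into its domain pipelines. It is a minimal sketch only: the assembly type PETSC, the integration type GAUSS, the field name "U" and the block-set name "MAT_ELASTIC" are illustrative assumptions, not part of HenckyOps.hpp.
// Minimal sketch: wiring the Hencky factories into the domain pipelines
auto *pip_mng = mField.getInterface<PipelineManager>();
// Tangent (left-hand side) operators
CHKERR HenckyOps::opFactoryDomainLhs<SPACE_DIM, PETSC, GAUSS, DomainEleOp>(
mField, pip_mng->getOpDomainLhsPipeline(), "U", "MAT_ELASTIC", Sev::verbose);
// Internal force (right-hand side) operators
CHKERR HenckyOps::opFactoryDomainRhs<SPACE_DIM, PETSC, GAUSS, DomainEleOp>(
mField, pip_mng->getOpDomainRhsPipeline(), "U", "MAT_ELASTIC", Sev::inform);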