using OpBodyForce =
    DomainNaturalBC::OpFlux<NaturalMeshsetType<BLOCKSET>, 1, SPACE_DIM>;
using OpForce = BoundaryNaturalBC::OpFlux<NaturalForceMeshsets, 1, SPACE_DIM>;
};
};
#include <HenckyOps.hpp>
private:
};
}
char meshFileName[255];
meshFileName, 255, PETSC_NULL);
}
enum bases { AINSWORTH, DEMKOWICZ, LASBASETOPT };
const char *list_bases[LASBASETOPT] = {"ainsworth", "demkowicz"};
PetscInt choice_base_value = AINSWORTH;
LASBASETOPT, &choice_base_value, PETSC_NULL);
switch (choice_base_value) {
case AINSWORTH:
<< "Set AINSWORTH_LEGENDRE_BASE for displacements";
break;
case DEMKOWICZ:
<< "Set DEMKOWICZ_JACOBI_BASE for displacements";
break;
default:
break;
}
}
auto time_scale = boost::make_shared<TimeScale>();
};
CHKERR BoundaryNaturalBC::AddFluxToPipeline<OpForce>::add(
    pipeline_mng->getOpBoundaryRhsPipeline(), mField, "U", {time_scale},
    "FORCE", Sev::inform);
CHKERR DomainNaturalBC::AddFluxToPipeline<OpBodyForce>::add(
    pipeline_mng->getOpDomainRhsPipeline(), mField, "U", {time_scale},
    "BODY_FORCE", Sev::inform);
CHKERR bc_mng->removeBlockDOFsOnEntities(simple->getProblemName(), "REMOVE_X",
                                         "U", 0, 0);
CHKERR bc_mng->removeBlockDOFsOnEntities(simple->getProblemName(), "REMOVE_Y",
                                         "U", 1, 1);
CHKERR bc_mng->removeBlockDOFsOnEntities(simple->getProblemName(), "REMOVE_Z",
                                         "U", 2, 2);
simple->getProblemName(),
"U");
CHKERR bc_mng->addBlockDOFsToMPCs(
simple->getProblemName(),
"U");
}
auto add_domain_ops_lhs = [&](auto &pip) {
  CHKERR HenckyOps::opFactoryDomainLhs<SPACE_DIM, PETSC, GAUSS, DomainEleOp>(
      mField, pip, "U", "MAT_ELASTIC", Sev::inform);
};
auto add_domain_ops_rhs = [&](auto &pip) {
  CHKERR HenckyOps::opFactoryDomainRhs<SPACE_DIM, PETSC, GAUSS, DomainEleOp>(
      mField, pip, "U", "MAT_ELASTIC", Sev::inform);
};
CHKERR add_domain_ops_lhs(pipeline_mng->getOpDomainLhsPipeline());
CHKERR add_domain_ops_rhs(pipeline_mng->getOpDomainRhsPipeline());
}
"out_step_" + boost::lexical_cast<std::string>(
ts_step) +
".h5m");
}
}
private:
boost::shared_ptr<PostProcEleBdy> postProc;
};
auto ts = pipeline_mng->createTSIM();
auto create_post_proc_fe = [dm, this, simple]() {
  auto post_proc_ele_domain = [dm, this](auto &pip_domain) {
    auto common_ptr = commonDataFactory<SPACE_DIM, GAUSS, DomainEleOp>(
        mField, pip_domain, "U", "MAT_ELASTIC", Sev::inform);
    return common_ptr;
  };
auto post_proc_fe_bdy = boost::make_shared<PostProcEleBdy>(mField);
auto u_ptr = boost::make_shared<MatrixDouble>();
post_proc_fe_bdy->getOpPtrVector().push_back(
    new OpCalculateVectorFieldValues<SPACE_DIM>("U", u_ptr));
auto op_loop_side =
auto common_ptr = post_proc_ele_domain(op_loop_side->getOpPtrVector());
post_proc_fe_bdy->getOpPtrVector().push_back(op_loop_side);
post_proc_fe_bdy->getOpPtrVector().push_back(
    new OpPPMap(post_proc_fe_bdy->getPostProcMesh(),
                post_proc_fe_bdy->getMapGaussPts(),
                {},
                {{"U", u_ptr}},
                {{"GRAD", common_ptr->matGradPtr},
                 {"FIRST_PIOLA", common_ptr->getMatFirstPiolaStress()}},
                {}));
return post_proc_fe_bdy;
};
auto add_extra_finite_elements_to_ksp_solver_pipelines = [&]() {
auto pre_proc_ptr = boost::make_shared<FEMethod>();
auto post_proc_rhs_ptr = boost::make_shared<FEMethod>();
auto post_proc_lhs_ptr = boost::make_shared<FEMethod>();
auto time_scale = boost::make_shared<TimeScale>();
auto get_bc_hook_rhs = [this, pre_proc_ptr, time_scale]() {
  CHKERR EssentialPreProc<DisplacementCubitBcData>(mField, pre_proc_ptr,
                                                   {time_scale}, false)();
};
pre_proc_ptr->preProcessHook = get_bc_hook_rhs;
auto get_post_proc_hook_rhs = [this, post_proc_rhs_ptr]() {
  CHKERR EssentialPreProcReaction<DisplacementCubitBcData>(
      mField, post_proc_rhs_ptr, nullptr, Sev::verbose)();
  CHKERR EssentialPostProcRhs<DisplacementCubitBcData>(
      mField, post_proc_rhs_ptr, 1.)();
};
auto get_post_proc_hook_lhs = [this, post_proc_lhs_ptr]() {
  CHKERR EssentialPostProcLhs<DisplacementCubitBcData>(
      mField, post_proc_lhs_ptr, 1.)();
};
post_proc_rhs_ptr->postProcessHook = get_post_proc_hook_rhs;
post_proc_lhs_ptr->postProcessHook = get_post_proc_hook_lhs;
ts_ctx_ptr->getPreProcessIFunction().push_front(pre_proc_ptr);
ts_ctx_ptr->getPreProcessIJacobian().push_front(pre_proc_ptr);
ts_ctx_ptr->getPostProcessIFunction().push_back(post_proc_rhs_ptr);
ts_ctx_ptr->getPostProcessIJacobian().push_back(post_proc_lhs_ptr);
};
CHKERR add_extra_finite_elements_to_ksp_solver_pipelines();
auto create_monitor_fe = [dm](auto &&post_proc_fe) {
return boost::make_shared<Monitor>(dm, post_proc_fe);
};
boost::shared_ptr<FEMethod> null_fe;
auto monitor_ptr = create_monitor_fe(create_post_proc_fe());
null_fe, monitor_ptr);
double ftime = 1;
CHKERR TSSetDuration(ts, PETSC_DEFAULT, ftime);
CHKERR TSSetExactFinalTime(ts, TS_EXACTFINALTIME_MATCHSTEP);
PetscInt steps, snesfails, rejects, nonlinits, linits;
CHKERR TSGetStepNumber(ts, &steps);
CHKERR TSGetSNESFailures(ts, &snesfails);
CHKERR TSGetStepRejections(ts, &rejects);
CHKERR TSGetSNESIterations(ts, &nonlinits);
CHKERR TSGetKSPIterations(ts, &linits);
"steps %d (%d rejected, %d SNES fails), ftime %g, nonlinits "
"%d, linits %d",
steps, rejects, snesfails, ftime, nonlinits, linits);
}
SCATTER_FORWARD);
double nrm2;
CHKERR VecNorm(T, NORM_2, &nrm2);
MOFEM_LOG("EXAMPLE", Sev::inform) << "Solution norm " << nrm2;
auto post_proc_norm_fe = boost::make_shared<DomainEle>(mField);
auto post_proc_norm_rule_hook = [](int, int, int p) -> int { return 2 * p; };
post_proc_norm_fe->getRuleHook = post_proc_norm_rule_hook;
post_proc_norm_fe->getOpPtrVector(), {H1});
enum NORMS { U_NORM_L2 = 0, PIOLA_NORM, LAST_NORM };
auto norms_vec =
CHKERR VecZeroEntries(norms_vec);
auto u_ptr = boost::make_shared<MatrixDouble>();
post_proc_norm_fe->getOpPtrVector().push_back(
    new OpCalculateVectorFieldValues<SPACE_DIM>("U", u_ptr));
post_proc_norm_fe->getOpPtrVector().push_back(
    new OpCalcNormL2Tensor1<SPACE_DIM>(u_ptr, norms_vec, U_NORM_L2));
auto common_ptr = commonDataFactory<SPACE_DIM, GAUSS, DomainEleOp>(
    mField, post_proc_norm_fe->getOpPtrVector(), "U", "MAT_ELASTIC",
    Sev::inform);
post_proc_norm_fe->getOpPtrVector().push_back(
    new OpCalcNormL2Tensor2<SPACE_DIM, SPACE_DIM>(
        common_ptr->getMatFirstPiolaStress(), norms_vec, PIOLA_NORM));
post_proc_norm_fe);
CHKERR VecAssemblyBegin(norms_vec);
CHKERR VecAssemblyEnd(norms_vec);
const double *norms;
CHKERR VecGetArrayRead(norms_vec, &norms);
<< "norm_u: " << std::scientific << std::sqrt(norms[U_NORM_L2]);
<< "norm_piola: " << std::scientific << std::sqrt(norms[PIOLA_NORM]);
CHKERR VecRestoreArrayRead(norms_vec, &norms);
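// Hedged note on the printed values: the entries accumulated in norms_vec are
// squared L2 norms integrated over the domain, so the logged quantities are
//
//   norm_u     = sqrt( int_Omega u_i u_i dOmega ),
//   norm_piola = sqrt( int_Omega P_iJ P_iJ dOmega ),
//
// assuming OpCalcNormL2Tensor1 / OpCalcNormL2Tensor2 sum the weighted squared
// magnitudes of their input fields at the Gauss points.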
}
}
PetscInt test_nb = 0;
PetscBool test_flg = PETSC_FALSE;
if (test_flg) {
SCATTER_FORWARD);
double nrm2;
CHKERR VecNorm(T, NORM_2, &nrm2);
MOFEM_LOG("EXAMPLE", Sev::verbose) << "Regression norm " << nrm2;
double regression_value = 0;
switch (test_nb) {
case 1:
regression_value = 1.02789;
break;
case 2:
regression_value = 1.8841e+00;
break;
case 3:
regression_value = 1.8841e+00;
break;
case 4:
regression_value = 4.9625e+00;
break;
case 5:
regression_value = 6.6394e+00;
break;
case 6:
regression_value = 4.98764e+00;
break;
case 7:
regression_value = 4.9473e+00;
break;
case 8:
regression_value = 2.5749e-01;
break;
default:
break;
}
if (fabs(nrm2 - regression_value) > 1e-2)
"Regression test field; wrong norm value. %6.4e != %6.4e", nrm2,
regression_value);
}
}
}
static char help[] = "...\n\n";
int main(int argc, char *argv[]) {
const char param_file[] = "param_file.petsc";
auto core_log = logging::core::get();
core_log->add_sink(
LogManager::createSink(LogManager::getStrmWorld(), "EXAMPLE"));
LogManager::setLog("EXAMPLE");
try {
DMType dm_name = "DMMOFEM";
moab::Core mb_instance;
moab::Interface &moab = mb_instance;
}
}
protected:
};
}
}
enum bases { AINSWORTH, DEMKOWICZ, LASBASETOPT };
const char *list_bases[LASBASETOPT] = {"ainsworth", "demkowicz"};
PetscInt choice_base_value = AINSWORTH;
LASBASETOPT, &choice_base_value, PETSC_NULLPTR);
switch (choice_base_value) {
case AINSWORTH:
<< "Set AINSWORTH_LEGENDRE_BASE for displacements";
break;
case DEMKOWICZ:
<< "Set DEMKOWICZ_JACOBI_BASE for displacements";
break;
default:
break;
}
auto project_ho_geometry = [&]() {
Projection10NodeCoordsOnField ent_method(mField, "GEOMETRY");
};
}
auto time_scale = boost::make_shared<ExampleTimeScale>();
};
CHKERR BoundaryNaturalBC::AddFluxToPipeline<OpForce>::add(
    pipeline_mng->getOpBoundaryRhsPipeline(), mField, "U", {time_scale},
    "FORCE", Sev::inform);
CHKERR DomainNaturalBC::AddFluxToPipeline<OpBodyForce>::add(
    pipeline_mng->getOpDomainRhsPipeline(), mField, "U", {time_scale},
    "BODY_FORCE", Sev::inform);
CHKERR bc_mng->removeBlockDOFsOnEntities(simple->getProblemName(), "REMOVE_X",
                                         "U", 0, 0);
CHKERR bc_mng->removeBlockDOFsOnEntities(simple->getProblemName(), "REMOVE_Y",
                                         "U", 1, 1);
CHKERR bc_mng->removeBlockDOFsOnEntities(simple->getProblemName(), "REMOVE_Z",
                                         "U", 2, 2);
CHKERR bc_mng->pushMarkDOFsOnEntities<DisplacementCubitBcData>(
    simple->getProblemName(), "U");
}
auto add_domain_ops_lhs = [&](auto &pip) {
"GEOMETRY");
CHKERR HenckyOps::opFactoryDomainLhs<SPACE_DIM, PETSC, GAUSS, DomainEleOp>(
    mField, pip, "U", "MAT_ELASTIC", Sev::inform);
};
auto add_domain_ops_rhs = [&](auto &pip) {
"GEOMETRY");
CHKERR HenckyOps::opFactoryDomainRhs<SPACE_DIM, PETSC, GAUSS, DomainEleOp>(
    mField, pip, "U", "MAT_ELASTIC", Sev::inform);
};
CHKERR add_domain_ops_lhs(pipeline_mng->getOpDomainLhsPipeline());
CHKERR add_domain_ops_rhs(pipeline_mng->getOpDomainRhsPipeline());
}
auto ts = pipeline_mng->createTSIM();
PetscBool post_proc_vol;
PetscBool post_proc_skin;
post_proc_vol = PETSC_TRUE;
post_proc_skin = PETSC_FALSE;
} else {
post_proc_vol = PETSC_FALSE;
post_proc_skin = PETSC_TRUE;
}
&post_proc_vol, PETSC_NULLPTR);
&post_proc_skin, PETSC_NULLPTR);
auto create_post_proc_fe = [dm, this, simple, post_proc_vol,
                            post_proc_skin]() {
  auto post_proc_ele_domain = [dm, this](auto &pip_domain) {
    "GEOMETRY");
    auto common_ptr = commonDataFactory<SPACE_DIM, GAUSS, DomainEleOp>(
        mField, pip_domain, "U", "MAT_ELASTIC", Sev::inform);
    return common_ptr;
  };
auto post_proc_map = [this](auto &pip, auto u_ptr, auto common_ptr) {
using OpPPMap = OpPostProcMapInMoab<SPACE_DIM, SPACE_DIM>;
pip->getOpPtrVector().push_back(
new OpPPMap(pip->getPostProcMesh(), pip->getMapGaussPts(), {},
{{"U", u_ptr}},
{{"GRAD", common_ptr->matGradPtr},
{"FIRST_PIOLA", common_ptr->getMatFirstPiolaStress()}},
{}));
};
auto push_post_proc_bdy = [dm, this, simple, post_proc_skin,
                           &post_proc_ele_domain,
                           &post_proc_map](auto &pip_bdy) {
if (post_proc_skin == PETSC_FALSE)
return boost::shared_ptr<PostProcEleBdy>();
auto u_ptr = boost::make_shared<MatrixDouble>();
pip_bdy->getOpPtrVector().push_back(
new OpCalculateVectorFieldValues<SPACE_DIM>("U", u_ptr));
auto op_loop_side =
auto common_ptr = post_proc_ele_domain(op_loop_side->getOpPtrVector());
pip_bdy->getOpPtrVector().push_back(op_loop_side);
CHKERR post_proc_map(pip_bdy, u_ptr, common_ptr);
return pip_bdy;
};
auto push_post_proc_domain = [dm, this, simple, post_proc_vol,
                              &post_proc_ele_domain,
                              &post_proc_map](auto &pip_domain) {
if (post_proc_vol == PETSC_FALSE)
return boost::shared_ptr<PostProcEleDomain>();
auto u_ptr = boost::make_shared<MatrixDouble>();
pip_domain->getOpPtrVector().push_back(
new OpCalculateVectorFieldValues<SPACE_DIM>("U", u_ptr));
auto common_ptr = post_proc_ele_domain(pip_domain->getOpPtrVector());
CHKERR post_proc_map(pip_domain, u_ptr, common_ptr);
return pip_domain;
};
auto post_proc_fe_domain = boost::make_shared<PostProcEleDomain>(mField);
auto post_proc_fe_bdy = boost::make_shared<PostProcEleBdy>(mField);
return std::make_pair(push_post_proc_domain(post_proc_fe_domain),
push_post_proc_bdy(post_proc_fe_bdy));
};
auto add_extra_finite_elements_to_solver_pipelines = [&]() {
auto pre_proc_ptr = boost::make_shared<FEMethod>();
auto post_proc_rhs_ptr = boost::make_shared<FEMethod>();
auto post_proc_lhs_ptr = boost::make_shared<FEMethod>();
auto time_scale = boost::make_shared<ExampleTimeScale>();
auto get_bc_hook_rhs = [this, pre_proc_ptr, time_scale]() {
CHKERR EssentialPreProc<DisplacementCubitBcData>(mField, pre_proc_ptr,
{time_scale}, false)();
};
pre_proc_ptr->preProcessHook = get_bc_hook_rhs;
auto get_post_proc_hook_rhs = [this, post_proc_rhs_ptr]() {
CHKERR EssentialPreProcReaction<DisplacementCubitBcData>(
mField, post_proc_rhs_ptr, nullptr, Sev::verbose)();
CHKERR EssentialPostProcRhs<DisplacementCubitBcData>(
mField, post_proc_rhs_ptr, 1.)();
};
auto get_post_proc_hook_lhs = [this, post_proc_lhs_ptr]() {
CHKERR EssentialPostProcLhs<DisplacementCubitBcData>(
mField, post_proc_lhs_ptr, 1.)();
};
post_proc_rhs_ptr->postProcessHook = get_post_proc_hook_rhs;
post_proc_lhs_ptr->postProcessHook = get_post_proc_hook_lhs;
ts_ctx_ptr->getPreProcessIFunction().push_front(pre_proc_ptr);
ts_ctx_ptr->getPreProcessIJacobian().push_front(pre_proc_ptr);
ts_ctx_ptr->getPostProcessIFunction().push_back(post_proc_rhs_ptr);
ts_ctx_ptr->getPostProcessIJacobian().push_back(post_proc_lhs_ptr);
};
CHKERR add_extra_finite_elements_to_solver_pipelines();
auto create_monitor_fe = [dm](auto &&post_proc_fe, auto &&reactionFe) {
return boost::make_shared<Monitor>(dm, post_proc_fe, reactionFe);
};
boost::shared_ptr<FEMethod> null_fe;
auto monitor_ptr = create_monitor_fe(create_post_proc_fe(), reactionFe);
null_fe, monitor_ptr);
double ftime = 1;
CHKERR TSSetMaxTime(ts, ftime);
CHKERR TSSetExactFinalTime(ts, TS_EXACTFINALTIME_MATCHSTEP);
CHKERR TSSetI2Jacobian(ts, B, B, PETSC_NULLPTR, PETSC_NULLPTR);
PetscInt steps, snesfails, rejects, nonlinits, linits;
CHKERR TSGetStepNumber(ts, &steps);
CHKERR TSGetSNESFailures(ts, &snesfails);
CHKERR TSGetStepRejections(ts, &rejects);
CHKERR TSGetSNESIterations(ts, &nonlinits);
CHKERR TSGetKSPIterations(ts, &linits);
"steps %d (%d rejected, %d SNES fails), ftime %g, nonlinits "
"%d, linits %d",
steps, rejects, snesfails, ftime, nonlinits, linits);
}
SCATTER_FORWARD);
double nrm2;
CHKERR VecNorm(T, NORM_2, &nrm2);
MOFEM_LOG("EXAMPLE", Sev::inform) << "Solution norm " << nrm2;
auto post_proc_norm_fe = boost::make_shared<DomainEle>(mField);
auto post_proc_norm_rule_hook = [](int, int, int p) -> int { return 2 * p; };
post_proc_norm_fe->getRuleHook = post_proc_norm_rule_hook;
post_proc_norm_fe->getOpPtrVector(), {H1}, "GEOMETRY");
enum NORMS { U_NORM_L2 = 0, PIOLA_NORM, LAST_NORM };
auto norms_vec =
CHKERR VecZeroEntries(norms_vec);
auto u_ptr = boost::make_shared<MatrixDouble>();
post_proc_norm_fe->getOpPtrVector().push_back(
new OpCalculateVectorFieldValues<SPACE_DIM>("U", u_ptr));
post_proc_norm_fe->getOpPtrVector().push_back(
new OpCalcNormL2Tensor1<SPACE_DIM>(u_ptr, norms_vec, U_NORM_L2));
auto common_ptr = commonDataFactory<SPACE_DIM, GAUSS, DomainEleOp>(
    mField, post_proc_norm_fe->getOpPtrVector(), "U", "MAT_ELASTIC",
    Sev::inform);
post_proc_norm_fe->getOpPtrVector().push_back(
new OpCalcNormL2Tensor2<SPACE_DIM, SPACE_DIM>(
common_ptr->getMatFirstPiolaStress(), norms_vec, PIOLA_NORM));
post_proc_norm_fe);
CHKERR VecAssemblyBegin(norms_vec);
CHKERR VecAssemblyEnd(norms_vec);
const double *norms;
CHKERR VecGetArrayRead(norms_vec, &norms);
<< "norm_u: " << std::scientific << std::sqrt(norms[U_NORM_L2]);
<< "norm_piola: " << std::scientific << std::sqrt(norms[PIOLA_NORM]);
CHKERR VecRestoreArrayRead(norms_vec, &norms);
}
}
}
PetscInt test_nb = 0;
PetscBool test_flg = PETSC_FALSE;
if (test_flg) {
SCATTER_FORWARD);
double nrm2;
CHKERR VecNorm(T, NORM_2, &nrm2);
MOFEM_LOG("EXAMPLE", Sev::verbose) << "Regression norm " << nrm2;
double regression_value = 0;
switch (test_nb) {
case 1:
regression_value = 1.02789;
break;
case 2:
regression_value = 1.8841e+00;
break;
case 3:
regression_value = 1.8841e+00;
break;
default:
break;
}
if (fabs(nrm2 - regression_value) > 1e-2)
"Regression test field; wrong norm value. %6.4e != %6.4e", nrm2,
regression_value);
}
}
#ifndef __HENCKY_OPS_HPP__
#define __HENCKY_OPS_HPP__
constexpr double eps = std::numeric_limits<float>::epsilon();
auto f = [](double v) { return 0.5 * std::log(v); };
auto d_f = [](double v) { return 0.5 / v; };
auto dd_f = [](double v) { return -0.5 / (v * v); };
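// Hedged note: f, d_f and dd_f are the scalar function 0.5 * log(v) and its
// first and second derivatives. Applied to the eigenvalues of the right
// Cauchy-Green tensor C they build the logarithmic (Hencky) strain through the
// spectral form
//
//   E_log = 0.5 * log(C) = sum_a 0.5 * log(lambda_a) n_a (x) n_a,
//
// where lambda_a, n_a are eigenpairs of C; the symbols lambda_a and n_a are
// introduced here only for explanation and do not appear in the code.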
struct isEq {
  static inline auto check(const double &a, const double &b) {
  }
};
inline auto is_eq(const double &a, const double &b) {
};
template <int DIM>
inline auto get_uniq_nb(double *ptr) {
  std::array<double, DIM> tmp;
  std::copy(ptr, &ptr[DIM], tmp.begin());
  std::sort(tmp.begin(), tmp.end());
  isEq::absMax = std::max(std::abs(tmp[0]), std::abs(tmp[DIM - 1]));
  return std::distance(tmp.begin(),
                       std::unique(tmp.begin(), tmp.end(), is_eq));
};
template <int DIM>
std::max(std::max(std::abs(eig(0)), std::abs(eig(1))), std::abs(eig(2)));
if (is_eq(eig(0), eig(1))) {
} else if (is_eq(eig(0), eig(2))) {
} else if (is_eq(eig(1), eig(2))) {
}
    eigen_vec(i, 0), eigen_vec(i, 1), eigen_vec(i, 2),
    eigen_vec(j, 0), eigen_vec(j, 1), eigen_vec(j, 2),
    eigen_vec(k, 0), eigen_vec(k, 1), eigen_vec(k, 2)};
{
  eigen_vec(i, j) = eigen_vec_c(i, j);
}
};
struct CommonData : public boost::enable_shared_from_this<CommonData> {
  boost::shared_ptr<MatrixDouble> matDPtr;
  return boost::shared_ptr<MatrixDouble>(shared_from_this(),
}
  return boost::shared_ptr<MatrixDouble>(shared_from_this(),
}
  return boost::shared_ptr<MatrixDouble>(shared_from_this(), &matLogC);
}
  return boost::shared_ptr<MatrixDouble>(shared_from_this(), &matTangent);
}
};
template <int DIM, IntegrationType I, typename DomainEleOp>
struct OpCalculateEigenValsImpl;
template <int DIM, IntegrationType I, typename DomainEleOp>
struct OpCalculateLogCImpl;
template <int DIM, IntegrationType I, typename DomainEleOp>
struct OpCalculateLogC_dCImpl;
template <int DIM, IntegrationType I, typename DomainEleOp, int S>
struct OpCalculateHenckyStressImpl;
template <int DIM, IntegrationType I, typename DomainEleOp, int S>
struct OpCalculateHenckyThermalStressImpl;
template <int DIM, IntegrationType I, typename DomainEleOp, int S>
struct OpCalculateHenckyPlasticStressImpl;
template <int DIM, IntegrationType I, typename DomainEleOp, int S>
struct OpCalculateHenckyThermoPlasticStressImpl;
template <int DIM, IntegrationType I, typename DomainEleOp, int S>
struct OpCalculatePiolaStressImpl;
template <int DIM, IntegrationType I, typename DomainEleOp, int S>
struct OpHenckyTangentImpl;
template <int DIM, IntegrationType I, typename DomainEleOp, int S>
struct OpCalculateHenckyThermalStressdTImpl;
template <int DIM, typename DomainEleOp>
OpCalculateEigenValsImpl(
const std::string field_name, boost::shared_ptr<CommonData> common_data)
commonDataPtr(common_data) {
std::fill(&DomainEleOp::doEntities[MBEDGE],
&DomainEleOp::doEntities[MBMAXTYPE], false);
}
const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
auto t_grad = getFTensor2FromMat<DIM, DIM>(*(commonDataPtr->matGradPtr));
commonDataPtr->matEigVal.resize(DIM, nb_gauss_pts, false);
commonDataPtr->matEigVec.resize(DIM * DIM, nb_gauss_pts, false);
auto t_eig_val = getFTensor1FromMat<DIM>(commonDataPtr->matEigVal);
auto t_eig_vec = getFTensor2FromMat<DIM, DIM>(commonDataPtr->matEigVec);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
for (int ii = 0; ii != DIM; ii++)
for (int jj = 0; jj != DIM; jj++)
eigen_vec(ii, jj) = C(ii, jj);
for (auto ii = 0; ii != DIM; ++ii)
eig(ii) = std::max(std::numeric_limits<double>::epsilon(), eig(ii));
auto nb_uniq = get_uniq_nb<DIM>(&eig(0));
if constexpr (DIM == 3) {
if (nb_uniq == 2) {
sort_eigen_vals<DIM>(eig, eigen_vec);
}
}
t_eig_vec(i, j) = eigen_vec(i, j);
++t_grad;
++t_eig_val;
++t_eig_vec;
}
}
private:
boost::shared_ptr<CommonData> commonDataPtr;
};
template <int DIM, typename DomainEleOp>
boost::shared_ptr<CommonData> common_data)
commonDataPtr(common_data) {
std::fill(&DomainEleOp::doEntities[MBEDGE],
&DomainEleOp::doEntities[MBMAXTYPE], false);
}
const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
commonDataPtr->matLogC.resize(
size_symm, nb_gauss_pts,
false);
auto t_eig_val = getFTensor1FromMat<DIM>(commonDataPtr->matEigVal);
auto t_eig_vec = getFTensor2FromMat<DIM, DIM>(commonDataPtr->matEigVec);
auto t_logC = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matLogC);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
++t_eig_val;
++t_eig_vec;
++t_logC;
}
}
private:
boost::shared_ptr<CommonData> commonDataPtr;
};
template <int DIM, typename DomainEleOp>
OpCalculateLogC_dCImpl(
const std::string field_name, boost::shared_ptr<CommonData> common_data)
commonDataPtr(common_data) {
std::fill(&DomainEleOp::doEntities[MBEDGE],
&DomainEleOp::doEntities[MBMAXTYPE], false);
}
const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
auto t_logC_dC = getFTensor4DdgFromMat<DIM, DIM>(commonDataPtr->matLogCdC);
auto t_eig_val = getFTensor1FromMat<DIM>(commonDataPtr->matEigVal);
auto t_eig_vec = getFTensor2FromMat<DIM, DIM>(commonDataPtr->matEigVec);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
auto nb_uniq = get_uniq_nb<DIM>(&t_eig_val(0));
++t_logC_dC;
++t_eig_val;
++t_eig_vec;
}
}
private:
boost::shared_ptr<CommonData> commonDataPtr;
};
template <int DIM, typename DomainEleOp, int S>
OpCalculateHenckyStressImpl(
const std::string field_name, boost::shared_ptr<CommonData> common_data)
commonDataPtr(common_data) {
std::fill(&DomainEleOp::doEntities[MBEDGE],
&DomainEleOp::doEntities[MBMAXTYPE], false);
}
const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
auto t_D = getFTensor4DdgFromMat<DIM, DIM, S>(*commonDataPtr->matDPtr);
auto t_logC = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matLogC);
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
commonDataPtr->matHenckyStress.resize(
size_symm, nb_gauss_pts,
false);
auto t_T = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matHenckyStress);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
t_T(i, j) = t_D(i, j, k, l) * t_logC(k, l);
++t_logC;
++t_T;
++t_D;
}
}
private:
boost::shared_ptr<CommonData> commonDataPtr;
};
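// Minimal summary of the operator above, under the assumption that matLogC
// holds the logarithmic strain and matDPtr the elastic stiffness: at each
// Gauss point it evaluates
//
//   T(i,j) = D(i,j,k,l) * logC(k,l),
//
// i.e. the Hencky stress is obtained from the logarithmic strain through the
// usual linear constitutive tensor.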
template <int DIM, typename DomainEleOp, int S>
OpCalculateHenckyThermalStressImpl(
const std::string field_name, boost::shared_ptr<VectorDouble> temperature,
boost::shared_ptr<CommonData> common_data,
boost::shared_ptr<VectorDouble> coeff_expansion_ptr,
boost::shared_ptr<double> ref_temp_ptr)
commonDataPtr(common_data), coeffExpansionPtr(coeff_expansion_ptr),
refTempPtr(ref_temp_ptr) {
std::fill(&DomainEleOp::doEntities[MBEDGE],
&DomainEleOp::doEntities[MBMAXTYPE], false);
}
const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
auto t_D = getFTensor4DdgFromMat<DIM, DIM, S>(*commonDataPtr->matDPtr);
auto t_logC = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matLogC);
auto t_logC_dC = getFTensor4DdgFromMat<DIM, DIM>(commonDataPtr->matLogCdC);
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
commonDataPtr->matHenckyStress.resize(
size_symm, nb_gauss_pts,
false);
commonDataPtr->matFirstPiolaStress.resize(DIM * DIM, nb_gauss_pts, false);
commonDataPtr->matSecondPiolaStress.resize(
size_symm, nb_gauss_pts,
false);
auto t_T = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matHenckyStress);
auto t_P = getFTensor2FromMat<DIM, DIM>(commonDataPtr->matFirstPiolaStress);
auto t_S =
getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matSecondPiolaStress);
auto t_grad = getFTensor2FromMat<DIM, DIM>(*(commonDataPtr->matGradPtr));
auto t_temp = getFTensor0FromVec(*tempPtr);
t_coeff_exp(d, d) = (*coeffExpansionPtr)[d];
}
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
#ifdef HENCKY_SMALL_STRAIN
      (t_grad(k, l) - t_coeff_exp(k, l) * (t_temp - (*refTempPtr)));
#else
      (t_logC(k, l) - t_coeff_exp(k, l) * (t_temp - (*refTempPtr)));
      t_S(k, l) = t_T(i, j) * t_logC_dC(i, j, k, l);
      t_P(i, l) = t_F(i, k) * t_S(k, l);
#endif
++t_grad;
++t_logC;
++t_logC_dC;
++t_P;
++t_T;
++t_S;
++t_D;
++t_temp;
}
}
private:
boost::shared_ptr<CommonData> commonDataPtr;
boost::shared_ptr<VectorDouble> tempPtr;
boost::shared_ptr<VectorDouble> coeffExpansionPtr;
boost::shared_ptr<double> refTempPtr;
};
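// Hedged reading of the thermal variant above: an isotropic thermal
// eigenstrain is subtracted from the logarithmic strain before the
// constitutive tensor is applied,
//
//   T(i,j) = D(i,j,k,l) * (logC(k,l) - alpha(k,l) * (theta - theta_ref)),
//   S(k,l) = T(i,j) * dlogC_dC(i,j,k,l),   P(i,l) = F(i,k) * S(k,l),
//
// where alpha is the diagonal expansion tensor filled from coeffExpansionPtr
// and theta_ref is *refTempPtr; with HENCKY_SMALL_STRAIN the displacement
// gradient takes the place of logC and no pull-back is performed.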
template <int DIM, typename DomainEleOp, int S>
OpCalculateHenckyPlasticStressImpl(
const std::string field_name,
boost::shared_ptr<CommonData> common_data,
boost::shared_ptr<MatrixDouble> mat_D_ptr,
scaleStress(scale), matDPtr(mat_D_ptr) {
std::fill(&DomainEleOp::doEntities[MBEDGE],
&DomainEleOp::doEntities[MBMAXTYPE], false);
matLogCPlastic = commonDataPtr->matLogCPlastic;
}
const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
auto t_D = getFTensor4DdgFromMat<DIM, DIM, S>(*matDPtr);
auto t_logC = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matLogC);
auto t_logCPlastic = getFTensor2SymmetricFromMat<DIM>(*matLogCPlastic);
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
commonDataPtr->matHenckyStress.resize(
size_symm, nb_gauss_pts,
false);
auto t_T = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matHenckyStress);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
t_T(i, j) = t_D(i, j, k, l) * (t_logC(k, l) - t_logCPlastic(k, l));
t_T(i, j) /= scaleStress;
++t_logC;
++t_T;
++t_D;
++t_logCPlastic;
}
}
private:
boost::shared_ptr<CommonData> commonDataPtr;
boost::shared_ptr<MatrixDouble> matDPtr;
boost::shared_ptr<MatrixDouble> matLogCPlastic;
const double scaleStress;
};
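// The plastic variant follows the same pattern with an additive split in
// logarithmic strain space: the plastic part is removed before applying the
// stiffness and the result is rescaled,
//
//   T(i,j) = D(i,j,k,l) * (logC(k,l) - logC_plastic(k,l)),  T /= scaleStress,
//
// with logC_plastic taken from commonDataPtr->matLogCPlastic.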
template <int DIM, typename DomainEleOp, int S>
OpCalculateHenckyThermoPlasticStressImpl(
const std::string field_name, boost::shared_ptr<VectorDouble> temperature,
boost::shared_ptr<CommonData> common_data,
boost::shared_ptr<VectorDouble> coeff_expansion_ptr,
boost::shared_ptr<double> ref_temp_ptr)
commonDataPtr(common_data), coeffExpansionPtr(coeff_expansion_ptr),
refTempPtr(ref_temp_ptr) {
std::fill(&DomainEleOp::doEntities[MBEDGE],
&DomainEleOp::doEntities[MBMAXTYPE], false);
matLogCPlastic = commonDataPtr->matLogCPlastic;
}
const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
auto t_D = getFTensor4DdgFromMat<DIM, DIM, S>(*commonDataPtr->matDPtr);
auto t_logC = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matLogC);
auto t_logC_dC = getFTensor4DdgFromMat<DIM, DIM>(commonDataPtr->matLogCdC);
auto t_logCPlastic = getFTensor2SymmetricFromMat<DIM>(*matLogCPlastic);
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
commonDataPtr->matHenckyStress.resize(
size_symm, nb_gauss_pts,
false);
commonDataPtr->matFirstPiolaStress.resize(DIM * DIM, nb_gauss_pts, false);
commonDataPtr->matSecondPiolaStress.resize(
size_symm, nb_gauss_pts,
false);
auto t_T = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matHenckyStress);
auto t_P = getFTensor2FromMat<DIM, DIM>(commonDataPtr->matFirstPiolaStress);
auto t_S =
getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matSecondPiolaStress);
auto t_grad = getFTensor2FromMat<DIM, DIM>(*(commonDataPtr->matGradPtr));
auto t_temp = getFTensor0FromVec(*tempPtr);
t_coeff_exp(d, d) = (*coeffExpansionPtr)[d];
}
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
#ifdef HENCKY_SMALL_STRAIN
      t_D(i, j, k, l) * (t_grad(k, l) - t_logCPlastic(k, l) -
                         t_coeff_exp(k, l) * (t_temp - (*refTempPtr)));
#else
      t_D(i, j, k, l) * (t_logC(k, l) - t_logCPlastic(k, l) -
                         t_coeff_exp(k, l) * (t_temp - (*refTempPtr)));
      t_S(k, l) = t_T(i, j) * t_logC_dC(i, j, k, l);
      t_P(i, l) = t_F(i, k) * t_S(k, l);
#endif
++t_grad;
++t_logC;
++t_P;
++t_T;
++t_S;
++t_D;
++t_temp;
++t_logCPlastic;
}
}
private:
boost::shared_ptr<CommonData> commonDataPtr;
boost::shared_ptr<VectorDouble> tempPtr;
boost::shared_ptr<MatrixDouble> matLogCPlastic;
boost::shared_ptr<VectorDouble> coeffExpansionPtr;
boost::shared_ptr<double> refTempPtr;
};
template <int DIM, typename DomainEleOp, int S>
OpCalculatePiolaStressImpl(
const std::string field_name,
boost::shared_ptr<CommonData> common_data)
commonDataPtr(common_data) {
std::fill(&DomainEleOp::doEntities[MBEDGE],
&DomainEleOp::doEntities[MBMAXTYPE], false);
}
const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
#ifdef HENCKY_SMALL_STRAIN
auto t_D = getFTensor4DdgFromMat<DIM, DIM, S>(*commonDataPtr->matDPtr);
#endif
auto t_logC = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matLogC);
auto t_logC_dC = getFTensor4DdgFromMat<DIM, DIM>(commonDataPtr->matLogCdC);
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
commonDataPtr->matFirstPiolaStress.resize(DIM * DIM, nb_gauss_pts, false);
commonDataPtr->matSecondPiolaStress.resize(
size_symm, nb_gauss_pts,
false);
auto t_P = getFTensor2FromMat<DIM, DIM>(commonDataPtr->matFirstPiolaStress);
auto t_T = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matHenckyStress);
auto t_S =
getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matSecondPiolaStress);
auto t_grad = getFTensor2FromMat<DIM, DIM>(*(commonDataPtr->matGradPtr));
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
#ifdef HENCKY_SMALL_STRAIN
      t_P(i, j) = t_D(i, j, k, l) * t_grad(k, l);
#else
      t_S(k, l) = t_T(i, j) * t_logC_dC(i, j, k, l);
      t_P(i, l) = t_F(i, k) * t_S(k, l);
#endif
++t_grad;
++t_logC;
++t_logC_dC;
++t_P;
++t_T;
++t_S;
#ifdef HENCKY_SMALL_STRAIN
++t_D;
#endif
}
}
private:
boost::shared_ptr<CommonData> commonDataPtr;
};
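// Summary of the pull-back performed above in the large-strain branch: the
// Hencky stress T is mapped to the second and then the first Piola-Kirchhoff
// stress,
//
//   S(k,l) = T(i,j) * dlogC_dC(i,j,k,l),   P(i,l) = F(i,k) * S(k,l),
//
// while with HENCKY_SMALL_STRAIN it reduces to
// P(i,j) = D(i,j,k,l) * grad_u(k,l).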
template <int DIM, typename DomainEleOp, int S>
boost::shared_ptr<CommonData> common_data,
boost::shared_ptr<MatrixDouble> mat_D_ptr = nullptr)
commonDataPtr(common_data) {
std::fill(&DomainEleOp::doEntities[MBEDGE],
&DomainEleOp::doEntities[MBMAXTYPE], false);
if (mat_D_ptr)
matDPtr = mat_D_ptr;
else
matDPtr = commonDataPtr->matDPtr;
}
const size_t nb_gauss_pts = DomainEleOp::getGaussPts().size2();
commonDataPtr->matTangent.resize(DIM * DIM * DIM * DIM, nb_gauss_pts);
auto dP_dF =
getFTensor4FromMat<DIM, DIM, DIM, DIM, 1>(commonDataPtr->matTangent);
auto t_D = getFTensor4DdgFromMat<DIM, DIM, S>(*matDPtr);
auto t_eig_val = getFTensor1FromMat<DIM>(commonDataPtr->matEigVal);
auto t_eig_vec = getFTensor2FromMat<DIM, DIM>(commonDataPtr->matEigVec);
auto t_T = getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matHenckyStress);
auto t_S =
getFTensor2SymmetricFromMat<DIM>(commonDataPtr->matSecondPiolaStress);
auto t_grad = getFTensor2FromMat<DIM, DIM>(*(commonDataPtr->matGradPtr));
auto t_logC_dC = getFTensor4DdgFromMat<DIM, DIM>(commonDataPtr->matLogCdC);
commonDataPtr->matCdF.resize(DIM * DIM * DIM * DIM, nb_gauss_pts);
auto dC_dF =
getFTensor4FromMat<DIM, DIM, DIM, DIM, 1>(commonDataPtr->matCdF);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
#ifdef HENCKY_SMALL_STRAIN
#else
      auto nb_uniq = get_uniq_nb<DIM>(&t_eig_val(0));
      t_T, nb_uniq);
      P_D_P_plus_TL(i, j, k, l) =
          (t_logC_dC(i, j, o, p) * t_D(o, p, m, n)) * t_logC_dC(m, n, k, l);
      P_D_P_plus_TL(i, j, k, l) *= 0.5;
      t_F(i, k) * (P_D_P_plus_TL(k, j, o, p) * dC_dF(o, p, m, n));
#endif
++dP_dF;
++t_grad;
++t_eig_val;
++t_eig_vec;
++t_logC_dC;
++t_S;
++t_T;
++t_D;
++dC_dF;
}
}
private:
boost::shared_ptr<CommonData> commonDataPtr;
boost::shared_ptr<MatrixDouble> matDPtr;
};
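// Hedged sketch of the tangent above (large-strain branch): dP/dF is obtained
// by the chain rule through C = F^T F, combining the stored dlogC/dC, the
// elastic stiffness D and dC/dF, roughly
//
//   dP/dF ~ (geometric term in S) + F * ( dlogC_dC : D : dlogC_dC
//                                         + T : d2(logC)/dC2 ) : dC_dF,
//
// which is why matLogCdC and matCdF are kept per Gauss point. This is only an
// orientation aid; the exact terms are the ones coded in the loop.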
template <int DIM, typename AssemblyDomainEleOp, int S>
OpCalculateHenckyThermalStressdTImpl(
const std::string row_field_name, const std::string col_field_name,
boost::shared_ptr<CommonData> elastic_common_data_ptr,
boost::shared_ptr<VectorDouble> coeff_expansion_ptr);
EntitiesFieldData::EntData &col_data);
private:
boost::shared_ptr<CommonData> elasticCommonDataPtr;
boost::shared_ptr<VectorDouble> coeffExpansionPtr;
};
template <int DIM, typename AssemblyDomainEleOp, int S>
OpCalculateHenckyThermalStressdTImpl<DIM, GAUSS, AssemblyDomainEleOp, S>::
OpCalculateHenckyThermalStressdTImpl(
const std::string row_field_name, const std::string col_field_name,
boost::shared_ptr<CommonData> elastic_common_data_ptr,
boost::shared_ptr<VectorDouble> coeff_expansion_ptr)
elasticCommonDataPtr(elastic_common_data_ptr),
coeffExpansionPtr(coeff_expansion_ptr) {
this->sYmm = false;
}
template <int DIM, typename AssemblyDomainEleOp, int S>
OpCalculateHenckyThermalStressdTImpl<DIM, GAUSS, AssemblyDomainEleOp, S>::
iNtegrate(EntitiesFieldData::EntData &row_data,
EntitiesFieldData::EntData &col_data) {
auto &locMat = AssemblyDomainEleOp::locMat;
const auto nb_integration_pts = row_data.getN().size1();
const auto nb_row_base_functions = row_data.getN().size2();
auto t_w = this->getFTensor0IntegrationWeight();
auto t_row_diff_base = row_data.getFTensor1DiffN<DIM>();
auto t_D = getFTensor4DdgFromMat<DIM, DIM, S>(*elasticCommonDataPtr->matDPtr);
auto t_grad =
getFTensor2FromMat<DIM, DIM>(*(elasticCommonDataPtr->matGradPtr));
auto t_logC_dC =
getFTensor4DdgFromMat<DIM, DIM>(elasticCommonDataPtr->matLogCdC);
t_coeff_exp(d, d) = (*coeffExpansionPtr)[d];
}
t_eigen_strain(i, j) = t_D(i, j, k, l) * t_coeff_exp(k, l);
for (auto gg = 0; gg != nb_integration_pts; ++gg) {
double alpha = this->getMeasure() * t_w;
auto rr = 0;
for (; rr != AssemblyDomainEleOp::nbRows / DIM; ++rr) {
auto t_mat = getFTensor1FromMat<DIM, 1>(locMat, rr * DIM);
auto t_col_base = col_data.getFTensor0N(gg, 0);
for (auto cc = 0; cc != AssemblyDomainEleOp::nbCols; cc++) {
#ifdef HENCKY_SMALL_STRAIN
        (t_row_diff_base(j) * t_eigen_strain(i, j)) * (t_col_base * alpha);
#else
        t_mat(i) -= (t_row_diff_base(j) *
                     (t_F(i, o) * ((t_D(m, n, k, l) * t_coeff_exp(k, l)) *
                                   t_logC_dC(m, n, o, j)))) *
                    (t_col_base * alpha);
#endif
++t_mat;
++t_col_base;
}
++t_row_diff_base;
}
for (; rr != nb_row_base_functions; ++rr)
++t_row_diff_base;
++t_w;
++t_grad;
++t_logC_dC;
++t_D;
}
}
template <typename DomainEleOp> struct HenckyIntegrators {
template <int DIM, IntegrationType I>
using OpCalculateEigenVals = OpCalculateEigenValsImpl<DIM, I, DomainEleOp>;
template <int DIM, IntegrationType I>
using OpCalculateLogC = OpCalculateLogCImpl<DIM, I, DomainEleOp>;
template <int DIM, IntegrationType I>
using OpCalculateLogC_dC = OpCalculateLogC_dCImpl<DIM, I, DomainEleOp>;
template <int DIM, IntegrationType I, int S>
using OpCalculateHenckyStress =
OpCalculateHenckyStressImpl<DIM, I, DomainEleOp, S>;
template <int DIM, IntegrationType I, int S>
using OpCalculateHenckyThermalStress =
OpCalculateHenckyThermalStressImpl<DIM, I, DomainEleOp, S>;
template <int DIM, IntegrationType I, int S>
using OpCalculateHenckyPlasticStress =
OpCalculateHenckyPlasticStressImpl<DIM, I, DomainEleOp, S>;
template <int DIM, IntegrationType I, int S>
using OpCalculateHenckyThermoPlasticStress =
OpCalculateHenckyThermoPlasticStressImpl<DIM, I, DomainEleOp, S>;
template <int DIM, IntegrationType I, int S>
using OpCalculatePiolaStress =
OpCalculatePiolaStressImpl<DIM, I, DomainEleOp, S>;
template <int DIM, IntegrationType I, int S>
using OpHenckyTangent = OpHenckyTangentImpl<DIM, GAUSS, DomainEleOp, S>;
template <int DIM, IntegrationType I, typename AssemblyDomainEleOp, int S>
using OpCalculateHenckyThermalStressdT =
OpCalculateHenckyThermalStressdTImpl<DIM, I, AssemblyDomainEleOp, S>;
};
template <int DIM>
boost::ptr_deque<ForcesAndSourcesCore::UserDataOperator> &pip,
std::string block_name,
boost::shared_ptr<MatrixDouble> mat_D_Ptr, Sev sev,
PetscBool plane_strain_flag = PETSC_FALSE;
&plane_strain_flag, PETSC_NULLPTR);
std::vector<const CubitMeshSets *> meshset_vec_ptr,
double scale, PetscBool plane_strain_flag)
planeStrainFlag(plane_strain_flag) {
"Can not get data from block");
}
EntitiesFieldData::EntData &data) {
for (auto &b : blockData) {
if (b.blockEnts.find(getFEEntityHandle()) != b.blockEnts.end()) {
CHKERR getMatDPtr(matDPtr, b.bulkModulusK * scaleYoungModulus,
b.shearModulusG * scaleYoungModulus,
planeStrainFlag);
}
}
CHKERR getMatDPtr(matDPtr, bulkModulusKDefault * scaleYoungModulus,
shearModulusGDefault * scaleYoungModulus,
planeStrainFlag);
}
private:
boost::shared_ptr<MatrixDouble> matDPtr;
const double scaleYoungModulus;
const PetscBool planeStrainFlag;
double bulkModulusK;
double shearModulusG;
};
double bulkModulusKDefault;
double shearModulusGDefault;
std::vector<BlockData> blockData;
std::vector<const CubitMeshSets *> meshset_vec_ptr,
Sev sev) {
for (auto m : meshset_vec_ptr) {
std::vector<double> block_data;
CHKERR m->getAttributes(block_data);
if (block_data.size() < 2) {
"Expected that block has two attribute");
}
auto get_block_ents = [&]() {
m_field.get_moab().get_entities_by_handle(m->meshset, ents, true);
return ents;
};
blockData.push_back(
}
}
auto set_material_stiffness = [&]() {
: 1;
auto t_D = getFTensor4DdgFromMat<DIM, DIM, 0>(*mat_D_ptr);
};
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
set_material_stiffness();
}
};
PetscOptionsBegin(PETSC_COMM_WORLD, "", "", "none");
CHKERR PetscOptionsScalar(
"-young_modulus",
"Young modulus",
"",
E, &
E,
PETSC_NULLPTR);
CHKERR PetscOptionsScalar(
"-poisson_ratio",
"poisson ratio",
"", nu, &nu,
PETSC_NULLPTR);
PetscOptionsEnd();
pip.push_back(new OpMatBlocks(
m_field.
getInterface<MeshsetsManager>()->getCubitMeshsetPtr(std::regex(
(boost::format("%s(.*)") % block_name).str()
)),
));
}
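// For reference: the options read above are the Young modulus E and Poisson
// ratio nu; assuming the usual isotropic 3D relations, the bulk and shear
// moduli used by OpMatBlocks (bulkModulusK, shearModulusG) follow as
//
//   K = E / (3 * (1 - 2 * nu)),   G = E / (2 * (1 + nu)).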
template <int DIM, IntegrationType I, typename DomainEleOp>
boost::ptr_deque<ForcesAndSourcesCore::UserDataOperator> &pip,
std::string field_name, std::string block_name, Sev sev,
double scale = 1) {
auto common_ptr = boost::make_shared<HenckyOps::CommonData>();
common_ptr->matDPtr = boost::make_shared<MatrixDouble>();
common_ptr->matGradPtr = boost::make_shared<MatrixDouble>();
common_ptr->matDPtr, sev,
scale),
"addMatBlockOps");
using H = HenckyIntegrators<DomainEleOp>;
pip.push_back(new OpCalculateVectorFieldGradient<DIM, DIM>(
pip.push_back(new typename H::template OpCalculateEigenVals<DIM, I>(
pip.push_back(
new typename H::template OpCalculateLogC<DIM, I>(
field_name, common_ptr));
pip.push_back(new typename H::template OpCalculateLogC_dC<DIM, I>(
pip.push_back(new typename H::template OpCalculateHenckyStress<DIM, I, 0>(
pip.push_back(new typename H::template OpCalculatePiolaStress<DIM, I, 0>(
return common_ptr;
}
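// Reading of the factory above: for the given field it pushes, in order, the
// evaluation of grad(u), the eigen-decomposition of C, log(C), d log(C)/dC,
// the Hencky stress and the first Piola-Kirchhoff stress, and returns the
// shared CommonData holding all intermediate matrices for reuse by the
// Lhs/Rhs factories below.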
template <int DIM, AssemblyType A, IntegrationType I, typename DomainEleOp>
boost::ptr_deque<ForcesAndSourcesCore::UserDataOperator> &pip,
std::string field_name, boost::shared_ptr<HenckyOps::CommonData> common_ptr,
Sev sev) {
using B =
typename FormsIntegrators<DomainEleOp>::template Assembly<
pip.push_back(
}
template <int DIM, AssemblyType A, IntegrationType I, typename DomainEleOp>
boost::ptr_deque<ForcesAndSourcesCore::UserDataOperator> &pip,
std::string field_name, std::string block_name, Sev sev,
double scale = 1) {
auto common_ptr = commonDataFactory<DIM, I, DomainEleOp>(
common_ptr, sev);
}
template <int DIM, AssemblyType A, IntegrationType I, typename DomainEleOp>
boost::ptr_deque<ForcesAndSourcesCore::UserDataOperator> &pip,
std::string field_name, boost::shared_ptr<HenckyOps::CommonData> common_ptr,
Sev sev) {
using B =
typename FormsIntegrators<DomainEleOp>::template Assembly<
using OpKPiola = typename B::template OpGradTensorGrad<1, DIM, DIM, 1>;
using H = HenckyIntegrators<DomainEleOp>;
pip.push_back(new typename H::template OpHenckyTangent<DIM, I, 0>(
pip.push_back(
}
template <int DIM, AssemblyType A, IntegrationType I, typename DomainEleOp>
boost::ptr_deque<ForcesAndSourcesCore::UserDataOperator> &pip,
std::string field_name, std::string block_name, Sev sev,
double scale = 1) {
auto common_ptr = commonDataFactory<DIM, I, DomainEleOp>(
common_ptr, sev);
}
}
#endif