Line data Source code
1 : /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
2 : Copyright (c) 2016-2019 The plumed team
3 : (see the PEOPLE file at the root of the distribution for a list of names)
4 :
5 : See http://www.plumed.org for more information.
6 :
7 : This file is part of plumed, version 2.
8 :
9 : plumed is free software: you can redistribute it and/or modify
10 : it under the terms of the GNU Lesser General Public License as published by
11 : the Free Software Foundation, either version 3 of the License, or
12 : (at your option) any later version.
13 :
14 : plumed is distributed in the hope that it will be useful,
15 : but WITHOUT ANY WARRANTY; without even the implied warranty of
16 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 : GNU Lesser General Public License for more details.
18 :
19 : You should have received a copy of the GNU Lesser General Public License
20 : along with plumed. If not, see <http://www.gnu.org/licenses/>.
21 : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
22 : #include "Bias.h"
23 : #include "core/PlumedMain.h"
24 : #include "core/Atoms.h"
25 : #include <string>
26 : #include <cstring>
27 : //#include "ActionRegister.h"
28 : #include "core/ActionRegister.h"
29 : #include "core/ActionWithValue.h"
30 : #include "tools/Communicator.h"
31 : #include "tools/File.h"
32 : #include <iostream>
33 :
34 : //#include "Analysis.h"
35 : //#include "core/PlumedMain.h"
36 : //#include "core/ActionRegister.h"
37 : //#include "tools/Grid.h"
38 : //#include "tools/KernelFunctions.h"
39 : //#include "tools/IFile.h"
40 : //#include "tools/OFile.h"
41 :
42 : // The original implementation of this method was contributed
43 : // by Andrea Cesari (andreacesari90@gmail.com).
44 : // Copyright has been then transferred to PLUMED developers
45 : // (see https://github.com/plumed/plumed2/blob/master/.github/CONTRIBUTING.md)
46 :
47 : using namespace std;
48 :
49 :
50 : namespace PLMD {
51 : namespace bias {
52 :
53 : //+PLUMEDOC BIAS MAXENT
54 : /*
55 : Add a linear biasing potential on one or more variables \f$f_{i}\left(\boldsymbol{x}\right)\f$ satisfying the maximum entropy principle as proposed in Ref. \cite cesari2016maxent .
56 :
57 : \warning
58 : Notice that the syntax is still under revision and might change.
59 :
60 : The resulting biasing potential is given by:
61 : \f[
62 : V_{BIAS}(\boldsymbol{x},t)=K_{B}T\sum_{i=1}^{\#arguments}f_{i}(\boldsymbol{x},t)\lambda_{i}(t)
63 : \f]
64 : Lagrangian multipliers \f$ \lambda_{i}\f$ are updated every PACE steps according to the following update rule:
65 : \f[
66 : \lambda_{i}=\lambda_{i}+\frac{k_{i}}{1+\frac{t}{\tau_{i}}}\left(f_{exp,i}+\xi_{i}\lambda_{i}-f_{i}(\boldsymbol{x})\right)
67 : \f]
68 : \f$k_{i}\f$ sets the initial value of the learning rate and its units are \f$[observable]^{-2}ps^{-1}\f$. It can be set with the keyword KAPPA.
69 : The number of components of the KAPPA vector must be equal to the number of arguments of the action.
70 :
71 : The variable \f$ \xi_{i}(\lambda) \f$ depends on the prior chosen to model experimental errors. If a GAUSSIAN prior is used then:
72 : \f[
73 : \xi_{i}(\lambda)=-\lambda_{i}\sigma^{2}
74 : \f]
75 : where \f$ \sigma \f$ is the typical expected error on the observable \f$ f_i\f$.
76 : For a LAPLACE prior:
77 : \f[
78 : \xi_{i}(\lambda)=-\frac{\lambda_{i}\sigma^{2}}{1-\frac{\lambda_{i}^{2}\sigma^{2}}{2}}
79 : \f]
81 : The value of \f$ \xi_{i}(\lambda,t)\f$ is written in the output as a component named after the corresponding argument followed by the string _error.
82 : Setting \f$ \sigma =0\f$ is equivalent to enforcing a pure Maximum Entropy restraint without any noise modelling.
83 : This method can also be used to enforce inequality restraints, as shown in the following examples.
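: As an illustration of the error modelling discussed above, a non-zero SIGMA (optionally together with ERROR_TYPE) switches on the noise model. A minimal sketch, in which the atom indices and the SIGMA value are purely illustrative:
: \plumedfile
: DISTANCE ATOMS=7,15 LABEL=d1
: MAXENT ARG=d1 TYPE=EQUAL AT=0.2 KAPPA=35000.0 TAU=0.02 PACE=200 ERROR_TYPE=GAUSSIAN SIGMA=0.05 LABEL=restraint
: PRINT ARG=restraint.bias,restraint.d1_error
: \endplumedfile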
84 :
85 : Notice that a similar method is available as \ref EDS, although with different features and using a different optimization algorithm.
86 :
87 : \par Examples
88 :
89 : The following input tells plumed to restrain the distance between atoms 7 and 15
90 : and the distance between atoms 2 and 19, at different equilibrium
91 : values, and to print the energy of the restraint.
92 : The Lagrangian multipliers will be printed to a file called restraint.LAGMULT with a stride set by the keyword PACE to 200 steps.
93 : Moreover, plumed will compute the average of each Lagrangian multiplier in the window [TSTART,TEND] and use it to continue the simulation with fixed Lagrangian multipliers.
94 : \plumedfile
95 : DISTANCE ATOMS=7,15 LABEL=d1
96 : DISTANCE ATOMS=2,19 LABEL=d2
97 : MAXENT ...
98 : ARG=d1,d2
99 : TYPE=EQUAL
100 : AT=0.2,0.5
101 : KAPPA=35000.0,35000.0
102 : TAU=0.02,0.02
103 : PACE=200
104 : TSTART=100
105 : TEND=500
106 : LABEL=restraint
107 : ... MAXENT
108 : PRINT ARG=restraint.bias
109 : \endplumedfile
110 : The bias energy is available as the component restraint.bias, while the Lagrangian multipliers are written to the file restraint.LAGMULT.
111 : The following input tells plumed to restrain the distance between atoms 7 and 15
112 : to be greater than 0.2 and to print the energy of the restraint
113 : \plumedfile
114 : DISTANCE ATOMS=7,15 LABEL=d
115 : MAXENT ARG=d TYPE=INEQUAL> AT=0.2 KAPPA=35000.0 TAU=0.02 LABEL=restraint
116 : PRINT ARG=restraint.bias
117 : \endplumedfile
118 :
119 : (See also \ref DISTANCE and \ref PRINT).
120 :
121 : */
122 : //+ENDPLUMEDOC
123 :
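: // Bias implementing the maximum-entropy restraint documented above: it stores the Lagrangian
: // multipliers together with the bookkeeping needed to update, average and print them on the fly.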
124 : class MaxEnt : public Bias {
125 : std::vector<double> at;
126 : std::vector<double> kappa;
127 : std::vector<double> lambda;
128 : std::vector<double> avgx;
129 : std::vector<double> work;
130 : std::vector<double> oldlambda;
131 : std::vector<double> tau;
132 : std::vector<double> avglambda;
133 : std::vector<double> avglambda_restart;
134 : std::vector<double> expected_eps;
135 : std::vector<double> apply_weights;
136 : double sigma;
137 : double tstart;
138 : double tend;
139 : double avgstep; //current number of samples over which to compute the average. Check if it could be replaced by getStep()
140 : long int pace_;
141 : long int stride_;
142 : double totalWork;
143 : double BetaReweightBias;
144 : double simtemp;
145 : double reweight_bias2;
146 : vector<ActionWithValue*> biases;
147 : std::string type;
148 : std::string error_type;
149 : double alpha;
150 : double avg_counter;
151 : int learn_replica;
152 : Value* valueForce2;
153 : Value* valueWork;
154 : OFile lagmultOfile_;
155 : IFile ifile;
156 : string lagmultfname;
157 : string ifilesnames;
158 : string fmt;
159 : bool isFirstStep;
160 : bool reweight;
161 : bool no_broadcast;
162 : bool printFirstStep;
163 : std::vector<bool> done_average;
164 : int myrep,nrep;
165 : public:
166 : explicit MaxEnt(const ActionOptions&);
167 : ~MaxEnt();
168 : void calculate();
169 : void update();
170 : void update_lambda();
171 : static void registerKeywords(Keywords& keys);
172 : void ReadLagrangians(IFile &ifile);
173 : void WriteLagrangians(vector<double> &lagmult,OFile &file);
174 : double compute_error(string &err_type,double &l);
175 : double convert_lambda(string &type,double lold);
176 : void check_lambda_boundaries(string &err_type,double &l);
177 : };
178 4871 : PLUMED_REGISTER_ACTION(MaxEnt,"MAXENT")
179 :
180 51 : void MaxEnt::registerKeywords(Keywords& keys) {
181 51 : Bias::registerKeywords(keys);
182 51 : componentsAreNotOptional(keys);
183 51 : keys.use("ARG");
184 51 : keys.add("compulsory","KAPPA","0.0","specifies the initial value for the learning rate");
185 51 : keys.add("compulsory","TAU","Specify the dumping time for the learning rate.");
186 : keys.add("compulsory","TYPE","specify the restraint type. "
187 : "EQAUL to restrain the variable at a given equilibrium value"
188 : "INEQUAL< to restrain the variable to be smaller than a given value"
189 51 : "INEQUAL> to restrain the variable to be greater than a given value");
190 : keys.add("optional","ERROR_TYPE","specify the prior on the error to use."
191 : "GAUSSIAN: use a Gaussian prior"
192 51 : "LAPLACE: use a Laplace prior");
193 51 : keys.add("optional","TSTART","time in ps from where to start averaging the Lagrangian multiplier. By default no average is computed, hence lambda is updated every PACE steps");
194 51 : keys.add("optional","TEND","time in ps where to stop to compute the average of Lagrangian multiplier. From this time until the end of the simulation Lagrangian multipliers are kept fix to the average computed between TSTART and TEND;");
195 51 : keys.add("optional","ALPHA","default=1.0; To be used with LAPLACE KEYWORD, allows to choose a prior function proportional to a Gaussian times an exponential function. ALPHA=1 correspond to the LAPLACE prior.");
196 51 : keys.add("compulsory","AT","the position of the restraint");
197 51 : keys.add("optional","SIGMA","The typical erros expected on observable");
198 51 : keys.add("optional","FILE","Lagrangian multipliers output file. The default name is: label name followed by the string .LAGMULT ");
199 51 : keys.add("optional","LEARN_REPLICA","In a multiple replica environment specify which is the reference replica. By default replica 0 will be used.");
200 51 : keys.add("optional","APPLY_WEIGHTS","Vector of weights containing 1 in correspondece of each replica that will receive the lagrangian multiplier from the current one.");
201 51 : keys.add("optional","PACE","the frequency for Lagrangian multipliers update");
202 51 : keys.add("optional","PRINT_STRIDE","stride of Lagrangian multipliers output file. If no STRIDE is passed they are written every time they are updated (PACE).");
203 51 : keys.add("optional","FMT","specify format for Lagrangian multipliers files (usefulf to decrease the number of digits in regtests)");
204 51 : keys.addFlag("REWEIGHT",false,"to be used with plumed driver in order to reweight a trajectory a posteriori");
205 51 : keys.addFlag("NO_BROADCAST",false,"If active will avoid Lagrangian multipliers to be comunicated to other replicas.");
206 51 : keys.add("optional","TEMP","the system temperature. This is required if you are reweighting.");
207 51 : keys.addOutputComponent("force2","default","the instantaneous value of the squared force due to this bias potential");
208 51 : keys.addOutputComponent("work","default","the instantaneous value of the work done by the biasing force");
209 : keys.addOutputComponent("_work","default","the instantaneous value of the work done by the biasing force for each argument. "
210 : "These quantities will named with the arguments of the bias followed by "
211 51 : "the character string _work.");
212 51 : keys.addOutputComponent("_error","default","Instantaneous values of the discrepancy between the observable and the restraint center");
213 51 : keys.addOutputComponent("_coupling","default","Instantaneous values of Lagrangian multipliers. They are also written by default in a separate output file.");
214 51 : keys.use("RESTART");
215 51 : }
216 200 : MaxEnt::~MaxEnt() {
217 50 : lagmultOfile_.close();
218 150 : }
219 50 : MaxEnt::MaxEnt(const ActionOptions&ao):
220 : PLUMED_BIAS_INIT(ao),
221 50 : at(getNumberOfArguments()),
222 50 : kappa(getNumberOfArguments(),0.0),
223 50 : lambda(getNumberOfArguments(),0.0),
224 50 : avgx(getNumberOfArguments(),0.0),
225 50 : oldlambda(getNumberOfArguments(),0.0),
226 100 : tau(getNumberOfArguments(),getTimeStep()),
227 50 : avglambda(getNumberOfArguments(),0.0),
228 50 : avglambda_restart(getNumberOfArguments(),0.0),
229 50 : expected_eps(getNumberOfArguments(),0.0),
230 : sigma(0.0),
231 : pace_(100),
232 : stride_(100),
233 : alpha(1.0),
234 : avg_counter(0.0),
235 : isFirstStep(true),
236 : reweight(false),
237 : no_broadcast(false),
238 : printFirstStep(true),
239 550 : done_average(getNumberOfArguments(),false)
240 : {
241 50 : if(comm.Get_rank()==0) nrep=multi_sim_comm.Get_size();
242 50 : if(comm.Get_rank()==0) myrep=multi_sim_comm.Get_rank();
243 50 : comm.Bcast(nrep,0);
244 50 : comm.Bcast(myrep,0);
245 50 : parseFlag("NO_BROADCAST",no_broadcast);
246 : //if(no_broadcast){
247 : //for(int irep=0;irep<nrep;irep++){
248 : // if(irep!=myrep)
249 : // apply_weights[irep]=0.0;}
250 : //}
251 50 : avgstep=1.0;
252 50 : tstart=-1.0;
253 50 : tend=-1.0;
254 50 : totalWork=0.0;
255 50 : learn_replica=0;
256 :
257 50 : parse("LEARN_REPLICA",learn_replica);
258 50 : parseVector("APPLY_WEIGHTS",apply_weights);
259 50 : if(apply_weights.size()==0) apply_weights.resize(nrep,1.0);
260 50 : parseVector("KAPPA",kappa);
261 50 : parseVector("AT",at);
262 50 : parseVector("TAU",tau);
263 50 : parse("TYPE",type);
264 50 : error_type="GAUSSIAN";
265 50 : parse("ERROR_TYPE",error_type);
266 50 : parse("ALPHA",alpha);
267 50 : parse("SIGMA",sigma);
268 50 : parse("TSTART",tstart);
269 50 : if(tstart <0 && tstart != -1.0) error("TSTART should be a positive number");
270 50 : parse("TEND",tend);
271 50 : if(tend<0 && tend != -1.0) error("TEND should be a positive number");
272 50 : if(tend<tstart) error("TEND should be >= TSTART");
273 50 : lagmultfname=getLabel()+".LAGMULT";
274 50 : parse("FILE",lagmultfname);
275 50 : parse("FMT",fmt);
276 50 : parse("PACE",pace_);
277 50 : if(pace_<=0 ) error("frequency for Lagrangian multipliers update (PACE) is nonsensical");
278 50 : stride_=pace_; //if no PRINT_STRIDE is passed, the Lagrangian multipliers will be printed at each update
279 50 : parse("PRINT_STRIDE",stride_);
280 50 : if(stride_<=0 ) error("frequency for Lagrangian multipliers printing (PRINT_STRIDE) is nonsensical");
281 50 : simtemp=0.;
282 50 : parse("TEMP",simtemp);
283 50 : if(simtemp>0) simtemp*=plumed.getAtoms().getKBoltzmann();
284 0 : else simtemp=plumed.getAtoms().getKbT();
285 50 : parseFlag("REWEIGHT",reweight);
286 50 : if(simtemp<=0 && reweight) error("Set the temperature (TEMP) if you want to do reweighting.");
287 :
288 50 : checkRead();
289 :
290 50 : log.printf(" at");
291 50 : for(unsigned i=0; i<at.size(); i++) log.printf(" %f",at[i]);
292 50 : log.printf("\n");
293 50 : log.printf(" with initial learning rates (KAPPA) of");
294 50 : for(unsigned i=0; i<kappa.size(); i++) log.printf(" %f",kappa[i]);
295 50 : log.printf("\n");
296 50 : log.printf("Dumping times for the learning rates are (ps): ");
297 50 : for(unsigned i=0; i<tau.size(); i++) log.printf(" %f",tau[i]);
298 50 : log.printf("\n");
299 50 : log.printf("Lagrangian multipliers are updated every %ld steps (PACE)\n",pace_);
300 50 : log.printf("Lagrangian multipliers output file %s\n",lagmultfname.c_str());
301 50 : log.printf("Lagrangian multipliers are written every %ld steps (PRINT_STRIDE)\n",stride_);
302 50 : if(fmt.length()>0)
303 50 : log.printf("The format for real number in Lagrangian multipliers file is %s\n",fmt.c_str());
304 50 : if(tstart >-1.0 && tend>-1.0)
305 14 : log.printf("Lagrangian multipliers are averaged from %lf ps to %lf ps\n",tstart,tend);
306 50 : if(no_broadcast)
307 0 : log.printf("Using NO_BROADCAST options. Lagrangian multipliers will not be comunicated to other replicas.\n");
308 : //for(int irep=0;irep<nrep;irep++){
309 : // if(apply_weights[irep]!=0)
310 : // log.printf("%d",irep);
311 : // }
312 50 : addComponent("force2"); componentIsNotPeriodic("force2");
313 50 : addComponent("work"); componentIsNotPeriodic("work");
314 50 : valueForce2=getPntrToComponent("force2");
315 50 : valueWork=getPntrToComponent("work");
316 :
317 50 : std::string comp;
318 532 : for(unsigned i=0; i< getNumberOfArguments() ; i++) {
319 482 : comp=getPntrToArgument(i)->getName()+"_coupling";
320 482 : addComponent(comp); componentIsNotPeriodic(comp);
321 482 : comp=getPntrToArgument(i)->getName()+"_work";
322 482 : addComponent(comp); componentIsNotPeriodic(comp);
323 482 : work.push_back(0.); // initialize the work value
324 482 : comp=getPntrToArgument(i)->getName()+"_error";
325 482 : addComponent(comp); componentIsNotPeriodic(comp);
326 : }
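: // If a LAGMULT file from a previous run exists and we are restarting, read the multipliers
: // back in before (re)opening the file for output.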
327 100 : string fname;
328 50 : fname=lagmultfname;
329 50 : ifile.link(*this);
330 50 : if(ifile.FileExist(fname)) {
331 37 : ifile.open(fname);
332 37 : if(getRestart()) {
333 37 : log.printf(" Restarting from: %s\n",fname.c_str());
334 37 : ReadLagrangians(ifile);
335 37 : printFirstStep=false;
336 : }
337 37 : ifile.reset(false);
338 37 : ifile.close();
339 : }
340 :
341 50 : lagmultOfile_.link(*this);
342 50 : lagmultOfile_.open(fname);
343 100 : if(fmt.length()>0) {fmt=" "+fmt; lagmultOfile_.fmtField(fmt);}
344 50 : }
345 : ////MEMBER FUNCTIONS
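: // Read the Lagrangian multipliers (one _coupling field per argument) from a previously written
: // LAGMULT file; values falling in the [TSTART,TEND] window are accumulated for the running average.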
346 37 : void MaxEnt::ReadLagrangians(IFile &ifile)
347 : {
348 : double dummy;
349 481 : while(ifile.scanField("time",dummy)) {
350 4708 : for(unsigned j=0; j<getNumberOfArguments(); ++j) {
351 4301 : ifile.scanField(getPntrToArgument(j)->getName()+"_coupling",lambda[j]);
352 4301 : if(dummy>=tstart && dummy <=tend)
353 42 : avglambda[j]+=lambda[j];
354 4301 : if(dummy>=tend) {
355 4231 : avglambda[j]=lambda[j];
356 4231 : done_average[j]=true;
357 : }
358 : }
359 407 : if(dummy>=tstart && dummy <=tend)
360 6 : avg_counter++;
361 407 : ifile.scanField();
362 : }
363 37 : }
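: // Append the current Lagrangian multipliers to the LAGMULT output file, one _coupling column per argument.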
364 550 : void MaxEnt::WriteLagrangians(vector<double> &lagmult,OFile &file) {
365 550 : if(printFirstStep) {
366 143 : unsigned ncv=getNumberOfArguments();
367 143 : file.printField("time",getTimeStep()*getStep());
368 1144 : for(unsigned i=0; i<ncv; ++i)
369 1001 : file.printField(getPntrToArgument(i)->getName()+"_coupling",lagmult[i]);
370 143 : file.printField();
371 : } else {
372 407 : if(!isFirstStep) {
373 370 : unsigned ncv=getNumberOfArguments();
374 370 : file.printField("time",getTimeStep()*getStep());
375 4280 : for(unsigned i=0; i<ncv; ++i)
376 3910 : file.printField(getPntrToArgument(i)->getName()+"_coupling",lagmult[i]);
377 370 : file.printField();
378 : }
379 : }
380 550 : }
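: // Return the error term xi(lambda) for the chosen prior: -lambda*sigma^2 for GAUSSIAN,
: // -lambda*sigma^2/(1-lambda^2*sigma^2/(alpha+1)) for LAPLACE, and 0 when SIGMA=0.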
381 5302 : double MaxEnt::compute_error(string &err_type,double &l) {
382 5302 : double sigma2=pow(sigma,2.0);
383 5302 : double l2=convert_lambda(type,l);
384 5302 : double return_error=0;
385 5302 : if(err_type=="GAUSSIAN" && sigma!=0.0)
386 0 : return_error=-l2*sigma2;
387 : else {
388 5302 : if(err_type=="LAPLACE" && sigma!=0) {
389 5302 : return_error=-l2*sigma2/(1.0-l2*l2*sigma2/(alpha+1));
390 : }
391 : }
392 5302 : return return_error;
393 : }
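: // Map the raw multiplier onto the restraint type: returned unchanged for EQUAL, while for
: // INEQUAL> (INEQUAL<) positive (negative) values are clamped to zero so the bias acts one-sided.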
394 119118 : double MaxEnt::convert_lambda(string &type,double lold) {
395 119118 : double return_lambda=0;
396 119118 : if(type=="EQUAL")
397 113816 : return_lambda=lold;
398 : else {
399 5302 : if(type=="INEQUAL>") {
400 0 : if(lold>0.0)
401 0 : return_lambda=0.0;
402 : else
403 0 : return_lambda=lold;
404 : }
405 : else {
406 5302 : if(type=="INEQUAL<") {
407 0 : if(lold<0.0)
408 0 : return_lambda=0.0;
409 : else
410 0 : return_lambda=lold;
411 : }
412 : }
413 : }
414 119118 : return return_lambda;
415 : }
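: // With a LAPLACE prior, clamp lambda slightly inside +-sqrt(alpha+1)/sigma so that the
: // denominator of xi(lambda) never vanishes.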
416 5302 : void MaxEnt::check_lambda_boundaries(string &err_type,double &l) {
417 5302 : if(err_type=="LAPLACE" && sigma !=0 ) {
418 5302 : double l2=convert_lambda(err_type,l);
419 5302 : if(l2 <-(sqrt(alpha+1)/sigma-0.01)) {
420 0 : l=-(sqrt(alpha+1)/sigma-0.01);
421 0 : log.printf("Lambda exceeded the allowed range\n");
422 : }
423 5302 : if(l2>(sqrt(alpha+1)/sigma-0.01)) {
424 0 : l=sqrt(alpha+1)/sigma-0.01;
425 0 : log.printf("Lambda exceeded the allowed range\n");
426 : }
427 : }
428 5302 : }
429 :
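: // Apply the learning rule documented above to every Lagrangian multiplier: the update is
: // optionally reweighted by exp(-V_bias/KbT) (REWEIGHT), averaged over [TSTART,TEND], and the
: // work done on each argument is accumulated.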
430 550 : void MaxEnt::update_lambda() {
431 :
432 550 : double totalWork_=0.0;
433 550 : const double time=getTime();
434 550 : const double step=getStep();
435 550 : double KbT=simtemp;
436 : double learning_rate;
437 550 : if(reweight)
438 396 : BetaReweightBias=plumed.getBias()/KbT;
439 : else
440 154 : BetaReweightBias=0.0;
441 :
442 5852 : for(unsigned i=0; i<getNumberOfArguments(); ++i) {
443 5302 : const double k=kappa[i];
444 5302 : double cv=(getArgument(i)+compute_error(error_type,lambda[i])-at[i]);
445 5302 : if(reweight)
446 4224 : learning_rate=1.0*k/(1+step/tau[i]);
447 : else
448 1078 : learning_rate=1.0*k/(1+time/tau[i]);
449 5302 : lambda[i]+=learning_rate*cv*exp(-BetaReweightBias); //update Lagrangian multipliers and reweight them if REWEIGHT is set
450 5302 : check_lambda_boundaries(error_type,lambda[i]); //check that Lagrangian multipliers do not exceed the allowed range
451 5302 : if(time>=tstart && time <=tend && !done_average[i]) {
452 546 : avglambda[i]+=convert_lambda(type,lambda[i]); //compute the average of Lagrangian multipliers over the required time window
453 : }
454 5302 : if(time>=tend && tend >=0) { //setting tend<0 will disable this feature
455 98 : if(!done_average[i]) {
456 91 : avglambda[i]=avglambda[i]/avg_counter;
457 91 : done_average[i]=true;
458 91 : lambda[i]=avglambda[i];
459 : }
460 : else
461 7 : lambda[i]=avglambda[i]; //keep Lagrangian multipliers fixed to the previously computed average.
462 : }
463 5302 : work[i]+=(convert_lambda(type,lambda[i])-oldlambda[i])*getArgument(i); //compute the work performed in updating lambda
464 5302 : totalWork_+=work[i];
465 5302 : totalWork=totalWork_;
466 5302 : oldlambda[i]=convert_lambda(type,lambda[i]);
467 : };
468 550 : if(time>=tstart && time <=tend)
469 84 : avg_counter++;
470 550 : }
471 :
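: // Compute the linear bias KbT*sum_i lambda_i*f_i (weighted by APPLY_WEIGHTS), apply the
: // corresponding forces and fill the output components (_coupling, _work, _error, force2, work).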
472 5050 : void MaxEnt::calculate() {
473 5050 : double totf2=0.0;
474 5050 : double ene=0.0;
475 5050 : double KbT=simtemp;
476 53732 : for(unsigned i=0; i<getNumberOfArguments(); ++i) {
477 48682 : getPntrToComponent(getPntrToArgument(i)->getName()+"_error")->set(expected_eps[i]);
478 48682 : getPntrToComponent(getPntrToArgument(i)->getName()+"_work")->set(work[i]);
479 48682 : valueWork->set(totalWork);
480 48682 : getPntrToComponent(getPntrToArgument(i)->getName()+"_coupling")->set(lambda[i]);
481 48682 : const double f=-KbT*convert_lambda(type,lambda[i])*apply_weights[myrep];
482 48682 : totf2+=f*f;
483 48682 : ene+=KbT*convert_lambda(type,lambda[i])*getArgument(i)*apply_weights[myrep];
484 48682 : setOutputForce(i,f);
485 : }
486 5050 : setBias(ene);
487 5050 : valueForce2->set(totf2);
488 5050 : }
489 :
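: // Write the multipliers every PRINT_STRIDE steps, update them every PACE steps and, unless
: // NO_BROADCAST is set, broadcast them from the LEARN_REPLICA to all replicas.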
490 5050 : void MaxEnt::update() {
491 :
492 5050 : if(getStep()%stride_ == 0)
493 550 : WriteLagrangians(lambda,lagmultOfile_);
494 5050 : if(getStep()%pace_ == 0) {
495 550 : update_lambda();
496 550 : if(!no_broadcast) {
497 550 : if(comm.Get_rank()==0) //Communicate Lagrangian multipliers from the reference replica to the other replicas
498 462 : multi_sim_comm.Bcast(lambda,learn_replica);
499 : }
500 550 : comm.Bcast(lambda,0);
501 : }
502 5050 : isFirstStep=false;
503 5050 : }
504 :
505 : }
506 :
507 4821 : }
|