From abf925874bcf144bc170308d925467386977e708 Mon Sep 17 00:00:00 2001 From: Gareth Tribello <gareth.tribello@gmail.com> Date: Sat, 8 Dec 2018 17:42:03 +0000 Subject: [PATCH] Fixed spellings in manual --- patches/amber14.config | 4 +- patches/gromacs-4.5.7.config | 2 +- src/adjmat/ActionWithInputMatrix.cpp | 2 +- src/adjmat/AlignedMatrixBase.cpp | 5 +- src/adjmat/ClusterDiameter.cpp | 4 +- src/adjmat/ClusterDistribution.cpp | 4 +- src/adjmat/ClusterProperties.cpp | 2 +- src/adjmat/ClusterSize.cpp | 2 +- src/adjmat/ClusterWithSurface.cpp | 4 +- src/adjmat/ContactAlignedMatrix.cpp | 8 +-- src/adjmat/ContactMatrix.cpp | 7 +- src/adjmat/DFSClustering.cpp | 2 +- src/adjmat/DumpGraph.cpp | 4 +- src/adjmat/HbondMatrix.cpp | 14 ++-- src/adjmat/OutputCluster.cpp | 2 +- src/adjmat/SMACMatrix.cpp | 4 +- src/adjmat/Sprint.cpp | 6 +- src/adjmat/TopologyMatrix.cpp | 7 +- src/analysis/AnalysisBase.cpp | 2 +- src/analysis/Average.cpp | 6 +- src/analysis/Committor.cpp | 4 +- src/analysis/Histogram.cpp | 16 ++--- src/analysis/ReadAnalysisFrames.cpp | 4 +- src/analysis/WhamHistogram.cpp | 2 +- src/bias/ABMD.cpp | 2 +- src/bias/BiasValue.cpp | 2 +- src/bias/ExtendedLagrangian.cpp | 6 +- src/bias/External.cpp | 6 +- src/bias/MaxEnt.cpp | 22 +++---- src/bias/MetaD.cpp | 66 +++++++++---------- src/bias/MovingRestraint.cpp | 24 +++---- src/bias/PBMetaD.cpp | 36 +++++----- src/bias/ReweightTemperature.cpp | 6 +- src/cltools/Completion.cpp | 2 +- src/cltools/Driver.cpp | 12 ++-- src/cltools/GenTemplate.cpp | 5 +- src/cltools/Manual.cpp | 7 +- src/cltools/PdbRenumber.cpp | 2 +- src/cltools/SimpleMD.cpp | 6 +- src/cltools/SumHills.cpp | 16 ++--- src/cltools/pesmd.cpp | 4 +- src/colvar/Angle.cpp | 2 +- src/colvar/Constant.cpp | 3 +- src/colvar/ContactMap.cpp | 4 +- src/colvar/Coordination.cpp | 6 +- src/colvar/CoordinationBase.cpp | 6 +- src/colvar/Dimer.cpp | 12 ++-- src/colvar/Dipole.cpp | 2 +- src/colvar/Distance.cpp | 2 +- src/colvar/EEFSolv.cpp | 6 +- src/colvar/ERMSD.cpp | 6 +- src/colvar/Fake.cpp | 3 +- src/colvar/Gyration.cpp | 2 +- src/colvar/MultiRMSD.cpp | 6 +- src/colvar/PCARMSD.cpp | 4 +- src/colvar/PathMSD.cpp | 6 +- src/colvar/PropertyMap.cpp | 10 +-- src/colvar/Puckering.cpp | 6 +- src/colvar/RMSD.cpp | 14 ++-- src/colvar/Torsion.cpp | 2 +- src/core/ActionSetup.cpp | 2 +- src/core/ActionWithArguments.cpp | 4 +- src/core/ActionWithValue.cpp | 6 +- src/crystallization/BondOrientation.cpp | 6 +- src/crystallization/CubicHarmonicBase.cpp | 2 +- src/crystallization/Gradient.cpp | 2 +- .../InterMolecularTorsions.cpp | 16 ++--- src/crystallization/MoleculePlane.cpp | 2 +- src/crystallization/OrientationSphere.cpp | 2 +- src/crystallization/PolymerAngles.cpp | 2 +- src/crystallization/Q3.cpp | 12 ++-- src/crystallization/Q4.cpp | 18 ++--- src/crystallization/Q6.cpp | 18 ++--- src/crystallization/SMAC.cpp | 2 +- src/crystallization/SimpleCubic.cpp | 2 +- src/crystallization/Steinhardt.cpp | 2 +- src/crystallization/Tetrahedral.cpp | 10 +-- src/crystallization/VectorMean.cpp | 2 +- src/crystallization/VectorMultiColvar.cpp | 4 +- src/crystallization/VectorSum.cpp | 2 +- .../ClassicalMultiDimensionalScaling.cpp | 10 +-- src/dimred/PCA.cpp | 16 ++--- src/dimred/ProjectNonLandmarkPoints.cpp | 6 +- src/dimred/SketchMap.cpp | 4 +- src/dimred/SketchMapBase.cpp | 2 +- src/dimred/SketchMapConjGrad.cpp | 4 +- src/dimred/SketchMapPointwise.cpp | 8 +-- src/dimred/SketchMapSmacof.cpp | 2 +- src/dimred/SmacoffMDS.cpp | 6 +- src/drr/DynamicReferenceRestraining.cpp | 12 ++-- src/eds/EDS.cpp | 6 +- src/function/Combine.cpp | 2 
+- src/function/FuncPathMSD.cpp | 4 +- src/function/FuncSumHills.cpp | 16 ++--- src/function/LocalEnsemble.cpp | 4 +- src/function/Matheval.cpp | 2 +- src/function/Piecewise.cpp | 6 +- src/function/Stats.cpp | 2 +- src/function/Target.cpp | 15 ++--- src/generic/Debug.cpp | 2 +- src/generic/DumpAtoms.cpp | 4 +- src/generic/FitToTemplate.cpp | 14 ++-- src/generic/Flush.cpp | 2 +- src/generic/Group.cpp | 8 +-- src/generic/Plumed.cpp | 10 +-- src/generic/Read.cpp | 8 +-- src/generic/Time.cpp | 2 +- src/generic/WholeMolecules.cpp | 6 +- src/generic/WrapAround.cpp | 6 +- src/gridtools/ActionWithGrid.cpp | 2 +- src/gridtools/ActionWithInputGrid.cpp | 2 +- src/gridtools/ConvertToFES.cpp | 2 +- src/gridtools/DumpCube.cpp | 4 +- src/gridtools/DumpGrid.cpp | 6 +- src/gridtools/FindContour.cpp | 2 +- src/gridtools/FindContourSurface.cpp | 8 +-- src/gridtools/FindSphericalContour.cpp | 14 ++-- src/gridtools/FourierTransform.cpp | 2 +- src/gridtools/GridToXYZ.cpp | 2 +- src/gridtools/InterpolateGrid.cpp | 2 +- src/isdb/CS2Backbone.cpp | 18 ++--- src/isdb/Caliber.cpp | 12 ++-- src/isdb/EMMI.cpp | 2 +- src/isdb/Jcoupling.cpp | 2 +- src/isdb/Metainference.cpp | 14 ++-- src/isdb/MetainferenceBase.cpp | 6 +- src/isdb/NOE.cpp | 2 +- src/isdb/PRE.cpp | 10 +-- src/isdb/RDC.cpp | 22 +++---- src/isdb/Rescale.cpp | 18 ++--- src/isdb/SAXS.cpp | 22 +++---- src/isdb/Select.cpp | 2 +- src/isdb/Selector.cpp | 4 +- src/manyrestraints/UWalls.cpp | 4 +- src/mapping/AdaptivePath.cpp | 2 +- src/mapping/PCAVars.cpp | 8 +-- src/mapping/Path.cpp | 6 +- src/mapping/PathTools.cpp | 14 ++-- src/mapping/PropertyMap.cpp | 4 +- src/multicolvar/AlphaBeta.cpp | 8 +-- src/multicolvar/Angles.cpp | 6 +- src/multicolvar/Bridge.cpp | 2 +- src/multicolvar/CenterOfMultiColvar.cpp | 8 +-- src/multicolvar/CoordinationNumbers.cpp | 4 +- src/multicolvar/Density.cpp | 2 +- src/multicolvar/DihedralCorrelation.cpp | 10 +-- src/multicolvar/DistanceFromContour.cpp | 10 +-- src/multicolvar/Distances.cpp | 10 +-- src/multicolvar/FilterBetween.cpp | 20 +++--- src/multicolvar/FilterLessThan.cpp | 14 ++-- src/multicolvar/FilterMoreThan.cpp | 12 ++-- src/multicolvar/InPlaneDistances.cpp | 2 +- src/multicolvar/LocalAverage.cpp | 8 +-- src/multicolvar/MultiColvarBase.cpp | 6 +- src/multicolvar/MultiColvarCombine.cpp | 2 +- src/multicolvar/MultiColvarDensity.cpp | 16 ++--- src/multicolvar/NumberOfLinks.cpp | 4 +- src/multicolvar/Torsions.cpp | 6 +- src/multicolvar/VolumeAround.cpp | 2 +- src/multicolvar/VolumeBetweenContours.cpp | 6 +- src/multicolvar/VolumeCavity.cpp | 8 +-- src/multicolvar/VolumeInCylinder.cpp | 4 +- src/multicolvar/VolumeInSphere.cpp | 4 +- src/multicolvar/VolumeTetrapore.cpp | 4 +- src/multicolvar/XAngle.cpp | 4 +- src/multicolvar/XDistances.cpp | 10 +-- src/multicolvar/XYDistances.cpp | 14 ++-- src/multicolvar/XYTorsion.cpp | 8 +-- src/pamm/HBPammHydrogens.cpp | 2 +- src/pamm/HBPammMatrix.cpp | 2 +- src/pamm/PAMM.cpp | 26 ++++---- src/piv/PIV.cpp | 22 +++---- src/secondarystructure/AlphaRMSD.cpp | 8 +-- src/secondarystructure/AntibetaRMSD.cpp | 8 +-- src/secondarystructure/ParabetaRMSD.cpp | 6 +- .../SecondaryStructureRMSD.cpp | 8 +-- src/setup/Load.cpp | 2 +- src/setup/MolInfo.cpp | 6 +- src/setup/Units.cpp | 16 ++--- src/tools/HistogramBead.cpp | 8 +-- src/tools/KernelFunctions.cpp | 8 +-- src/tools/Keywords.cpp | 4 +- src/tools/PDB.cpp | 4 +- src/tools/SwitchingFunction.cpp | 4 +- src/vatom/Center.cpp | 6 +- src/vatom/FixedAtom.cpp | 4 +- src/vatom/Ghost.cpp | 5 +- src/ves/CoeffsVector.cpp | 2 +- src/ves/MD_LinearExpansionPES.cpp | 24 
+++---- src/ves/Opt_BachAveragedSGD.cpp | 26 ++++---- src/ves/Optimizer.cpp | 54 +++++++-------- src/ves/OutputBasisFunctions.cpp | 6 +- src/ves/OutputFesBias.cpp | 18 ++--- src/ves/OutputTargetDistribution.cpp | 8 +-- src/ves/TD_Chi.cpp | 10 +-- src/ves/TD_ChiSquared.cpp | 10 +-- src/ves/TD_Custom.cpp | 10 +-- src/ves/TD_Exponential.cpp | 4 +- src/ves/TD_ExponentiallyModifiedGaussian.cpp | 4 +- src/ves/TD_Gaussian.cpp | 16 ++--- src/ves/TD_GeneralizedExtremeValue.cpp | 2 +- src/ves/TD_GeneralizedNormal.cpp | 6 +- src/ves/TD_LinearCombination.cpp | 4 +- src/ves/TD_ProductCombination.cpp | 2 +- src/ves/TD_ProductDistribution.cpp | 10 +-- src/ves/TD_WellTempered.cpp | 6 +- src/ves/VesBias.cpp | 10 +-- src/ves/VesLinearExpansion.cpp | 20 +++--- src/vesselbase/ActionWithVessel.cpp | 4 +- src/vesselbase/Highest.cpp | 2 +- src/vesselbase/Histogram.cpp | 4 +- src/vesselbase/Lowest.cpp | 2 +- src/vesselbase/Mean.cpp | 2 +- src/vesselbase/Moments.cpp | 4 +- user-doc/Analysis.md | 40 +++++------ user-doc/CollectiveVariables.md | 36 +++++----- user-doc/Files.md | 4 +- user-doc/Functions.md | 2 +- user-doc/GettingStarted.md | 8 +-- user-doc/Group.md | 6 +- user-doc/Installation.md | 66 +++++++++---------- user-doc/Introduction.md | 8 +-- user-doc/Miscelaneous.md | 30 ++++----- user-doc/Modules.md | 6 +- user-doc/Performances.md | 42 ++++++------ user-doc/Regex.md | 4 +- user-doc/VES.md | 8 +-- user-doc/bibliography.bib | 11 ++++ user-doc/tutorials/others/isdb-1.txt | 10 +-- 229 files changed, 944 insertions(+), 936 deletions(-) diff --git a/patches/amber14.config b/patches/amber14.config index 1f859fdf6..f8539419a 100644 --- a/patches/amber14.config +++ b/patches/amber14.config @@ -17,10 +17,10 @@ add to the cntrl input namelist these two fields: plumed=1 , plumedfile='plumed.dat' -The first is switching plumed on, the second is specifying the name of the plumed +The first is switching PLUMED on, the second is specifying the name of the PLUMED input file. -This patch is compatible with the MPI version of sander and support +This patch is compatible with the MPI version of sander and supports multisander. However, replica exchange is not supported. Multisander can thus only be used for multiple walkers metadynamics or for ensemble restraints. diff --git a/patches/gromacs-4.5.7.config b/patches/gromacs-4.5.7.config index 1b27e03f0..09ed90375 100644 --- a/patches/gromacs-4.5.7.config +++ b/patches/gromacs-4.5.7.config @@ -23,7 +23,7 @@ cat << EOF PLUMED can be incorporated into gromacs using the standard patching procedure. Patching must be done in the gromacs source directory _after_ gromacs has been configured but _before_ gromacs is compiled. -Gromcas should be configured with ./configure (not cmake). +Gromacs should be configured with ./configure (not cmake). To enable PLUMED in a gromacs simulation one should use mdrun with an extra -plumed flag. 
The flag can be used to diff --git a/src/adjmat/ActionWithInputMatrix.cpp b/src/adjmat/ActionWithInputMatrix.cpp index cbb2e1831..70eab61ba 100644 --- a/src/adjmat/ActionWithInputMatrix.cpp +++ b/src/adjmat/ActionWithInputMatrix.cpp @@ -31,7 +31,7 @@ namespace adjmat { void ActionWithInputMatrix::registerKeywords( Keywords& keys ) { MultiColvarBase::registerKeywords( keys ); - keys.add("compulsory","MATRIX","the action that calcualtes the adjacency matrix vessel we would like to analyse"); + keys.add("compulsory","MATRIX","the action that calculates the adjacency matrix vessel we would like to analyze"); } diff --git a/src/adjmat/AlignedMatrixBase.cpp b/src/adjmat/AlignedMatrixBase.cpp index 73c5869de..4f523386b 100644 --- a/src/adjmat/AlignedMatrixBase.cpp +++ b/src/adjmat/AlignedMatrixBase.cpp @@ -41,9 +41,8 @@ void AlignedMatrixBase::registerKeywords( Keywords& keys ) { "have an orientation so your list will be a list of the labels of \\ref mcolv or \\ref multicolvarfunction " "as PLUMED calculates the orientations of molecules within these operations. Please note also that the majority " "of \\ref mcolv and \\ref multicolvarfunction do not calculate a molecular orientation."); - keys.add("numbered","SWITCH","This keyword is used if you want to employ an alternative to the continuous swiching function defined above. " - "The following provides information on the \\ref switchingfunction that are available. " - "When this keyword is present you no longer need the NN, MM, D_0 and R_0 keywords."); + keys.add("numbered","SWITCH","This keyword is used if you want to employ an alternative to the continuous switching function defined above. " + "The following provides information on the \\ref switchingfunction that are available."); } AlignedMatrixBase::AlignedMatrixBase( const ActionOptions& ao ): diff --git a/src/adjmat/ClusterDiameter.cpp b/src/adjmat/ClusterDiameter.cpp index 10be1fde5..3d2202930 100644 --- a/src/adjmat/ClusterDiameter.cpp +++ b/src/adjmat/ClusterDiameter.cpp @@ -28,9 +28,9 @@ Print out the diameter of one of the connected components As discussed in the section of the manual on \ref contactmatrix a useful tool for developing complex collective variables is the notion of the so called adjacency matrix. An adjacency matrix is an \f$N \times N\f$ matrix in which the \f$i\f$th, \f$j\f$th element tells you whether -or not the \f$i\f$th and \f$j\f$th atoms/molecules from a set of \f$N\f$ atoms/molecules are adjacent or not. When analysing these matrix +or not the \f$i\f$th and \f$j\f$th atoms/molecules from a set of \f$N\f$ atoms/molecules are adjacent or not. When analyzing these matrix we can treat them as a graph and find connected components using some clustering algorithm. This action is used in tandem with this form of analysis -to output the largest of the distances between the paris of atoms that are connected together in a particular connected component. It is important to +to output the largest of the distances between the pairs of atoms that are connected together in a particular connected component. It is important to note that the quantity that is output by this action is not differentiable. As such it cannot be used as a collective variable in a biased simulation. 
\par Examples diff --git a/src/adjmat/ClusterDistribution.cpp b/src/adjmat/ClusterDistribution.cpp index 5f1c84068..b020fbf81 100644 --- a/src/adjmat/ClusterDistribution.cpp +++ b/src/adjmat/ClusterDistribution.cpp @@ -40,8 +40,8 @@ that atoms with a high value for the local q6 Steinhardt parameter have with oth value for the local q6 Steinhardt parameter is then computed. A contact matrix is then computed that measures whether atoms atoms \f$i\f$ and \f$j\f$ have a high value for this coordination number and if they are within 3.6 nm of each other. The connected components of this matrix are then found using a depth first clustering -algorithm on the corresponding graph. The number of componets in this graph that contain more than 27 atoms is then computed. -As discussed in \cite tribello-clustering this input was used to analyse the formation of a polycrystal of GeTe from amorphous +algorithm on the corresponding graph. The number of components in this graph that contain more than 27 atoms is then computed. +As discussed in \cite tribello-clustering this input was used to analyze the formation of a polycrystal of GeTe from amorphous GeTe. \plumedfile diff --git a/src/adjmat/ClusterProperties.cpp b/src/adjmat/ClusterProperties.cpp index d708ae2e8..0d99c73ce 100644 --- a/src/adjmat/ClusterProperties.cpp +++ b/src/adjmat/ClusterProperties.cpp @@ -30,7 +30,7 @@ Calculate properties of the distribution of some quantities that are part of a c This collective variable was developed for looking at nucleation phenomena, where you are interested in using studying the behavior of atoms in small aggregates or nuclei. In these sorts of problems you might be interested in the degree the atoms in a nucleus have adopted their crystalline -structure or (in the case of heterogenous nucleation of a solute from a solvent) you might be +structure or (in the case of heterogeneous nucleation of a solute from a solvent) you might be interested in how many atoms are present in the largest cluster \cite tribello-clustering. \par Examples diff --git a/src/adjmat/ClusterSize.cpp b/src/adjmat/ClusterSize.cpp index af1eed8eb..cf2ced065 100644 --- a/src/adjmat/ClusterSize.cpp +++ b/src/adjmat/ClusterSize.cpp @@ -28,7 +28,7 @@ Gives the number of atoms in the connected component As discussed in the section of the manual on \ref contactmatrix a useful tool for developing complex collective variables is the notion of the so called adjacency matrix. An adjacency matrix is an \f$N \times N\f$ matrix in which the \f$i\f$th, \f$j\f$th element tells you whether -or not the \f$i\f$th and \f$j\f$th atoms/molecules from a set of \f$N\f$ atoms/molecules are adjacent or not. When analysing these matrix +or not the \f$i\f$th and \f$j\f$th atoms/molecules from a set of \f$N\f$ atoms/molecules are adjacent or not. When analyzing these matrix we can treat them as a graph and find connected components using some clustering algorithm. This action is used in tandem with this form of analysis to output the number of atoms that are connected together in a particular connected component. It is important to note that the quantity that is output by this action is not differentiable. As such it cannot be used as a collective variable in a biased simulation. 
diff --git a/src/adjmat/ClusterWithSurface.cpp b/src/adjmat/ClusterWithSurface.cpp index a3e010e5a..2c80532ce 100644 --- a/src/adjmat/ClusterWithSurface.cpp +++ b/src/adjmat/ClusterWithSurface.cpp @@ -30,11 +30,11 @@ Take a connected component that was found using a clustering algorithm and creat As discussed in the section of the manual on \ref contactmatrix a useful tool for developing complex collective variables is the notion of the so called adjacency matrix. An adjacency matrix is an \f$N \times N\f$ matrix in which the \f$i\f$th, \f$j\f$th element tells you whether -or not the \f$i\f$th and \f$j\f$th atoms/molecules from a set of \f$N\f$ atoms/molecules are adjacent or not. When analysing these matrix +or not the \f$i\f$th and \f$j\f$th atoms/molecules from a set of \f$N\f$ atoms/molecules are adjacent or not. When analyzing these matrix we can treat them as a graph and find connected components using some clustering algorithm. This action is used in tandem with this form of analysis and takes one of the connected components that was found during this analysis and creates a new cluster that includes all the atoms within the connected component that was found together that were within a certain cutoff distance of the atoms in the connected component. This form of analysis -has been used sucessfully in the forward flux sampling simulations described in this paper \cite gab-ice-kaolinite +has been used successfully in the forward flux sampling simulations described in this paper \cite gab-ice-kaolinite \par Examples diff --git a/src/adjmat/ContactAlignedMatrix.cpp b/src/adjmat/ContactAlignedMatrix.cpp index b23de0af2..499abb094 100644 --- a/src/adjmat/ContactAlignedMatrix.cpp +++ b/src/adjmat/ContactAlignedMatrix.cpp @@ -30,7 +30,7 @@ Adjacency matrix in which two molecule are adjacent if they are within a certain As discussed in the section of the manual on \ref contactmatrix a useful tool for developing complex collective variables is the notion of the so called adjacency matrix. An adjacency matrix is an \f$N \times N\f$ matrix in which the \f$i\f$th, \f$j\f$th element tells you whether or not the \f$i\f$th and \f$j\f$th atoms/molecules from a set of \f$N\f$ atoms/molecules are adjacent or not. These matrices can then be further -analysed using a number of other algorithms as is detailed in \cite tribello-clustering. +analyzed using a number of other algorithms as is detailed in \cite tribello-clustering. For this action the elements of the adjacency matrix are calculated using: @@ -50,7 +50,7 @@ objects \f$i\f$ and \f$j\f$. The example input below is necessarily but gives you an idea of what can be achieved using this action. The orientations and positions of four molecules are defined using the \ref MOLECULES action as the position of the -centeres of mass of the two atoms specified and the direction of the vector connecting the two atoms that were specified. +centers of mass of the two atoms specified and the direction of the vector connecting the two atoms that were specified. A \f$4 \times 4\f$ matrix is then computed using the formula above. The \f$ij\f$-element of this matrix tells us whether or not atoms \f$i\f$ and \f$j\f$ are within 0.1 nm of each other and whether or not the dot-product of their orientation vectors is greater than 0.5. The sum of the rows of this matrix are then computed. The sums of the \f$i\f$th row of this matrix tells us how @@ -60,8 +60,8 @@ molecule \f$i\f$. 
We thus calculate the number of these "coordination numbers" \plumedfile m1: MOLECULES MOL1=1,2 MOL2=3,4 MOL3=5,6 MOL4=7,8 mat: ALIGNED_MATRIX ATOMS=m1 SWITCH={RATIONAL R_0=0.1} ORIENTATION_SWITCH={RATIONAL R_0=0.1 D_MAX=0.5} -rr: ROWSUMS MATRIX=mat MORE_THAN={RATIONAL D_0=1.0 R_0=0.1} -PRINT ARG=rr.* FILE=colvar +row: ROWSUMS MATRIX=mat MORE_THAN={RATIONAL D_0=1.0 R_0=0.1} +PRINT ARG=row.* FILE=colvar \endplumedfile */ diff --git a/src/adjmat/ContactMatrix.cpp b/src/adjmat/ContactMatrix.cpp index 0fa2aecc5..d9a6e45c6 100644 --- a/src/adjmat/ContactMatrix.cpp +++ b/src/adjmat/ContactMatrix.cpp @@ -32,7 +32,7 @@ Adjacency matrix in which two atoms are adjacent if they are within a certain cu As discussed in the section of the manual on \ref contactmatrix a useful tool for developing complex collective variables is the notion of the so called adjacency matrix. An adjacency matrix is an \f$N \times N\f$ matrix in which the \f$i\f$th, \f$j\f$th element tells you whether or not the \f$i\f$th and \f$j\f$th atoms/molecules from a set of \f$N\f$ atoms/molecules are adjacent or not. These matrices can then be further -analysed using a number of other algorithms as is detailed in \cite tribello-clustering. +analyzed using a number of other algorithms as is detailed in \cite tribello-clustering. For this action the elements of the contact matrix are calculated using: @@ -89,9 +89,8 @@ void ContactMatrix::registerKeywords( Keywords& keys ) { "the atomic positions you can use a \\ref DENSITY command to specify a group of atoms. Specifying your atomic positions using labels of " "other \\ref mcolv or \\ref multicolvarfunction commands is useful, however, as you can then exploit a much wider " "variety of functions of the contact matrix as described in \\ref contactmatrix"); - keys.add("numbered","SWITCH","This keyword is used if you want to employ an alternative to the continuous swiching function defined above. " - "The following provides information on the \\ref switchingfunction that are available. " - "When this keyword is present you no longer need the NN, MM, D_0 and R_0 keywords."); + keys.add("numbered","SWITCH","This keyword is used if you want to employ an alternative to the continuous switching function defined above. " + "The following provides information on the \\ref switchingfunction that are available. "); // I added these keywords so I can test the results I get for column and row sums against output from COORDINATIONNUMBERS /// These should never be used in production as I think they will be much slower than COORDINATIONNUMBERS keys.add("hidden","ATOMSA",""); keys.add("hidden","ATOMSB",""); diff --git a/src/adjmat/DFSClustering.cpp b/src/adjmat/DFSClustering.cpp index 82017a971..3a247b5d5 100644 --- a/src/adjmat/DFSClustering.cpp +++ b/src/adjmat/DFSClustering.cpp @@ -36,7 +36,7 @@ Find the connected components of the matrix using the depth first search cluster As discussed in the section of the manual on \ref contactmatrix a useful tool for developing complex collective variables is the notion of the so called adjacency matrix. An adjacency matrix is an \f$N \times N\f$ matrix in which the \f$i\f$th, \f$j\f$th element tells you whether or not the \f$i\f$th and \f$j\f$th atoms/molecules from a set of \f$N\f$ atoms/molecules are adjacent or not. As detailed in \cite tribello-clustering -these matrices provide a representation of a graph and can thus can be analysed using tools from graph theory. 
This particular action performs +these matrices provide a representation of a graph and can thus can be analyzed using tools from graph theory. This particular action performs a depth first search clustering to find the connected components of this graph. You can read more about depth first search here: https://en.wikipedia.org/wiki/Depth-first_search diff --git a/src/adjmat/DumpGraph.cpp b/src/adjmat/DumpGraph.cpp index 3fa483188..56f814b56 100644 --- a/src/adjmat/DumpGraph.cpp +++ b/src/adjmat/DumpGraph.cpp @@ -32,7 +32,7 @@ namespace adjmat { //+PLUMEDOC CONCOMP DUMPGRAPH /* -Write out the connnectivity of the nodes in the graph in dot format. +Write out the connectivity of the nodes in the graph in dot format. \par Examples @@ -64,7 +64,7 @@ PLUMED_REGISTER_ACTION(DumpGraph,"DUMPGRAPH") void DumpGraph::registerKeywords( Keywords& keys ) { Action::registerKeywords( keys ); ActionPilot::registerKeywords( keys ); - keys.add("compulsory","MATRIX","the action that calcualtes the adjacency matrix vessel we would like to analyse"); + keys.add("compulsory","MATRIX","the action that calculates the adjacency matrix vessel we would like to analyze"); keys.add("compulsory","STRIDE","1","the frequency with which you would like to output the graph"); keys.add("compulsory","FILE","the name of the file on which to output the data"); keys.add("compulsory","MAXCONNECT","0","maximum number of connections that can be formed by any given node in the graph. " diff --git a/src/adjmat/HbondMatrix.cpp b/src/adjmat/HbondMatrix.cpp index 3bdc06e71..d5e4908b8 100644 --- a/src/adjmat/HbondMatrix.cpp +++ b/src/adjmat/HbondMatrix.cpp @@ -34,7 +34,7 @@ Adjacency matrix in which two atoms are adjacent if there is a hydrogen bond bet As discussed in the section of the manual on \ref contactmatrix a useful tool for developing complex collective variables is the notion of the so called adjacency matrix. An adjacency matrix is an \f$N \times N\f$ matrix in which the \f$i\f$th, \f$j\f$th element tells you whether or not the \f$i\f$th and \f$j\f$th atoms/molecules from a set of \f$N\f$ atoms/molecules are adjacent or not. These matrices can then be further -analysed using a number of other algorithms as is detailed in \cite tribello-clustering. +analyzed using a number of other algorithms as is detailed in \cite tribello-clustering. For this action the elements of the adjacency matrix are calculated using: @@ -46,12 +46,12 @@ This expression was derived by thinking about how to detect if there is a hydrog if the hydrogen bond is present atoms \f$i\f$ and \f$j\f$ should be within a certain cutoff distance. In addition, there should be a hydrogen within a certain cutoff distance of atom \f$i\f$ and this hydrogen should lie on or close to the vector connecting atoms \f$i\f$ and \f$j\f$. As such \f$\sigma_{oo}( |\mathbf{r}_{ij}| )\f$ is a \ref switchingfunction that acts on the modulus of the vector connecting atom \f$i\f$ to atom -\f$j\f$. The sum over \f$k\f$ then runs over all the hydrogen atoms that are specified using using HYDROGEN keyword. \f$\sigma_{oh}( |\mathbf{r}_{ik}| )\f$ -is a \ref switchingfunction that acts on the modulus of the vector connecting atom \f$i\f$ to atom \f$k\f$ and \f$\sigma_{\theta}( \theta_{kij} )\f$ +\f$j\f$. The sum over \f$k\f$ then runs over all the hydrogen atoms that are specified using using HYDROGEN keyword. 
\f$\sigma_{oh}(|\mathbf{r}_{ik}|)\f$ +is a \ref switchingfunction that acts on the modulus of the vector connecting atom \f$i\f$ to atom \f$k\f$ and \f$\sigma_{\theta}(\theta_{kij})\f$ is a \ref switchingfunction that acts on the angle between the vector connecting atoms \f$i\f$ and \f$j\f$ and the vector connecting atoms \f$i\f$ and \f$k\f$. -It is important to note that hydrogen bonds, unlike regular bonds, are asymetric. In other words, the hydrogen atom does not sit at the +It is important to note that hydrogen bonds, unlike regular bonds, are asymmetric. In other words, the hydrogen atom does not sit at the mid point between the two other atoms in this three-center bond. As a result of this adjacency matrices calculated using \ref HBOND_MATRIX are not symmetric like those calculated by \ref CONTACT_MATRIX. One consequence of this fact is that the quantities found by performing \ref ROWSUMS and \ref COLUMNSUMS on a square \ref HBOND_MATRIX are not the same as they would be if you performed \ref ROWSUMS and @@ -59,8 +59,8 @@ symmetric like those calculated by \ref CONTACT_MATRIX. One consequence of this \par Examples -The following input can be used to analyse the number of hydrogen bonds each of the oxygen atoms in a box of water participates in. Each -water molecule can participate in a hydrogen bond in one of two ways. It can either donate its hydrogens to the neighbouring oxygen or +The following input can be used to analyze the number of hydrogen bonds each of the oxygen atoms in a box of water participates in. Each +water molecule can participate in a hydrogen bond in one of two ways. It can either donate one of its hydrogen atom to the neighboring oxygen or it can accept a bond between the hydrogen of a neighboring water molecule and its own oxygen. The input below allows you to output information on the number of hydrogen bonds each of the water molecules donates and accepts. This information is output in two xyz files which each contain five columns of data. The first four of these columns are a label for the atom and the x, y and z position of the oxygen. The last column is then @@ -109,7 +109,7 @@ PLUMED_REGISTER_ACTION(HBondMatrix,"HBOND_MATRIX") void HBondMatrix::registerKeywords( Keywords& keys ) { AdjacencyMatrixBase::registerKeywords( keys ); keys.add("atoms","ATOMS","The list of atoms which can be part of a hydrogen bond. When this command is used the set of atoms that can donate a " - "hydrogen bond is assumed to be the same as the set of atoms that can form hydrogen bonds. The atoms involved must be specified" + "hydrogen bond is assumed to be the same as the set of atoms that can form hydrogen bonds. The atoms involved must be specified " "as a list of labels of \\ref mcolv or labels of a \\ref multicolvarfunction actions. If you would just like to use " "the atomic positions you can use a \\ref DENSITY command to specify a group of atoms. 
Specifying your atomic positions using labels of " "other \\ref mcolv or \\ref multicolvarfunction commands is useful, however, as you can then exploit a much wider " diff --git a/src/adjmat/OutputCluster.cpp b/src/adjmat/OutputCluster.cpp index 66c58da60..f44386f55 100644 --- a/src/adjmat/OutputCluster.cpp +++ b/src/adjmat/OutputCluster.cpp @@ -96,7 +96,7 @@ void OutputCluster::registerKeywords( Keywords& keys ) { keys.add("compulsory","STRIDE","1","the frequency with which you would like to output the atoms in the cluster"); keys.add("compulsory","FILE","the name of the file on which to output the details of the cluster"); keys.add("compulsory","MAXDEPTH","6","maximum depth for searches over paths to reconstruct clusters for PBC"); - keys.add("compulsory","MAXGOES","200","number of times to run searches to reconstuct clusters"); + keys.add("compulsory","MAXGOES","200","number of times to run searches to reconstruct clusters"); keys.addFlag("MAKE_WHOLE",false,"reconstruct the clusters and remove all periodic boundary conditions."); } diff --git a/src/adjmat/SMACMatrix.cpp b/src/adjmat/SMACMatrix.cpp index 12417a11d..1bffeee71 100644 --- a/src/adjmat/SMACMatrix.cpp +++ b/src/adjmat/SMACMatrix.cpp @@ -38,7 +38,7 @@ A_{ij} = \sigma(r_{ij}) \sum_n K_n(\theta_{ij}) In this expression \f$r_{ij}\f$ is the distance between molecule \f$i\f$ and molecule \f$j\f$ and \f$\sigma(r_{ij}\f$ is a \ref switchingfunction that acts on this distance. The $K_n functions are \ref kernelfunctions that take the torsion angle, \f$\theta_{ij}\f$, between the internal orientation vectors for molecules \f$i\f$ and \f$j\f$ as input. These kernel functions should be set so that they are -equal to one when the relative orientation of the moleclues are as they are in the solid and equal to zero otherwise. +equal to one when the relative orientation of the molecules are as they are in the solid and equal to zero otherwise. As the above matrix element is a product of functions it is only equal to one when the centers of mass of molecules \f$i\f$ and\f$j\f$ are with a certain distance of each other and when the molecules are aligned in some desirable way. @@ -47,7 +47,7 @@ are with a certain distance of each other and when the molecules are aligned in In the following example an adjacency matrix is constructed in which the \f$(i,j)\f$ element is equal to one if molecules \f$i\f$ and \f$j\f$ are within 6 angstroms of each other and if the torsional angle between the orientations of these molecules is close to 0 or \f$\pi\f$. The various connected components of this matrix are determined using the -\ref DFSCLUSTERING algorithm and then the size of the largest cluster of connectes molecules is output to a colvar file +\ref DFSCLUSTERING algorithm and then the size of the largest cluster of connected molecules is output to a colvar file \plumedfile UNITS LENGTH=A diff --git a/src/adjmat/Sprint.cpp b/src/adjmat/Sprint.cpp index fe3eb92f3..5cb21c7b8 100644 --- a/src/adjmat/Sprint.cpp +++ b/src/adjmat/Sprint.cpp @@ -53,7 +53,7 @@ SPRINT MATRIX=mat LABEL=ss PRINT ARG=ss.* FILE=colvar \endplumedfile -This example input calculates the 14 SPRINT coordinates foa a molecule composed of 7 hydrogen and +This example input calculates the 14 SPRINT coordinates for a molecule composed of 7 hydrogen and 7 carbon atoms.
Once again two atoms are adjacent if they are within a cutoff: \plumedfile @@ -111,8 +111,8 @@ void Sprint::registerKeywords( Keywords& keys ) { ActionWithInputMatrix::registerKeywords( keys ); componentsAreNotOptional(keys); keys.addOutputComponent("coord","default","all \\f$n\\f$ sprint coordinates are calculated and then stored in increasing order. " - "the smallest sprint coordinate will be labelled <em>label</em>.coord-1, " - "the second smallest will be labelleled <em>label</em>.coord-1 and so on"); + "the smallest sprint coordinate will be labeled <em>label</em>.coord-1, " + "the second smallest will be labeled <em>label</em>.coord-2 and so on"); } Sprint::Sprint(const ActionOptions&ao): diff --git a/src/adjmat/TopologyMatrix.cpp b/src/adjmat/TopologyMatrix.cpp index 0b4de263e..72767739d 100644 --- a/src/adjmat/TopologyMatrix.cpp +++ b/src/adjmat/TopologyMatrix.cpp @@ -85,11 +85,10 @@ void TopologyMatrix::registerKeywords( Keywords& keys ) { "other \\ref mcolv or \\ref multicolvarfunction commands is useful, however, as you can then exploit a much wider " "variety of functions of the contact matrix as described in \\ref contactmatrix"); keys.add("atoms","ATOMS",""); - keys.add("numbered","SWITCH","This keyword is used if you want to employ an alternative to the continuous swiching function defined above. " - "The following provides information on the \\ref switchingfunction that are available. " - "When this keyword is present you no longer need the NN, MM, D_0 and R_0 keywords."); + keys.add("numbered","SWITCH","This keyword is used if you want to employ an alternative to the continuous switching function defined above. " + "The following provides information on the \\ref switchingfunction that are available."); keys.add("numbered","RADIUS",""); - keys.add("numbered","CYLINDER_SWITCH","a switching function on ( r_ij . r_ik - 1 )/r_ij"); + keys.add("numbered","CYLINDER_SWITCH","a switching function on \\f$(r_{ij}\\cdot r_{ik}-1)/r_{ij}\\f$"); keys.add("numbered","BIN_SIZE","the size to use for the bins"); keys.add("compulsory","DENSITY_THRESHOLD",""); keys.add("compulsory","SIGMA","the width of the function to be used for kernel density estimation"); diff --git a/src/analysis/AnalysisBase.cpp b/src/analysis/AnalysisBase.cpp index 85836987e..dcacf2e1d 100644 --- a/src/analysis/AnalysisBase.cpp +++ b/src/analysis/AnalysisBase.cpp @@ -32,7 +32,7 @@ void AnalysisBase::registerKeywords( Keywords& keys ) { ActionWithValue::registerKeywords( keys ); ActionAtomistic::registerKeywords( keys ); ActionWithArguments::registerKeywords( keys ); keys.remove("NUMERICAL_DERIVATIVES"); ActionWithVessel::registerKeywords( keys ); keys.remove("TOL"); keys.reset_style("TIMINGS","hidden"); keys.isAnalysis(); - keys.add("atoms-2","USE_OUTPUT_DATA_FROM","use the ouput of the analysis performed by this object as input to your new analysis object"); + keys.add("atoms-2","USE_OUTPUT_DATA_FROM","use the output of the analysis performed by this object as input to your new analysis object"); } AnalysisBase::AnalysisBase(const ActionOptions&ao): diff --git a/src/analysis/Average.cpp b/src/analysis/Average.cpp index 085c5fb44..b3bbfb72f 100644 --- a/src/analysis/Average.cpp +++ b/src/analysis/Average.cpp @@ -34,7 +34,7 @@ The ensemble average for a non-periodic, collective variable, \f$s\f$ is given b \f] Here the sum runs over a the trajectory and \f$s(t')\f$ is used to denote the value of the collective variable -at time \f$t'\f$. The final quantity evalulated is a weighted +at time \f$t'\f$.
The final quantity evaluated is a weighted average as the weights, \f$w(t')\f$, allow us to negate the effect any bias might have on the region of phase space sampled by the system. This is discussed in the section of the manual on \ref Analysis. @@ -60,7 +60,7 @@ PRINT ARG=d1a FILE=colvar STRIDE=100 The following example calculates the ensemble average for the torsional angle involving atoms 1, 2, 3 and 4. At variance with the previous example this quantity is periodic so the second formula in the above introduction is used to calculate the average. Furthermore, by using the CLEAR keyword we have specified that block averages -are to be calculated. Consequently, after 100 steps all the information aquired thus far in the simulation is +are to be calculated. Consequently, after 100 steps all the information acquired thus far in the simulation is forgotten and the process of averaging is begun again. The quantities output in the colvar file are thus the block averages taken over the first 100 frames of the trajectory, the block average over the second 100 frames of trajectory and so on. @@ -73,7 +73,7 @@ PRINT ARG=t1a FILE=colvar STRIDE=100 This third example incorporates a bias. Notice that the effect the bias has on the ensemble average is removed by taking advantage of the \ref REWEIGHT_BIAS method. The final ensemble averages output to the file are thus block ensemble averages for the -unbiased canononical ensemble at a temperature of 300 K. +unbiased canonical ensemble at a temperature of 300 K. \plumedfile t1: TORSION ATOMS=1,2,3,4 diff --git a/src/analysis/Committor.cpp b/src/analysis/Committor.cpp index 5dfeddb5e..13509a1db 100644 --- a/src/analysis/Committor.cpp +++ b/src/analysis/Committor.cpp @@ -35,7 +35,7 @@ Does a committor analysis. \par Examples The following input monitors two torsional angles during a simulation, -defines two basins (A and B) as a function of the two torsions and +defines two basins (A and B) as a function of the two torsion angles and stops the simulation when it falls in one of the two. In the log file will be shown the latest values for the CVs and the basin reached. \plumedfile @@ -84,7 +84,7 @@ void Committor::registerKeywords( Keywords& keys ) { keys.add("numbered", "BASIN_LL","List of lower limits for basin #"); keys.add("numbered", "BASIN_UL","List of upper limits for basin #"); keys.reset_style("BASIN_LL","compulsory"); keys.reset_style("BASIN_UL","compulsory"); - keys.add("compulsory","STRIDE","1","the frequency with which the CVs are analysed"); + keys.add("compulsory","STRIDE","1","the frequency with which the CVs are analyzed"); keys.add("optional","FILE","the name of the file on which to output the reached basin"); keys.add("optional","FMT","the format that should be used to output real numbers"); keys.addFlag("NOSTOP",false,"if true do not stop the simulation when reaching a basin but just keep track of it"); diff --git a/src/analysis/Histogram.cpp b/src/analysis/Histogram.cpp index 6631f35c9..c8baaa5d4 100644 --- a/src/analysis/Histogram.cpp +++ b/src/analysis/Histogram.cpp @@ -62,19 +62,19 @@ We calculate histograms within PLUMED using a method known as kernel density est https://en.wikipedia.org/wiki/Kernel_density_estimation In PLUMED the value of \f$\zeta\f$ at each discrete instant in time in the trajectory is accumulated. 
A kernel, \f$K(\zeta-\zeta(t'),\sigma)\f$, -centered at the current value, \f$\zeta(t)\f$, of this quantity is generated with a bandwith \f$\sigma\f$, which +centered at the current value, \f$\zeta(t)\f$, of this quantity is generated with a bandwidth \f$\sigma\f$, which is set by the user. These kernels are then used to accumulate the ensemble average for the probability density: \f[ \langle P(\zeta) \rangle = \frac{ \sum_{t'=0}^t w(t') K(\zeta-\zeta(t'),\sigma) }{ \sum_{t'=0}^t w(t') } \f] -Here the sums run over a portion of the trajectory specified by the user. The final quantity evalulated is a weighted +Here the sums run over a portion of the trajectory specified by the user. The final quantity evaluated is a weighted average as the weights, \f$w(t')\f$, allow us to negate the effect any bias might have on the region of phase space sampled by the system. This is discussed in the section of the manual on \ref Analysis. A discrete analogue of kernel density estimation can also be used. In this analogue the kernels in the above formula -are replaced by dirac delta functions. When this method is used the final function calculated is no longer a probability +are replaced by Dirac delta functions. When this method is used the final function calculated is no longer a probability density - it is instead a probability mass function as each element of the function tells you the value of an integral between two points on your grid rather than the value of a (continuous) function on a grid. @@ -101,17 +101,17 @@ weighted average. Furthermore, the weights should be taken into account when th where \f$W_i\f$ is the sum of all the weights for the \f$i\f$th block of data. -If we wish to caclulate a normalized histogram we must calculate ensemble averages from our biased simulation using: +If we wish to calculate a normalized histogram we must calculate ensemble averages from our biased simulation using: \f[ \langle H(x) \rangle = \frac{\sum_{t=1}^M w_t K( x - x_t,\sigma) }{\sum_{t=1}^M w_t} \f] -where the sums runs over the trajectory, \f$w_t\f$ is the weight of the \f$t\f$th trajectory frame, \f$x_t\f$ is the value of the cv for the \f$t\f$th +where the sum runs over the trajectory, \f$w_t\f$ is the weight of the \f$t\f$th trajectory frame, \f$x_t\f$ is the value of the CV for the \f$t\f$th trajectory frame and \f$K\f$ is a kernel function centered on \f$x_t\f$ with bandwidth \f$\sigma\f$. The quantity that is evaluated is the value of the normalized histogram at point \f$x\f$. The following ensemble average will be calculated if you use the NORMALIZATION=true option in HISTOGRAM. If the ensemble average is calculated in this way we must calculate the associated error bars from our block averages using the second of the expressions above. -A number of works have shown that when biased simulations are performed it is often better to calculate an unormalized estimate of the histogram using: +A number of works have shown that when biased simulations are performed it is often better to calculate an estimate of the histogram that is not normalized using: \f[ \langle H(x) \rangle = \frac{1}{M} \sum_{t=1}^M w_t K( x - x_t,\sigma) \f] @@ -122,7 +122,7 @@ block averages. \par Examples The following input monitors two torsional angles during a simulation -and outputs a continuos histogram as a function of them at the end of the simulation. +and outputs a continuous histogram as a function of them at the end of the simulation.
\plumedfile TORSION ATOMS=1,2,3,4 LABEL=r1 TORSION ATOMS=2,3,4,5 LABEL=r2 @@ -226,7 +226,7 @@ void Histogram::registerKeywords( Keywords& keys ) { gridtools::ActionWithGrid::registerKeywords( keys ); keys.use("ARG"); keys.remove("NORMALIZATION"); keys.add("compulsory","NORMALIZATION","ndata","This controls how the data is normalized it can be set equal to true, false or ndata. See above for an explanation"); keys.add("optional","DATA","input data from action with vessel and compute histogram"); - keys.add("optional","VECTORS","input three dimsnional vectors for computing histogram"); + keys.add("optional","VECTORS","input three dimensional vectors for computing histogram"); keys.add("compulsory","GRID_MIN","the lower bounds for the grid"); keys.add("compulsory","GRID_MAX","the upper bounds for the grid"); keys.add("optional","GRID_BIN","the number of bins for the grid"); diff --git a/src/analysis/ReadAnalysisFrames.cpp b/src/analysis/ReadAnalysisFrames.cpp index 8a73b6bb9..7da01ff0c 100644 --- a/src/analysis/ReadAnalysisFrames.cpp +++ b/src/analysis/ReadAnalysisFrames.cpp @@ -41,7 +41,7 @@ PLUMED_REGISTER_ACTION(ReadAnalysisFrames,"COLLECT_FRAMES") void ReadAnalysisFrames::registerKeywords( Keywords& keys ) { AnalysisBase::registerKeywords( keys ); keys.remove("SERIAL"); keys.remove("USE_OUTPUT_DATA_FROM"); keys.use("ARG"); - keys.add("atoms-1","ATOMS","the atoms whose positions we are tracking for the purpose of analysing the data"); + keys.add("atoms-1","ATOMS","the atoms whose positions we are tracking for the purpose of analyzing the data"); keys.add("atoms-1","STRIDE","the frequency with which data should be stored for analysis. By default data is collected on every step"); keys.add("compulsory","CLEAR","0","the frequency with which data should all be deleted and restarted"); keys.add("optional","LOGWEIGHTS","list of actions that calculates log weights that should be used to weight configurations when calculating averages"); @@ -127,7 +127,7 @@ void ReadAnalysisFrames::calculateWeights() { void ReadAnalysisFrames::update() { if( getStep()==0 ) return; - // Delete everything we stored now that it has been analysed + // Delete everything we stored now that it has been analyzed if( clearonnextstep ) { my_data_stash.clear(); my_data_stash.resize(0); logweights.clear(); logweights.resize(0); diff --git a/src/analysis/WhamHistogram.cpp b/src/analysis/WhamHistogram.cpp index ab66cd5d1..41d535d12 100644 --- a/src/analysis/WhamHistogram.cpp +++ b/src/analysis/WhamHistogram.cpp @@ -27,7 +27,7 @@ namespace analysis { //+PLUMEDOC REWEIGHTING WHAM_HISTOGRAM /* -This can be used to output the a histogram using the weighted histogram techinque +This can be used to output a histogram using the weighted histogram technique \par Examples diff --git a/src/bias/ABMD.cpp b/src/bias/ABMD.cpp index 5d0c40172..bd0e5039c 100644 --- a/src/bias/ABMD.cpp +++ b/src/bias/ABMD.cpp @@ -105,7 +105,7 @@ void ABMD::registerKeywords(Keywords& keys) { keys.add("optional","NOISE","Array of white noise intensities (add a temperature to the ABMD)"); keys.add("optional","SEED","Array of seeds for the white noise (add a temperature to the ABMD)"); keys.addOutputComponent("force2","default","the instantaneous value of the squared force due to this bias potential"); - keys.addOutputComponent("_min","default","one or multiple instances of this quantity will be refereceable elsewhere in the input file.
" + keys.addOutputComponent("_min","default","one or multiple instances of this quantity can be referenced elsewhere in the input file. " " These quantities will be named with the arguments of the bias followed by " "the character string _min. These quantities tell the user the minimum value assumed by rho_m(t)."); } diff --git a/src/bias/BiasValue.cpp b/src/bias/BiasValue.cpp index 1a2b6a4ee..21158fc19 100644 --- a/src/bias/BiasValue.cpp +++ b/src/bias/BiasValue.cpp @@ -89,7 +89,7 @@ void BiasValue::registerKeywords(Keywords& keys) { Bias::registerKeywords(keys); keys.use("ARG"); // Should be _bias below - keys.addOutputComponent("_bias","default","one or multiple instances of this quantity will be refereceable elsewhere in the input file. " + keys.addOutputComponent("_bias","default","one or multiple instances of this quantity can be referenced elsewhere in the input file. " "these quantities will named with the arguments of the bias followed by " "the character string _bias. These quantities tell the user how much the bias is " "due to each of the colvars."); diff --git a/src/bias/ExtendedLagrangian.cpp b/src/bias/ExtendedLagrangian.cpp index a8e08bf78..01feb5ff0 100644 --- a/src/bias/ExtendedLagrangian.cpp +++ b/src/bias/ExtendedLagrangian.cpp @@ -39,7 +39,7 @@ namespace bias { Add extended Lagrangian. This action can be used to create fictitious collective variables coupled to the real ones. -Given \f$x_i\f$ the i-th argument of this bias potential, potential +Given \f$x_i\f$ the \f$i\f$th argument of this bias potential, potential and kinetic contributions are added to the energy of the system as \f[ V=\sum_i \frac{k_i}{2} (x_i-s_i)^2 + \sum_i \frac{\dot{s}_i^2}{2m_i} @@ -142,10 +142,10 @@ void ExtendedLagrangian::registerKeywords(Keywords& keys) { keys.add("compulsory","FRICTION","0.0","add a friction to the variable"); keys.add("optional","TEMP","the system temperature - needed when FRICTION is present. If not provided will be taken from MD code (if available)"); componentsAreNotOptional(keys); - keys.addOutputComponent("_fict","default","one or multiple instances of this quantity will be refereceable elsewhere in the input file. " + keys.addOutputComponent("_fict","default","one or multiple instances of this quantity can be referenced elsewhere in the input file. " "These quantities will named with the arguments of the bias followed by " "the character string _tilde. It is possible to add forces on these variable."); - keys.addOutputComponent("_vfict","default","one or multiple instances of this quantity will be refereceable elsewhere in the input file. " + keys.addOutputComponent("_vfict","default","one or multiple instances of this quantity can be referenced elsewhere in the input file. " "These quantities will named with the arguments of the bias followed by " "the character string _tilde. It is NOT possible to add forces on these variable."); } diff --git a/src/bias/External.cpp b/src/bias/External.cpp index c0da8689f..74e9b086a 100644 --- a/src/bias/External.cpp +++ b/src/bias/External.cpp @@ -85,8 +85,8 @@ value of t1 (the value in the first column) is kept fixed and the value of the function is given at 100 equally spaced values for t2 between \f$-pi\f$ and \f$+pi\f$. In the second block of data t1 is fixed at \f$-pi + \frac{2pi}{100}\f$ and the value of the function is given at 100 equally spaced values for t2 between \f$-pi\f$ and \f$+pi\f$. 
In the third block of -data the same is done but t1 is fixed at \f$-pi + \frac{4pi}{100}\f$ and so on untill you get to -the 100th block of data where t1 is fixed at \f$+pi\f$. +data the same is done but t1 is fixed at \f$-pi + \frac{4pi}{100}\f$ and so on until you get to +the one hundredth block of data where t1 is fixed at \f$+pi\f$. Please note the order that the order of arguments in the plumed.dat file must be the same as the order of arguments in the header of the grid file. @@ -113,7 +113,7 @@ void External::registerKeywords(Keywords& keys) { keys.add("compulsory","FILE","the name of the file containing the external potential."); keys.addFlag("NOSPLINE",false,"specifies that no spline interpolation is to be used when calculating the energy and forces due to the external potential"); keys.addFlag("SPARSE",false,"specifies that the external potential uses a sparse grid"); - keys.add("compulsory","SCALE","1.0","a factor that multiplies the external potential, usefull to invert free energies"); + keys.add("compulsory","SCALE","1.0","a factor that multiplies the external potential, useful to invert free energies"); } External::External(const ActionOptions& ao): diff --git a/src/bias/MaxEnt.cpp b/src/bias/MaxEnt.cpp index 21703c9c9..b8a56fd76 100644 --- a/src/bias/MaxEnt.cpp +++ b/src/bias/MaxEnt.cpp @@ -68,7 +68,7 @@ Lagrangian multipliers \f$ \lambda_{i}\f$ are updated, every PACE steps, accordi \f$k\f$ set the initial value of the learning rate and its units are \f$[observable]^{-2}ps^{-1}\f$. This can be set with the keyword KAPPA. The number of components for any KAPPA vector must be equal to the number of arguments of the action. -Variable \f$ \xi_{i}(\lambda) \f$ is related to the choosen prior to model experimental errors. If a GAUSSIAN prior is used then: +Variable \f$ \xi_{i}(\lambda) \f$ is related to the chosen prior to model experimental errors. If a GAUSSIAN prior is used then: \f[ \xi_{i}(\lambda)=-\lambda_{i}\sigma^{2} \f] @@ -90,7 +90,7 @@ The following input tells plumed to restrain the distance between atoms 7 and 15 and the distance between atoms 2 and 19, at different equilibrium values, and to print the energy of the restraint. Lagrangian multiplier will be printed on a file called restraint.LAGMULT with a stride set by the variable PACE to 200ps. -Moreover plumed will compute the average of each lagrangian multiplier in the window [TSTART,TEND] and use that to continue the simulations with fixed Lagrangian multipliers. +Moreover plumed will compute the average of each Lagrangian multiplier in the window [TSTART,TEND] and use that to continue the simulations with fixed Lagrangian multipliers. \plumedfile DISTANCE ATOMS=7,15 LABEL=d1 DISTANCE ATOMS=2,19 LABEL=d2 @@ -183,25 +183,25 @@ void MaxEnt::registerKeywords(Keywords& keys) { keys.add("compulsory","KAPPA","0.0","specifies the initial value for the learning rate"); keys.add("compulsory","TAU","Specify the dumping time for the learning rate."); keys.add("compulsory","TYPE","specify the restraint type. " - "EQAUL to restrain the variable at a given equilibrium value" - "INEQUAL< to restrain the variable to be smaller than a given value" + "EQUAL to restrain the variable at a given equilibrium value " + "INEQUAL< to restrain the variable to be smaller than a given value " "INEQUAL> to restrain the variable to be greater than a given value"); keys.add("optional","ERROR_TYPE","specify the prior on the error to use." 
- "GAUSSIAN: use a Gaussian prior" + "GAUSSIAN: use a Gaussian prior " "LAPLACE: use a Laplace prior"); - keys.add("optional","TSTART","time in ps from where to start averaging the Lagrangian multiplier. By default no average is computed, hence lambda is updated every PACE steps"); + keys.add("optional","TSTART","time from where to start averaging the Lagrangian multiplier. By default no average is computed, hence lambda is updated every PACE steps"); keys.add("optional","TEND","time in ps where to stop to compute the average of Lagrangian multiplier. From this time until the end of the simulation Lagrangian multipliers are kept fix to the average computed between TSTART and TEND;"); keys.add("optional","ALPHA","default=1.0; To be used with LAPLACE KEYWORD, allows to choose a prior function proportional to a Gaussian times an exponential function. ALPHA=1 correspond to the LAPLACE prior."); keys.add("compulsory","AT","the position of the restraint"); - keys.add("optional","SIGMA","The typical erros expected on observable"); + keys.add("optional","SIGMA","The typical errors expected on observable"); keys.add("optional","FILE","Lagrangian multipliers output file. The default name is: label name followed by the string .LAGMULT "); keys.add("optional","LEARN_REPLICA","In a multiple replica environment specify which is the reference replica. By default replica 0 will be used."); - keys.add("optional","APPLY_WEIGHTS","Vector of weights containing 1 in correspondece of each replica that will receive the lagrangian multiplier from the current one."); + keys.add("optional","APPLY_WEIGHTS","Vector of weights containing 1 in correspondence of each replica that will receive the Lagrangian multiplier from the current one."); keys.add("optional","PACE","the frequency for Lagrangian multipliers update"); keys.add("optional","PRINT_STRIDE","stride of Lagrangian multipliers output file. If no STRIDE is passed they are written every time they are updated (PACE)."); - keys.add("optional","FMT","specify format for Lagrangian multipliers files (usefulf to decrease the number of digits in regtests)"); + keys.add("optional","FMT","specify format for Lagrangian multipliers files (useful to decrease the number of digits in regtests)"); keys.addFlag("REWEIGHT",false,"to be used with plumed driver in order to reweight a trajectory a posteriori"); - keys.addFlag("NO_BROADCAST",false,"If active will avoid Lagrangian multipliers to be comunicated to other replicas."); + keys.addFlag("NO_BROADCAST",false,"If active will avoid Lagrangian multipliers to be communicated to other replicas."); keys.add("optional","TEMP","the system temperature. 
This is required if you are reweighting."); keys.addOutputComponent("force2","default","the instantaneous value of the squared force due to this bias potential"); keys.addOutputComponent("work","default","the instantaneous value of the work done by the biasing force"); @@ -270,7 +270,7 @@ MaxEnt::MaxEnt(const ActionOptions&ao): parse("FILE",lagmultfname); parse("FMT",fmt); parse("PACE",pace_); - if(pace_<=0 ) error("frequency for lagrangian multipliers update (PACE) is nonsensical"); + if(pace_<=0 ) error("frequency for Lagrangian multipliers update (PACE) is nonsensical"); stride_=pace_; //if no STRIDE is passed, then Lagrangian multipliers willbe printed at each update parse("PRINT_STRIDE",stride_); if(stride_<=0 ) error("frequency for Lagrangian multipliers printing (STRIDE) is nonsensical"); diff --git a/src/bias/MetaD.cpp b/src/bias/MetaD.cpp index b519489de..8fd6a61e3 100644 --- a/src/bias/MetaD.cpp +++ b/src/bias/MetaD.cpp @@ -47,7 +47,7 @@ namespace bias { //+PLUMEDOC BIAS METAD /* -Used to performed MetaDynamics on one or more collective variables. +Used to perform metadynamics on one or more collective variables. In a metadynamics simulations a history dependent bias composed of intermittently added Gaussian functions is added to the potential \cite metad. @@ -75,7 +75,7 @@ utility. In the simplest possible implementation of a metadynamics calculation the expense of a metadynamics calculation increases with the length of the simulation as one has to, at every step, evaluate -the values of a larger and larger number of Gaussians. To avoid this issue you can +the values of a larger and larger number of Gaussian kernels. To avoid this issue you can store the bias on a grid. This approach is similar to that proposed in \cite babi08jcp but has the advantage that the grid spacing is independent on the Gaussian width. Notice that you should @@ -91,7 +91,7 @@ case one can first save a GRID using GRID_WFILE (and GRID_WSTRIDE) and at a late it using GRID_RFILE. Another option that is available in plumed is well-tempered metadynamics \cite Barducci:2008. In this -varient of metadynamics the heights of the Gaussian hills are rescaled at each step so the bias is now +variant of metadynamics the heights of the Gaussian hills are scaled at each step so the bias is now given by: \f[ @@ -106,27 +106,27 @@ Also notice that with well-tempered metadynamics the HILLS file does not contain but the negative of the free-energy estimate. This choice has the advantage that one can restart a simulation using a different value for the \f$\Delta T\f$. The applied bias will be scaled accordingly. -Note that you can use here also the flexible gaussian approach \cite Branduardi:2012dl -in which you can adapt the gaussian to the extent of Cartesian space covered by a variable or +Note that you can use here also the flexible Gaussian approach \cite Branduardi:2012dl +in which you can adapt the Gaussian to the extent of Cartesian space covered by a variable or to the space in collective variable covered in a given time. In this case the width of the deposited -gaussian potential is denoted by one value only that is a Cartesian space (ADAPTIVE=GEOM) or a time -(ADAPTIVE=DIFF). Note that a specific integration technique for the deposited gaussians +Gaussian potential is denoted by one value only that is a Cartesian space (ADAPTIVE=GEOM) or a time +(ADAPTIVE=DIFF). Note that a specific integration technique for the deposited Gaussian kernels should be used in this case.
Check the documentation for utility sum_hills. With the keyword INTERVAL one changes the metadynamics algorithm setting the bias force equal to zero outside boundary \cite baftizadeh2012protein. If, for example, metadynamics is performed on a CV s and one is interested only -to the free energy for s > sw, the history dependent potential is still updated according to the above -equations but the metadynamics force is set to zero for s < sw. Notice that Gaussians are added also -if s < sw, as the tails of these Gaussians influence VG in the relevant region s > sw. In this way, the -force on the system in the region s > sw comes from both metadynamics and the force field, in the region -s < sw only from the latter. This approach allows obtaining a history-dependent bias potential VG that +to the free energy for s > boundary, the history dependent potential is still updated according to the above +equations but the metadynamics force is set to zero for s < boundary. Notice that Gaussian kernels are added also +if s < boundary, as the tails of these Gaussian kernels influence VG in the relevant region s > boundary. In this way, the +force on the system in the region s > boundary comes from both metadynamics and the force field, in the region +s < boundary only from the latter. This approach allows obtaining a history-dependent bias potential VG that fluctuates around a stable estimator, equal to the negative of the free energy far enough from the boundaries. Note that: - It works only for one-dimensional biases; - It works both with and without GRID; -- The interval limit sw in a region where the free energy derivative is not large; -- If in the region outside the limit sw the system has a free energy minimum, the INTERVAL keyword should - be used together with a \ref UPPER_WALLS or \ref LOWER_WALLS at sw. +- The interval limit boundary in a region where the free energy derivative is not large; +- If in the region outside the limit boundary the system has a free energy minimum, the INTERVAL keyword should + be used together with a \ref UPPER_WALLS or \ref LOWER_WALLS at boundary. As a final note, since version 2.0.2 when the system is outside of the selected interval the force is set to zero and the bias value to the value at the corresponding boundary. This allows acceptances @@ -174,8 +174,8 @@ PRINT ARG=d1,d2,restraint.bias STRIDE=100 FILE=COLVAR (See also \ref DISTANCE \ref PRINT). \par -If you use adaptive Gaussians, with diffusion scheme where you use -a Gaussian that should cover the space of 20 timesteps in collective variables. +If you use adaptive Gaussian kernels, with diffusion scheme where you use +a Gaussian that should cover the space of 20 time steps in collective variables. Note that in this case the histogram correction is needed when summing up hills. \plumedfile DISTANCE ATOMS=3,5 LABEL=d1 @@ -185,7 +185,7 @@ PRINT ARG=d1,d2,restraint.bias STRIDE=100 FILE=COLVAR \endplumedfile \par -If you use adaptive Gaussians, with geometrical scheme where you use +If you use adaptive Gaussian kernels, with geometrical scheme where you use a Gaussian that should cover the space of 0.05 nm in Cartesian space. Note that in this case the histogram correction is needed when summing up hills. \plumedfile @@ -196,7 +196,7 @@ PRINT ARG=d1,d2,restraint.bias STRIDE=100 FILE=COLVAR \endplumedfile \par -When using adaptive Gaussians you might want to limit how the hills width can change. +When using adaptive Gaussian kernels you might want to limit how the hills width can change. 
You can use SIGMA_MIN and SIGMA_MAX keywords. The sigmas should specified in terms of CV so you should use the CV units. Note that if you use a negative number, this means that the limit is not set. @@ -257,7 +257,7 @@ normalized using the c(t) reweighting factor is given in the rbias component free energy surface using the \ref HISTOGRAM analysis. \par -The kinetics of the transitions between basins can also be analysed on the fly as +The kinetics of the transitions between basins can also be analyzed on the fly as in \cite PRL230602. The flag ACCELERATION turn on accumulation of the acceleration factor that can then be used to determine the rate. This method can be used together with \ref COMMITTOR analysis to stop the simulation when the system get to the target basin. @@ -300,14 +300,14 @@ You can also provide a target distribution using the keyword TARGET \cite white2015designing \cite marinelli2015ensemble \cite gil2016empirical -The TARGET should be a grid containing a free-energy (i.e. the -kbT*log of the desired target distribution). -Gaussians will then be scaled by a factor +The TARGET should be a grid containing a free-energy (i.e. the -\f$k_B\f$T*log of the desired target distribution). +Gaussian kernels will then be scaled by a factor \f[ e^{\beta(\tilde{F}(s)-\tilde{F}_{max})} \f] Here \f$\tilde{F}(s)\f$ is the free energy defined on the grid and \f$\tilde{F}_{max}\f$ its maximum value. Notice that we here used the maximum value as in ref \cite gil2016empirical -This choice allows to avoid exceedingly large Gaussians to be added. However, +This choice allows to avoid exceedingly large Gaussian kernels to be added. However, it could make the Gaussian too small. You should always choose carefully the HEIGHT parameter in this case. The grid file should be similar to other PLUMED grid files in that it should contain @@ -315,7 +315,7 @@ both the target free-energy and its derivatives. Notice that if you wish your simulation to converge to the target free energy you should use the DAMPFACTOR command to provide a global tempering \cite dama2014well -Alternatively, if you use a BIASFACTOR yout simulation will converge to a free +Alternatively, if you use a BIASFACTOR your simulation will converge to a free energy that is a linear combination of the target free energy and of the intrinsic free energy determined by the original force field. @@ -494,15 +494,15 @@ void MetaD::registerKeywords(Keywords& keys) { keys.add("compulsory","FILE","HILLS","a file in which the list of added hills is stored"); keys.add("optional","HEIGHT","the heights of the Gaussian hills. Compulsory unless TAU and either BIASFACTOR or DAMPFACTOR are given"); keys.add("optional","FMT","specify format for HILLS files (useful for decrease the number of digits in regtests)"); - keys.add("optional","BIASFACTOR","use well tempered metadynamics and use this biasfactor. Please note you must also specify temp"); + keys.add("optional","BIASFACTOR","use well tempered metadynamics and use this bias factor. 
Please note you must also specify temp"); keys.add("optional","RECT","list of bias factors for all the replicas"); - keys.add("optional","DAMPFACTOR","damp hills with exp(-max(V)/(kbT*DAMPFACTOR)"); + keys.add("optional","DAMPFACTOR","damp hills with exp(-max(V)/(\\f$k_B\\f$T*DAMPFACTOR)"); for (size_t i = 0; i < n_tempering_options_; i++) { registerTemperingKeywords(tempering_names_[i][0], tempering_names_[i][1], keys); } keys.add("optional","TARGET","target to a predefined distribution"); keys.add("optional","TEMP","the system temperature - this is only needed if you are doing well-tempered metadynamics"); - keys.add("optional","TAU","in well tempered metadynamics, sets height to (kb*DeltaT*pace*timestep)/tau"); + keys.add("optional","TAU","in well tempered metadynamics, sets height to (\\f$k_B \\Delta T\\f$*pace*timestep)/tau"); keys.add("optional","GRID_MIN","the lower bounds for the grid"); keys.add("optional","GRID_MAX","the upper bounds for the grid"); keys.add("optional","GRID_BIN","the number of bins for the grid"); @@ -519,12 +519,12 @@ void MetaD::registerKeywords(Keywords& keys) { keys.add("optional","GRID_WFILE","the file on which to write the grid"); keys.add("optional","GRID_RFILE","a grid file from which the bias should be read at the initial step of the simulation"); keys.addFlag("STORE_GRIDS",false,"store all the grid files the calculation generates. They will be deleted if this keyword is not present"); - keys.add("optional","ADAPTIVE","use a geometric (=GEOM) or diffusion (=DIFF) based hills width scheme. Sigma is one number that has distance units or timestep dimensions"); + keys.add("optional","ADAPTIVE","use a geometric (=GEOM) or diffusion (=DIFF) based hills width scheme. Sigma is one number that has distance units or time step dimensions"); keys.add("optional","WALKERS_ID", "walker id"); keys.add("optional","WALKERS_N", "number of walkers"); keys.add("optional","WALKERS_DIR", "shared directory with the hills files from all the walkers"); keys.add("optional","WALKERS_RSTRIDE","stride for reading hills files"); - keys.add("optional","INTERVAL","monodimensional lower and upper limits, outside the limits the system will not feel the biasing force."); + keys.add("optional","INTERVAL","one dimensional lower and upper limits, outside the limits the system will not feel the biasing force."); keys.add("optional","SIGMA_MAX","the upper bounds for the sigmas (in CV units) when using adaptive hills. Negative number means no bounds "); keys.add("optional","SIGMA_MIN","the lower bounds for the sigmas (in CV units) when using adaptive hills. Negative number means no bounds "); keys.addFlag("WALKERS_MPI",false,"Switch on MPI version of multiple walkers - not compatible with WALKERS_* options other than WALKERS_DIR"); @@ -533,7 +533,7 @@ void MetaD::registerKeywords(Keywords& keys) { keys.add("optional","ACCELERATION_RFILE","a data file from which the acceleration should be read at the initial step of the simulation"); keys.addFlag("CALC_MAX_BIAS", false, "Set to TRUE if you want to compute the maximum of the metadynamics V(s, t)"); keys.addFlag("CALC_TRANSITION_BIAS", false, "Set to TRUE if you want to compute a metadynamics transition bias V*(t)"); - keys.add("numbered", "TRANSITIONWELL", "This keyword appears multiple times as TRANSITIONWELLx with x=0,1,2,...,n. Each specifies the coordinates for one well as in transition-tempered metadynamics. 
At least one must be provided."); + keys.add("numbered", "TRANSITIONWELL", "This keyword appears multiple times as TRANSITIONWELL followed by an integer. Each specifies the coordinates for one well as in transition-tempered metadynamics. At least one must be provided."); keys.addFlag("FREQUENCY_ADAPTIVE",false,"Set to TRUE if you want to enable frequency adaptive metadynamics such that the frequency for hill addition to change dynamically based on the acceleration factor."); keys.add("optional","FA_UPDATE_FREQUENCY","the frequency for updating the hill addition pace in frequency adaptive metadynamics, by default this is equal to the value given in PACE"); keys.add("optional","FA_MAX_PACE","the maximum hill addition frequency allowed in frequency adaptive metadynamics. By default there is no maximum value."); @@ -546,7 +546,7 @@ void MetaD::registerKeywords(Keywords& keys) { const std::string MetaD::tempering_names_[1][2] = {{"TT", "transition tempered"}}; void MetaD::registerTemperingKeywords(const std::string &name_stem, const std::string &name, Keywords &keys) { - keys.add("optional", name_stem + "BIASFACTOR", "use " + name + " metadynamics with this biasfactor. Please note you must also specify temp"); + keys.add("optional", name_stem + "BIASFACTOR", "use " + name + " metadynamics with this bias factor. Please note you must also specify temp"); keys.add("optional", name_stem + "BIASTHRESHOLD", "use " + name + " metadynamics with this bias threshold. Please note you must also specify " + name_stem + "BIASFACTOR"); keys.add("optional", name_stem + "ALPHA", "use " + name + " metadynamics with this hill size decay exponent parameter. Please note you must also specify " + name_stem + "BIASFACTOR"); } @@ -592,7 +592,7 @@ MetaD::MetaD(const ActionOptions& ao): log.printf(" Uses Geometry-based hills width: sigma must be in distance units and only one sigma is needed\n"); adaptive_=FlexibleBin::geometry; } else if(adaptiveoption=="DIFF") { - log.printf(" Uses Diffusion-based hills width: sigma must be in timesteps and only one sigma is needed\n"); + log.printf(" Uses Diffusion-based hills width: sigma must be in time steps and only one sigma is needed\n"); adaptive_=FlexibleBin::diffusion; } else if(adaptiveoption=="NONE") { adaptive_=FlexibleBin::none; @@ -615,7 +615,7 @@ MetaD::MetaD(const ActionOptions& ao): // if adaptive then the number must be an integer if(adaptive_==FlexibleBin::diffusion) { if(int(sigma0_[0])-sigma0_[0]>1.e-9 || int(sigma0_[0])-sigma0_[0] <-1.e-9 || int(sigma0_[0])<1 ) { - error("In case of adaptive hills with diffusion, the sigma must be an integer which is the number of timesteps\n"); + error("In case of adaptive hills with diffusion, the sigma must be an integer which is the number of time steps\n"); } } // here evtl parse the sigma min and max values diff --git a/src/bias/MovingRestraint.cpp b/src/bias/MovingRestraint.cpp index b58ea675b..4665c2ed3 100644 --- a/src/bias/MovingRestraint.cpp +++ b/src/bias/MovingRestraint.cpp @@ -44,7 +44,7 @@ V(\vec{s},t) = \frac{1}{2} \kappa(t) ( \vec{s} - \vec{s}_0(t) )^2 The time dependence of \f$\kappa\f$ and \f$\vec{s}_0\f$ are specified by a list of STEP, KAPPA and AT keywords. These keywords tell plumed what values \f$\kappa\f$ and \f$\vec{s}_0\f$ -should have at the time specified by the corresponding STEP keyword. Inbetween these times +should have at the time specified by the corresponding STEP keyword. In between these times the values of \f$\kappa\f$ and \f$\vec{s}_0\f$ are linearly interpolated. 
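A minimal sketch of an input that uses these keywords is shown below; the atom indices, the spring constant and the step schedule are only illustrative values.

\plumedfile
d: DISTANCE ATOMS=1,2
steer: MOVINGRESTRAINT ARG=d STEP0=0 AT0=0.5 KAPPA0=200.0 STEP1=50000 AT1=1.5 KAPPA1=200.0
PRINT ARG=d,steer.work STRIDE=100 FILE=COLVAR
\endplumedfile

Between STEP0 and STEP1 the center of the restraint is moved linearly from AT0 to AT1 while the force constant is kept fixed, and the accumulated work can be monitored through the work component.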
Additional material and examples can be also found in the tutorial \ref belfast-5 @@ -125,28 +125,28 @@ void MovingRestraint::registerKeywords( Keywords& keys ) { keys.use("ARG"); keys.add("compulsory","VERSE","B","Tells plumed whether the restraint is only acting for CV larger (U) or smaller (L) than " "the restraint or whether it is acting on both sides (B)"); - keys.add("numbered","STEP","This keyword appears multiple times as STEPx with x=0,1,2,...,n. Each value given represents " - "the MD step at which the restraint parameters take the values KAPPAx and ATx."); + keys.add("numbered","STEP","This keyword appears multiple times as STEP\\f$x\\f$ with x=0,1,2,...,n. Each value given represents " + "the MD step at which the restraint parameters take the values KAPPA\\f$x\\f$ and AT\\f$x\\f$."); keys.reset_style("STEP","compulsory"); - keys.add("numbered","AT","ATx is equal to the position of the restraint at time STEPx. For intermediate times this parameter " - "is linearly interpolated. If no ATx is specified for STEPx then the values of AT are kept constant " - "during the interval of time between STEPx-1 and STEPx."); + keys.add("numbered","AT","AT\\f$x\\f$ is equal to the position of the restraint at time STEP\\f$x\\f$. For intermediate times this parameter " + "is linearly interpolated. If no AT\\f$x\\f$ is specified for STEP\\f$x\\f$ then the values of AT are kept constant " + "during the interval of time between STEP\\f$x-1\\f$ and STEP\\f$x\\f$."); keys.reset_style("AT","compulsory"); - keys.add("numbered","KAPPA","KAPPAx is equal to the value of the force constants at time STEPx. For intermediate times this " - "parameter is linearly interpolated. If no KAPPAx is specified for STEPx then the values of KAPPAx " - "are kept constant during the interval of time between STEPx-1 and STEPx."); + keys.add("numbered","KAPPA","KAPPA\\f$x\\f$ is equal to the value of the force constants at time STEP\\f$x\\f$. For intermediate times this " + "parameter is linearly interpolated. If no KAPPA\\f$x\\f$ is specified for STEP\\f$x\\f$ then the values of KAPPA\\f$x\\f$ " + "are kept constant during the interval of time between STEP\\f$x-1\\f$ and STEP\\f$x\\f$."); keys.reset_style("KAPPA","compulsory"); keys.addOutputComponent("work","default","the total work performed changing this restraint"); keys.addOutputComponent("force2","default","the instantaneous value of the squared force due to this bias potential"); - keys.addOutputComponent("_cntr","default","one or multiple instances of this quantity will be refereceable elsewhere in the input file. " + keys.addOutputComponent("_cntr","default","one or multiple instances of this quantity can be referenced elsewhere in the input file. " "these quantities will named with the arguments of the bias followed by " "the character string _cntr. These quantities give the instantaneous position " "of the center of the harmonic potential."); - keys.addOutputComponent("_work","default","one or multiple instances of this quantity will be refereceable elsewhere in the input file. " + keys.addOutputComponent("_work","default","one or multiple instances of this quantity can be referenced elsewhere in the input file. " "These quantities will named with the arguments of the bias followed by " "the character string _work. 
These quantities tell the user how much work has " "been done by the potential in dragging the system along the various colvar axis."); - keys.addOutputComponent("_kappa","default","one or multiple instances of this quantity will be refereceable elsewhere in the input file. " + keys.addOutputComponent("_kappa","default","one or multiple instances of this quantity can be referenced elsewhere in the input file. " "These quantities will named with the arguments of the bias followed by " "the character string _kappa. These quantities tell the user the time dependent value of kappa."); } diff --git a/src/bias/PBMetaD.cpp b/src/bias/PBMetaD.cpp index aef6f0904..35688dbec 100644 --- a/src/bias/PBMetaD.cpp +++ b/src/bias/PBMetaD.cpp @@ -47,11 +47,11 @@ namespace bias { //+PLUMEDOC BIAS PBMETAD /* -Used to performed Parallel Bias MetaDynamics. +Used to perform Parallel Bias metadynamics. -This action activate Parallel Bias MetaDynamics (PBMetaD) \cite pbmetad, a version of MetaDynamics \cite metad in which +This action activates Parallel Bias Metadynamics (PBMetaD) \cite pbmetad, a version of metadynamics \cite metad in which multiple low-dimensional bias potentials are applied in parallel. -In the current implementation, these have the form of mono-dimensional MetaDynamics bias +In the current implementation, these have the form of mono-dimensional metadynamics bias potentials: \f[ @@ -114,7 +114,7 @@ and if Gaussian width is fixed PLUMED will use 1/5 of the Gaussian width as grid This default choice should be reasonable for most applications. Another option that is available is well-tempered metadynamics \cite Barducci:2008. In this -variant of PBMetaD the heights of the Gaussian hills are rescaled at each step by the +variant of PBMetaD the heights of the Gaussian hills are scaled at each step by the additional well-tempered metadynamics term. This ensures that each bias converges more smoothly. It should be noted that, in the case of well-tempered metadynamics, in the output printed the Gaussian height is re-scaled using the bias factor. @@ -126,23 +126,23 @@ Note that you can use here also the flexible gaussian approach \cite Branduardi in which you can adapt the gaussian to the extent of Cartesian space covered by a variable or to the space in collective variable covered in a given time. In this case the width of the deposited gaussian potential is denoted by one value only that is a Cartesian space (ADAPTIVE=GEOM) or a time -(ADAPTIVE=DIFF). Note that a specific integration technique for the deposited gaussians +(ADAPTIVE=DIFF). Note that a specific integration technique for the deposited Gaussian kernels should be used in this case. Check the documentation for utility sum_hills. With the keyword INTERVAL one changes the metadynamics algorithm setting the bias force equal to zero outside boundary \cite baftizadeh2012protein. If, for example, metadynamics is performed on a CV s and one is interested only -to the free energy for s > sw, the history dependent potential is still updated according to the above -equations but the metadynamics force is set to zero for s < sw. Notice that Gaussians are added also -if s < sw, as the tails of these Gaussians influence VG in the relevant region s > sw. In this way, the -force on the system in the region s > sw comes from both metadynamics and the force field, in the region -s < sw only from the latter.
This approach allows obtaining a history-dependent bias potential VG that +to the free energy for s > boundary, the history dependent potential is still updated according to the above +equations but the metadynamics force is set to zero for s < boundary. Notice that Gaussians are added also +if s < boundary, as the tails of these Gaussians influence VG in the relevant region s > boundary. In this way, the +force on the system in the region s > boundary comes from both metadynamics and the force field, in the region +s < boundary only from the latter. This approach allows obtaining a history-dependent bias potential VG that fluctuates around a stable estimator, equal to the negative of the free energy far enough from the boundaries. Note that: - It works only for one-dimensional biases; - It works both with and without GRID; -- The interval limit sw in a region where the free energy derivative is not large; -- If in the region outside the limit sw the system has a free energy minimum, the INTERVAL keyword should - be used together with a \ref UPPER_WALLS or \ref LOWER_WALLS at sw. +- The interval limit boundary in a region where the free energy derivative is not large; +- If in the region outside the limit boundary the system has a free energy minimum, the INTERVAL keyword should + be used together with a \ref UPPER_WALLS or \ref LOWER_WALLS at boundary. Multiple walkers \cite multiplewalkers can also be used. See below the examples. @@ -161,7 +161,7 @@ PRINT ARG=d1,d2,pb.bias STRIDE=100 FILE=COLVAR (See also \ref DISTANCE and \ref PRINT). \par -If you use well-tempered metadynamics, you should specify a single biasfactor and initial +If you use well-tempered metadynamics, you should specify a single bias factor and initial Gaussian height. \plumedfile DISTANCE ATOMS=3,5 LABEL=d1 @@ -294,9 +294,9 @@ void PBMetaD::registerKeywords(Keywords& keys) { keys.add("optional","FILE","files in which the lists of added hills are stored, default names are assigned using arguments if FILE is not found"); keys.add("optional","HEIGHT","the height of the Gaussian hills, one for all biases. Compulsory unless TAU, TEMP and BIASFACTOR are given"); keys.add("optional","FMT","specify format for HILLS files (useful for decrease the number of digits in regtests)"); - keys.add("optional","BIASFACTOR","use well tempered metadynamics with this biasfactor, one for all biases. Please note you must also specify temp"); + keys.add("optional","BIASFACTOR","use well tempered metadynamics with this bias factor, one for all biases. 
Please note you must also specify temp"); keys.add("optional","TEMP","the system temperature - this is only needed if you are doing well-tempered metadynamics"); - keys.add("optional","TAU","in well tempered metadynamics, sets height to (kb*DeltaT*pace*timestep)/tau"); + keys.add("optional","TAU","in well tempered metadynamics, sets height to (\\f$k_B \\Delta T\\f$*pace*timestep)/tau"); keys.add("optional","GRID_RFILES", "read grid for the bias"); keys.add("optional","GRID_WSTRIDE", "frequency for dumping the grid"); keys.add("optional","GRID_WFILES", "dump grid for the bias, default names are used if GRID_WSTRIDE is used without GRID_WFILES."); @@ -312,8 +312,8 @@ void PBMetaD::registerKeywords(Keywords& keys) { keys.add("optional","WALKERS_N", "number of walkers"); keys.add("optional","WALKERS_DIR", "shared directory with the hills files from all the walkers"); keys.add("optional","WALKERS_RSTRIDE","stride for reading hills files"); - keys.add("optional","INTERVAL_MIN","monodimensional lower limits, outside the limits the system will not feel the biasing force."); - keys.add("optional","INTERVAL_MAX","monodimensional upper limits, outside the limits the system will not feel the biasing force."); + keys.add("optional","INTERVAL_MIN","one dimensional lower limits, outside the limits the system will not feel the biasing force."); + keys.add("optional","INTERVAL_MAX","one dimensional upper limits, outside the limits the system will not feel the biasing force."); keys.add("optional","ADAPTIVE","use a geometric (=GEOM) or diffusion (=DIFF) based hills width scheme. Sigma is one number that has distance units or timestep dimensions"); keys.add("optional","SIGMA_MAX","the upper bounds for the sigmas (in CV units) when using adaptive hills. Negative number means no bounds "); keys.add("optional","SIGMA_MIN","the lower bounds for the sigmas (in CV units) when using adaptive hills. Negative number means no bounds "); diff --git a/src/bias/ReweightTemperature.cpp b/src/bias/ReweightTemperature.cpp index aa6b1280f..8a1b7a6e3 100644 --- a/src/bias/ReweightTemperature.cpp +++ b/src/bias/ReweightTemperature.cpp @@ -29,7 +29,7 @@ /* Calculate weights for ensemble averages allow for the computing of ensemble averages at temperatures lower/higher than that used in your original simulation. -We can use our knowledge of the Boltzmann distribution in the cannonical ensemble to reweight the data +We can use our knowledge of the Boltzmann distribution in the canonical ensemble to reweight the data contained in trajectories. Using this procedure we can take trajectory at temperature \f$T_1\f$ and use it to extract probabilities at a different temperature, \f$T_2\f$, using: @@ -43,7 +43,7 @@ that computes ensemble averages. For example this action can be used in tandem \par Examples -The following input can be used to postprocess a molecular dynamics trajectory calculated at a temperature of 500 K. +The following input can be used to post process a molecular dynamics trajectory calculated at a temperature of 500 K. The \ref HISTOGRAM as a function of the distance between atoms 1 and 2 that would have been obtained if the simulation had been run at the lower temperature of 300 K is estimated using the data from the higher temperature trajectory and output to a file. 
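A minimal sketch of such an input is shown below; the grid settings and bandwidth used for the \ref HISTOGRAM are illustrative choices and should be adapted to the system at hand.

\plumedfile
x: DISTANCE ATOMS=1,2
rt: REWEIGHT_TEMP TEMP=500 REWEIGHT_TEMP=300
hh: HISTOGRAM ARG=x GRID_MIN=0.0 GRID_MAX=3.0 GRID_BIN=100 BANDWIDTH=0.05 LOGWEIGHTS=rt
DUMPGRID GRID=hh FILE=histo
\endplumedfile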
@@ -79,7 +79,7 @@ PLUMED_REGISTER_ACTION(ReweightTemperature,"REWEIGHT_TEMP") void ReweightTemperature::registerKeywords(Keywords& keys ) { ReweightBase::registerKeywords( keys ); keys.add("compulsory","REWEIGHT_TEMP","reweight data from a trajectory at one temperature and output the probability " - "distribution at a second temperature. This is not possible during postprocessing."); + "distribution at a second temperature. This is not possible during post processing."); } ReweightTemperature::ReweightTemperature(const ActionOptions&ao): diff --git a/src/cltools/Completion.cpp b/src/cltools/Completion.cpp index 54ca03284..be48a623c 100644 --- a/src/cltools/Completion.cpp +++ b/src/cltools/Completion.cpp @@ -36,7 +36,7 @@ namespace cltools { //+PLUMEDOC TOOLS completion /* -Dumps the body of a bash function to be used for autocompletion. +Dumps the body of a bash function to be used for auto completion. Users will typically not need this command. See more at \ref BashAutocompletion diff --git a/src/cltools/Driver.cpp b/src/cltools/Driver.cpp index c0eebe97d..9107261ec 100644 --- a/src/cltools/Driver.cpp +++ b/src/cltools/Driver.cpp @@ -102,7 +102,7 @@ will read a file produced by \ref DUMPMASSCHARGE . \par Examples -The following command tells plumed to postprocess the trajectory contained in `trajectory.xyz` +The following command tells plumed to post process the trajectory contained in `trajectory.xyz` by performing the actions described in the input file `plumed.dat`. If an action that takes the stride keyword is given a stride equal to \f$n\f$ then it will be performed only on every \f$n\f$th frames in the trajectory file. @@ -132,7 +132,7 @@ PRINT ARG=d FILE=colvar In this case, the driver reads the `xyz` file assuming it to contain coordinates in Angstrom units. However, the resulting `colvar` file contains a distance expressed in nm. -The following command tells plumed to postprocess the trajectory contained in trajectory.xyz. +The following command tells plumed to post process the trajectory contained in trajectory.xyz. by performing the actions described in the input file plumed.dat. \verbatim plumed driver --plumed plumed.dat --ixyz trajectory.xyz --trajectory-stride 100 --timestep 0.001 @@ -143,7 +143,7 @@ and the `--timestep` is equal to the simulation timestep. As such the `STRIDE` files are referred to the original timestep and any files output resemble those that would have been generated had we run the calculation we are running with driver when the MD simulation was running. -PLUMED can read natively xyz files (in PLUMED units) and gro files (in nm). In addition, +PLUMED can read xyz files (in PLUMED units) and gro files (in nm). In addition, PLUMED includes by default support for a subset of the trajectory file formats supported by VMD, e.g. xtc and dcd: @@ -219,7 +219,7 @@ void Driver<real>::registerKeywords( Keywords& keys ) { " currently working only for xtc/trr files read with --ixtc/--trr)" #endif ); - keys.add("compulsory","--multi","0","set number of replicas for multi environment (needs mpi)"); + keys.add("compulsory","--multi","0","set number of replicas for multi environment (needs MPI)"); keys.addFlag("--noatoms",false,"don't read in a trajectory. 
Just use colvar files as specified in plumed.dat"); keys.add("atoms","--ixyz","the trajectory in xyz format"); keys.add("atoms","--igro","the trajectory in gro format"); @@ -230,13 +230,13 @@ void Driver<real>::registerKeywords( Keywords& keys ) { keys.add("optional","--length-units","units for length, either as a string or a number"); keys.add("optional","--mass-units","units for mass in pdb and mc file, either as a string or a number"); keys.add("optional","--charge-units","units for charge in pdb and mc file, either as a string or a number"); - keys.add("optional","--kt","set kBT, it will not be necessary to specify temperature in input file"); + keys.add("optional","--kt","set \\f$k_B T\\f$, it will not be necessary to specify temperature in input file"); keys.add("optional","--dump-forces","dump the forces on a file"); keys.add("optional","--dump-forces-fmt","( default=%%f ) the format to use to dump the forces"); keys.addFlag("--dump-full-virial",false,"with --dump-forces, it dumps the 9 components of the virial"); keys.add("optional","--pdb","provides a pdb with masses and charges"); keys.add("optional","--mc","provides a file with masses and charges as produced with DUMPMASSCHARGE"); - keys.add("optional","--box","comma-separated box dimensions (3 for orthorombic, 9 for generic)"); + keys.add("optional","--box","comma-separated box dimensions (3 for orthorhombic, 9 for generic)"); keys.add("optional","--natoms","provides number of atoms - only used if file format does not contain number of atoms"); keys.add("optional","--initial-step","provides a number for the initial step, default is 0"); keys.add("optional","--debug-forces","output a file containing the forces due to the bias evaluated using numerical derivatives " diff --git a/src/cltools/GenTemplate.cpp b/src/cltools/GenTemplate.cpp index 28ed043b2..62671e173 100644 --- a/src/cltools/GenTemplate.cpp +++ b/src/cltools/GenTemplate.cpp @@ -36,10 +36,9 @@ namespace cltools { //+PLUMEDOC TOOLS gentemplate /* -gentemplate is a tool that you can use to construct template inputs for the various -actions +gentemplate is a tool that you can use to construct template inputs for the various actions -The templates generated by this tool are primarily for use with Toni Giorgino's vmd gui. It may be +The templates generated by this tool are primarily for use with Toni Giorgino's VMD GUI. It may be useful however to use this tool as a quick aid memoir. \par Examples diff --git a/src/cltools/Manual.cpp b/src/cltools/Manual.cpp index 9d4b509fe..5a231f970 100644 --- a/src/cltools/Manual.cpp +++ b/src/cltools/Manual.cpp @@ -40,7 +40,7 @@ manual is a tool that you can use to construct the manual page for a particular action The manual constructed by this action is in html. In all probability you will never need to use this -tool. However, it is used within the scripts that generate plumed's html manual. If you need to use this +tool. However, it is used within the scripts that generate the html manual for PLUMED. If you need to use this tool outside those scripts the input is specified using the following command line arguments. 
\par Examples @@ -72,6 +72,7 @@ void Manual::registerKeywords( Keywords& keys ) { CLTool::registerKeywords( keys ); keys.add("compulsory","--action","print the manual for this particular action"); keys.addFlag("--vim",false,"print the keywords in vim syntax"); + keys.addFlag("--spelling",false,"print a list of the keywords and component names for the spell checker"); } Manual::Manual(const CLToolOptions& co ): @@ -89,7 +90,9 @@ int Manual::main(FILE* in, FILE*out,Communicator& pc) { std::cerr<<"LIST OF DOCUMENTED COMMAND LINE TOOLS:\n"; std::cerr<<cltoolRegister()<<"\n\n"; bool vimout; parseFlag("--vim",vimout); - if( !actionRegister().printManual(action,vimout) && !cltoolRegister().printManual(action) ) { + bool spellout; parseFlag("--spelling",spellout); + if( vimout && spellout ) error("can only use one of --vim and --spelling at a time"); + if( !actionRegister().printManual(action,vimout,spellout) && !cltoolRegister().printManual(action,spellout) ) { fprintf(stderr,"specified action is not registered\n"); return 1; } diff --git a/src/cltools/PdbRenumber.cpp b/src/cltools/PdbRenumber.cpp index 21198122a..b6cfe72fb 100644 --- a/src/cltools/PdbRenumber.cpp +++ b/src/cltools/PdbRenumber.cpp @@ -65,7 +65,7 @@ For instance the following command: > plumed pdbrenumber --ipdb input.pdb --opdb output.pdb \endverbatim will copy file `input.pdb` to `output.pdb` replacing all the serial atoms with -increasing numbers starting from one. Atoms past the 99999th one will be written +increasing numbers starting from one. Atoms that have an index that is greater than 99999 will be written in the output PDB file in hybrid-36 code. It is possible to set a different serial number for the first atom, letting the diff --git a/src/cltools/SimpleMD.cpp b/src/cltools/SimpleMD.cpp index d8c591eba..b54199a10 100644 --- a/src/cltools/SimpleMD.cpp +++ b/src/cltools/SimpleMD.cpp @@ -39,7 +39,7 @@ namespace cltools { /* simplemd allows one to do molecular dynamics on systems of Lennard-Jones atoms. -The input to simplemd is spcified in an input file. Configurations are input and +The input to simplemd is specified in an input file. Configurations are input and output in xyz format. The input file should contain one directive per line. 
The directives available are as follows: @@ -91,7 +91,7 @@ public: static void registerKeywords( Keywords& keys ) { keys.add("compulsory","nstep","The number of steps of dynamics you want to run"); keys.add("compulsory","temperature","NVE","the temperature at which you wish to run the simulation in LJ units"); - keys.add("compulsory","friction","off","The friction (in LJ units) for the langevin thermostat that is used to keep the temperature constant"); + keys.add("compulsory","friction","off","The friction (in LJ units) for the Langevin thermostat that is used to keep the temperature constant"); keys.add("compulsory","tstep","0.005","the integration timestep in LJ units"); keys.add("compulsory","inputfile","An xyz file containing the initial configuration of the system"); keys.add("compulsory","forcecutoff","2.5",""); @@ -99,7 +99,7 @@ public: keys.add("compulsory","outputfile","An output xyz file containing the final configuration of the system"); keys.add("compulsory","nconfig","10","The frequency with which to write configurations to the trajectory file followed by the name of the trajectory file"); keys.add("compulsory","nstat","1","The frequency with which to write the statistics to the statistics file followed by the name of the statistics file"); - keys.add("compulsory","maxneighbours","10000","The maximum number of neighbours an atom can have"); + keys.add("compulsory","maxneighbours","10000","The maximum number of neighbors an atom can have"); keys.add("compulsory","idum","0","The random number seed"); keys.add("compulsory","ndim","3","The dimensionality of the system (some interesting LJ clusters are two dimensional)"); keys.add("compulsory","wrapatoms","false","If true, atomic coordinates are written wrapped in minimal cell"); diff --git a/src/cltools/SumHills.cpp b/src/cltools/SumHills.cpp index db011599c..e71a84f83 100644 --- a/src/cltools/SumHills.cpp +++ b/src/cltools/SumHills.cpp @@ -55,9 +55,9 @@ plumed sum_hills --hills PATHTOMYHILLSFILE The default name for the output file will be fes.dat Note that starting from this version plumed will automatically detect the number of the variables you have and their periodicity. -Additionally, if you use flexible hills (multivariate gaussians), plumed will understand it from the HILLS file. +Additionally, if you use flexible hills (multivariate Gaussian kernels), plumed will understand it from the HILLS file. -now sum_hills tool accepts als multiple files that will be integrated one after the other +The sum_hills tool will also accept multiple files that will be integrated one after the other \verbatim plumed sum_hills --hills PATHTOMYHILLSFILE1,PATHTOMYHILLSFILE2,PATHTOMYHILLSFILE3 @@ -98,7 +98,7 @@ You can use a --stride keyword to have a dump each bunch of hills you read plumed sum_hills --stride 300 --hills PATHTOMYHILLSFILE \endverbatim -You can also have, in case of welltempered metadynamics, only the negative +You can also have, in case of well tempered metadynamics, only the negative bias instead of the free energy through the keyword --negbias \verbatim @@ -118,7 +118,7 @@ plumed sum_hills --histo PATHTOMYCOLVARORHILLSFILE --sigma 0.2,0.2 --kt 0.6 in this case you need a --kt to do the reweighting and then you need also some width (with the --sigma keyword) for the histogram calculation (actually will be done with -gaussians, so it will be a continuous histogram) +Gaussian kernels, so it will be a continuous histogram) Here the default output will be histo.dat. 
Note that also here you can have multiple input files separated by a comma. @@ -208,13 +208,13 @@ void CLToolSumHills::registerKeywords( Keywords& keys ) { keys.add("optional","--bin","the number of bins for the grid"); keys.add("optional","--spacing","grid spacing, alternative to the number of bins"); keys.add("optional","--idw","specify the variables to be used for the free-energy/histogram (default is all). With --hills the other variables will be integrated out, with --histo the other variables won't be considered"); - keys.add("optional","--outfile","specify the outputfile for sumhills"); - keys.add("optional","--outhisto","specify the outputfile for the histogram"); + keys.add("optional","--outfile","specify the output file for sumhills"); + keys.add("optional","--outhisto","specify the output file for the histogram"); keys.add("optional","--kt","specify temperature in energy units for integrating out variables"); keys.add("optional","--sigma"," a vector that specify the sigma for binning (only needed when doing histogram "); - keys.addFlag("--negbias",false," print the negative bias instead of the free energy (only needed with welltempered runs and flexible hills) "); + keys.addFlag("--negbias",false," print the negative bias instead of the free energy (only needed with well tempered runs and flexible hills) "); keys.addFlag("--nohistory",false," to be used with --stride: it splits the bias/histogram in pieces without previous history "); - keys.addFlag("--mintozero",false," it translate all the minimum value in bias/histogram to zero (usefull to compare results) "); + keys.addFlag("--mintozero",false," it translate all the minimum value in bias/histogram to zero (useful to compare results) "); keys.add("optional","--fmt","specify the output format"); } diff --git a/src/cltools/pesmd.cpp b/src/cltools/pesmd.cpp index 2fae8545b..912d35532 100644 --- a/src/cltools/pesmd.cpp +++ b/src/cltools/pesmd.cpp @@ -100,13 +100,13 @@ namespace cltools { class PesMD : public PLMD::CLTool { string description() const { - return "langevin dynamics on PLUMED energy landscape"; + return "Langevin dynamics on PLUMED energy landscape"; } public: static void registerKeywords( Keywords& keys ) { keys.add("compulsory","nstep","The number of steps of dynamics you want to run"); keys.add("compulsory","temperature","NVE","the temperature at which you wish to run the simulation in LJ units"); - keys.add("compulsory","friction","off","The friction (in LJ units) for the langevin thermostat that is used to keep the temperature constant"); + keys.add("compulsory","friction","off","The friction (in LJ units) for the Langevin thermostat that is used to keep the temperature constant"); keys.add("compulsory","tstep","0.005","the integration timestep in LJ units"); keys.add("compulsory","dimension","the dimension of your energy landscape"); keys.add("compulsory","plumed","plumed.dat","the name of the plumed input file containing the potential"); diff --git a/src/colvar/Angle.cpp b/src/colvar/Angle.cpp index 9403d9c52..ad1a0cbd6 100644 --- a/src/colvar/Angle.cpp +++ b/src/colvar/Angle.cpp @@ -46,7 +46,7 @@ If _three_ atoms are given, the angle is defined as: |{\bf r}_{21}| |{\bf r}_{23}|}\right) \f] Here \f$ {\bf r}_{ij}\f$ is the distance vector among the -i-th and the j-th listed atom. +\f$i\f$th and the \f$j\f$th listed atom. 
If _four_ atoms are given, the angle is defined as: \f[ diff --git a/src/colvar/Constant.cpp b/src/colvar/Constant.cpp index 13164b9b9..ec83c324e 100644 --- a/src/colvar/Constant.cpp +++ b/src/colvar/Constant.cpp @@ -32,8 +32,7 @@ namespace colvar { //+PLUMEDOC COLVAR CONSTANT /* -Return one or more constant quantities -with or without derivatives. +Return one or more constant quantities with or without derivatives. Useful in combination with functions that takes in input constants or parameters. diff --git a/src/colvar/ContactMap.cpp b/src/colvar/ContactMap.cpp index a5545ad21..6f238e22d 100644 --- a/src/colvar/ContactMap.cpp +++ b/src/colvar/ContactMap.cpp @@ -57,7 +57,7 @@ PRINT ARG=f1.* FILE=colvar The following example calculates the difference of the current contact map with respect to a reference provided. In this case REFERENCE is the fraction of contact that is formed -(i.e. the distance between two atoms transformed with the SWITH), while R_0 is the contact +(i.e. the distance between two atoms transformed with the SWITCH), while R_0 is the contact distance. WEIGHT gives the relative weight of each contact to the final distance measure. \plumedfile @@ -142,7 +142,7 @@ void ContactMap::registerKeywords( Keywords& keys ) { "weight value for each contact."); keys.reset_style("SWITCH","compulsory"); keys.addFlag("SUM",false,"calculate the sum of all the contacts in the input"); - keys.addFlag("CMDIST",false,"calculate the distance with respect to the provided reference contant map"); + keys.addFlag("CMDIST",false,"calculate the distance with respect to the provided reference contact map"); keys.addFlag("SERIAL",false,"Perform the calculation in serial - for debug purpose"); keys.addOutputComponent("contact","default","By not using SUM or CMDIST each contact will be stored in a component"); } diff --git a/src/colvar/Coordination.cpp b/src/colvar/Coordination.cpp index 2247c828f..36a9cb13d 100644 --- a/src/colvar/Coordination.cpp +++ b/src/colvar/Coordination.cpp @@ -41,7 +41,7 @@ and is defined as \f] where \f$s_{ij}\f$ is 1 if the contact between atoms \f$i\f$ and \f$j\f$ is formed, zero otherwise. -In practise, \f$s_{ij}\f$ is replaced with a switching function to make it differentiable. +In actuality, \f$s_{ij}\f$ is replaced with a switching function to make it differentiable. The default switching function is: \f[ s_{ij} = \frac{ 1 - \left(\frac{{\bf r}_{ij}-d_0}{r_0}\right)^n } { 1 - \left(\frac{{\bf r}_{ij}-d_0}{r_0}\right)^m } @@ -61,7 +61,7 @@ so that they actually count as "zero". \par Examples -The following example instructs plumed to calculate the total coordination number of the atoms in group 1-10 with the atoms in group 20-100. For atoms 1-10 coordination numbers are calculated that count the number of atoms from the second group that are within 0.3 nm of the central atom. A neighbour list is used to make this calculation faster, this neighbour list is updated every 100 steps. +The following example instructs plumed to calculate the total coordination number of the atoms in group 1-10 with the atoms in group 20-100. For atoms 1-10 coordination numbers are calculated that count the number of atoms from the second group that are within 0.3 nm of the central atom. A neighbor list is used to make this calculation faster, this neighbor list is updated every 100 steps. 
\plumedfile COORDINATION GROUPA=1-10 GROUPB=20-100 R_0=0.3 NLIST NL_CUTOFF=0.5 NL_STRIDE=100 \endplumedfile @@ -114,7 +114,7 @@ void Coordination::registerKeywords( Keywords& keys ) { keys.add("compulsory","MM","0","The m parameter of the switching function; 0 implies 2*NN"); keys.add("compulsory","D_0","0.0","The d_0 parameter of the switching function"); keys.add("compulsory","R_0","The r_0 parameter of the switching function"); - keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous swiching function defined above. " + keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous switching function defined above. " "The following provides information on the \\ref switchingfunction that are available. " "When this keyword is present you no longer need the NN, MM, D_0 and R_0 keywords."); } diff --git a/src/colvar/CoordinationBase.cpp b/src/colvar/CoordinationBase.cpp index d6175db63..cade44e02 100644 --- a/src/colvar/CoordinationBase.cpp +++ b/src/colvar/CoordinationBase.cpp @@ -35,9 +35,9 @@ void CoordinationBase::registerKeywords( Keywords& keys ) { Colvar::registerKeywords(keys); keys.addFlag("SERIAL",false,"Perform the calculation in serial - for debug purpose"); keys.addFlag("PAIR",false,"Pair only 1st element of the 1st group with 1st element in the second, etc"); - keys.addFlag("NLIST",false,"Use a neighbour list to speed up the calculation"); - keys.add("optional","NL_CUTOFF","The cutoff for the neighbour list"); - keys.add("optional","NL_STRIDE","The frequency with which we are updating the atoms in the neighbour list"); + keys.addFlag("NLIST",false,"Use a neighbor list to speed up the calculation"); + keys.add("optional","NL_CUTOFF","The cutoff for the neighbor list"); + keys.add("optional","NL_STRIDE","The frequency with which we are updating the atoms in the neighbor list"); keys.add("atoms","GROUPA","First list of atoms"); keys.add("atoms","GROUPB","Second list of atoms (if empty, N*(N-1)/2 pairs in GROUPA are counted)"); } diff --git a/src/colvar/Dimer.cpp b/src/colvar/Dimer.cpp index 4cd5d1c07..0659e0746 100644 --- a/src/colvar/Dimer.cpp +++ b/src/colvar/Dimer.cpp @@ -38,10 +38,10 @@ namespace colvar { //+PLUMEDOC COLVAR DIMER /* -This CV computes the Dimer interaction energy for a collection of Dimers. +This CV computes the dimer interaction energy for a collection of dimers. -Each Dimer represents an atom, as described in the Dimer paper, -JCTC 13, 425 (2017). A system of N atoms is thus represented with N Dimers, each +Each dimer represents an atom, as described in the dimer paper \cite dimer-metad. +A system of N atoms is thus represented with N dimers, each Dimer being composed of two beads and eventually a virtual site representing its center of mass. A typical configuration for a dimerized system has the following ordering of atoms: @@ -97,7 +97,7 @@ dim: DIMER TEMP=300 Q=0.5 ALLATOMS DSIGMA=0.002 NOVSITES \endplumedfile The NOVSITES flag is not required if one provides the atom serials of each Dimer. These are -defined through two atomlists provided __instead__ of the ALLATOMS keyword. +defined through two lists of atoms provided __instead__ of the ALLATOMS keyword. 
For example, the Dimer interaction energy of dimers specified by beads (1;23),(5;27),(7;29) is: \plumedfile dim: DIMER TEMP=300 Q=0.5 ATOMS1=1,5,7 ATOMS2=23,27,29 DSIGMA=0.002 @@ -120,7 +120,7 @@ dim: DIMER TEMP=300 Q=0.5 ATOMS1=1,5,7 ATOMS2=23,27,29 DSIGMA=0.002,0.002,0.004, \par Usage of the CV The dimer interaction is not coded in the driver program and has to be inserted -in the hamiltonian of the system as a linear RESTRAINT (see \ref RESTRAINT): +in the Hamiltonian of the system as a linear RESTRAINT (see \ref RESTRAINT): \plumedfile dim: DIMER TEMP=300 Q=0.5 ALLATOMS DSIGMA=0.002 RESTRAINT ARG=dim AT=0 KAPPA=0 SLOPE=1 LABEL=dimforces @@ -162,7 +162,7 @@ void Dimer::registerKeywords( Keywords& keys) { keys.add("atoms", "ATOMS1", "The list of atoms representing the first bead of each Dimer being considered by this CV. Used if ALLATOMS flag is missing"); keys.add("atoms", "ATOMS2", "The list of atoms representing the second bead of each Dimer being considered by this CV. Used if ALLATOMS flag is missing"); keys.addFlag("ALLATOMS", false, "Use EVERY atom of the system. Overrides ATOMS keyword."); - keys.addFlag("NOVSITES", false, "If present the configuration is without virtual sites at the centroids."); + keys.addFlag("NOVSITES", false, "If present the configuration is without virtual sites at the centroid positions."); } diff --git a/src/colvar/Dipole.cpp b/src/colvar/Dipole.cpp index 4ffb359fd..777046e7c 100644 --- a/src/colvar/Dipole.cpp +++ b/src/colvar/Dipole.cpp @@ -36,7 +36,7 @@ Calculate the dipole moment for a group of atoms. When running with periodic boundary conditions, the atoms should be in the proper periodic image. This is done automatically since PLUMED 2.5, -by considering the ordered list of atoms and rebuilding PBCs with a procedure +by considering the ordered list of atoms and rebuilding the molecule with a procedure that is equivalent to that done in \ref WHOLEMOLECULES . Notice that rebuilding is local to this action. This is different from \ref WHOLEMOLECULES which actually modifies the coordinates stored in PLUMED. diff --git a/src/colvar/Distance.cpp b/src/colvar/Distance.cpp index ec6a81c30..d18494bd7 100644 --- a/src/colvar/Distance.cpp +++ b/src/colvar/Distance.cpp @@ -37,7 +37,7 @@ Calculate the distance between a pair of atoms. By default the distance is computed taking into account periodic boundary conditions. This behavior can be changed with the NOPBC flag. -Moreover, single components in cartesian space (x,y, and z, with COMPONENTS) +Moreover, single components in Cartesian space (x,y, and z, with COMPONENTS) or single components projected to the three lattice vectors (a,b, and c, with SCALED_COMPONENTS) can be also computed. diff --git a/src/colvar/EEFSolv.cpp b/src/colvar/EEFSolv.cpp index 2a6ec6176..6aacea6b2 100644 --- a/src/colvar/EEFSolv.cpp +++ b/src/colvar/EEFSolv.cpp @@ -54,7 +54,7 @@ where \f$\Delta G^\mathrm{solv}_i\f$ is the free energy of solvation, \f$\Delta \f] where \f$\Delta G^\mathrm{free}_i\f$ is the solvation free energy of the isolated group, \f$\lambda_i\f$ is the correlation length equal to the width of the first solvation shell and \f$R_i\f$ is the van der Waals radius of atom \f$i\f$. -The output from this collective variable, the free energy of solvation, can be used with the \ref BIASVALUE keyword to provide implicit solvation to a system. All parameters are designed to be used with a modified CHARMM36 force field. 
It takes only non-hydrogen atoms as input, these can be conveniently specified using the \ref GROUP action with the NDX_GROUP parameter. To speed up the calculation, EEFSOLV internally uses a neighbourlist with a cutoff dependent on the type of atom (maximum of 1.95 nm). This cutoff can be extended further by using the NL_BUFFER keyword. +The output from this collective variable, the free energy of solvation, can be used with the \ref BIASVALUE keyword to provide implicit solvation to a system. All parameters are designed to be used with a modified CHARMM36 force field. It takes only non-hydrogen atoms as input, these can be conveniently specified using the \ref GROUP action with the NDX_GROUP parameter. To speed up the calculation, EEFSOLV internally uses a neighbor list with a cutoff dependent on the type of atom (maximum of 1.95 nm). This cutoff can be extended further by using the NL_BUFFER keyword. \par Examples @@ -65,7 +65,7 @@ WHOLEMOLECULES ENTITY0=1-111 # This allows us to select only non-hydrogen atoms protein-h: GROUP NDX_FILE=index.ndx NDX_GROUP=Protein-H -# We extend the cutoff by 0.2 nm and update the neighbourlist every 10 steps +# We extend the cutoff by 0.2 nm and update the neighbor list every 10 steps solv: EEFSOLV ATOMS=protein-h NL_STRIDE=10 NL_BUFFER=0.2 # Here we actually add our calculated energy back to the potential @@ -106,7 +106,7 @@ void EEFSolv::registerKeywords(Keywords& keys) { useCustomisableComponents(keys); keys.add("atoms", "ATOMS", "The atoms to be included in the calculation, e.g. the whole protein."); keys.add("compulsory", "NL_BUFFER", "The buffer to the intrinsic cutoff used when calculating pairwise interactions."); - keys.add("compulsory", "NL_STRIDE", "The frequency with which the neighbourlist is updated."); + keys.add("compulsory", "NL_STRIDE", "The frequency with which the neighbor list is updated."); keys.addFlag("TEMP_CORRECTION", false, "Correct free energy of solvation constants for temperatures different from 298.15 K"); } diff --git a/src/colvar/ERMSD.cpp b/src/colvar/ERMSD.cpp index 8264743fa..1cc29b65f 100644 --- a/src/colvar/ERMSD.cpp +++ b/src/colvar/ERMSD.cpp @@ -51,11 +51,11 @@ It is not unusual, for example, that two RNA structures with low RMSD (i.e. less eRMSD measures the distance between structures by considering only the relative positions and orientations of nucleobases. The eRMSD can be considered as a vectorial version of contact maps and it is calculated as follows: 1. Set up a local reference system in the center of the six-membered ring of each nucleobase in a molecule. - The xy plane lies on the plane of the nucleobase, and it is oriented such that the Watson-Crick interaction is always at \f$ \theta \approx 60^{\circ} \f$. + The xy plane lies on the plane of the nucleobase, and it is oriented such that the Watson-Crick interaction is always at \f$\theta\approx 60^{\circ}\f$. -2. Calculate all pairwise distance vectors \f$ \vec{r}_{i,j} \f$ among base centers. +2. Calculate all pairwise distance vectors \f$\vec{r}_{i,j}\f$ among base centers. -3. Rescale distance vectors as \f$ \tilde{\vec{r}}_{i,j} = (r_x/a,r_y/a,r_z/b) \f$, where a=b=5 \f$\r{A}\f$, c=3 \f$\r{A}\f$. This rescaling has the effect of weghting more deviations on the z-axis with respect to the x/y directions. +3. Rescale distance vectors as \f$\tilde{\vec{r}}_{i,j}=(r_x/a,r_y/a,r_z/b)\f$, where a=b=5 \f$\r{A}\f$, c=3 \f$\r{A}\f$. This rescaling has the effect of weighting more deviations on the z-axis with respect to the x/y directions. 4. 
Calculate the G vectors diff --git a/src/colvar/Fake.cpp b/src/colvar/Fake.cpp index cbd038427..3a0973c4f 100644 --- a/src/colvar/Fake.cpp +++ b/src/colvar/Fake.cpp @@ -34,8 +34,7 @@ namespace colvar { //+PLUMEDOC COLVAR FAKE /* -This is a fake colvar container used by cltools or various other actions -and just support input and period definition +This is a fake colvar container used by cltools or various other actions that supports input and period definitions \par Examples diff --git a/src/colvar/Gyration.cpp b/src/colvar/Gyration.cpp index 3dd09d063..282fe2f9d 100644 --- a/src/colvar/Gyration.cpp +++ b/src/colvar/Gyration.cpp @@ -60,7 +60,7 @@ The radius of gyration usually makes sense when atoms used for the calculation are all part of the same molecule. When running with periodic boundary conditions, the atoms should be in the proper periodic image. This is done automatically since PLUMED 2.2, -by considering the ordered list of atoms and rebuilding PBCs with a procedure +by considering the ordered list of atoms and rebuilding the broken entities using a procedure that is equivalent to that done in \ref WHOLEMOLECULES . Notice that rebuilding is local to this action. This is different from \ref WHOLEMOLECULES which actually modifies the coordinates stored in PLUMED. diff --git a/src/colvar/MultiRMSD.cpp b/src/colvar/MultiRMSD.cpp index a626dc34e..fc2397db2 100644 --- a/src/colvar/MultiRMSD.cpp +++ b/src/colvar/MultiRMSD.cpp @@ -98,13 +98,13 @@ with the TER keyword being used to separate the various domains in you protein. The following tells plumed to calculate the RMSD distance between the positions of the atoms in the reference file and their instantaneous -position. The Kearseley algorithm for each of the domains. +position. The Kearsley algorithm for each of the domains. \plumedfile MULTI-RMSD REFERENCE=file.pdb TYPE=MULTI-OPTIMAL \endplumedfile -The following tells plumed to calculate the RMSD distance btween the positions of +The following tells plumed to calculate the RMSD distance between the positions of the atoms in the domains of reference the reference structure and their instantaneous positions. Here distances are calculated using the \ref DRMSD measure. @@ -153,7 +153,7 @@ void MultiRMSD::registerKeywords(Keywords& keys) { Colvar::registerKeywords(keys); keys.add("compulsory","REFERENCE","a file in pdb format containing the reference structure and the atoms involved in the CV."); keys.add("compulsory","TYPE","MULTI-SIMPLE","the manner in which RMSD alignment is performed. 
Should be MULTI-OPTIMAL, MULTI-OPTIMAL-FAST, MULTI-SIMPLE or MULTI-DRMSD."); - keys.addFlag("SQUARED",false," This should be setted if you want MSD instead of RMSD "); + keys.addFlag("SQUARED",false," This should be set if you want the mean squared displacement instead of the root mean squared displacement"); } MultiRMSD::MultiRMSD(const ActionOptions&ao): diff --git a/src/colvar/PCARMSD.cpp b/src/colvar/PCARMSD.cpp index 6b7f220ca..bf682f312 100644 --- a/src/colvar/PCARMSD.cpp +++ b/src/colvar/PCARMSD.cpp @@ -75,8 +75,8 @@ void PCARMSD::registerKeywords(Keywords& keys) { keys.add("compulsory","EIGENVECTORS","a file in pdb format containing the reference structure and the atoms involved in the CV."); //useCustomisableComponents(keys); keys.addOutputComponent("eig","default","the projections on each eigenvalue are stored on values labeled eig-1, eig-2, ..."); - keys.addOutputComponent("residual","default","the distance of the present configuration from the configuration supplied as AVERAGE in terms of MSD after optimal alignment "); - keys.addFlag("SQUARED-ROOT",false," This should be setted if you want RMSD instead of MSD "); + keys.addOutputComponent("residual","default","the distance of the present configuration from the configuration supplied as AVERAGE in terms of mean squared displacement after optimal alignment "); + keys.addFlag("SQUARED-ROOT",false," This should be set if you want RMSD instead of mean squared displacement "); } PCARMSD::PCARMSD(const ActionOptions&ao): diff --git a/src/colvar/PathMSD.cpp b/src/colvar/PathMSD.cpp index 52781c338..6e411ef6b 100644 --- a/src/colvar/PathMSD.cpp +++ b/src/colvar/PathMSD.cpp @@ -39,7 +39,7 @@ in input ("sss" component) and the distance from them ("zzz" component). When running with periodic boundary conditions, the atoms should be in the proper periodic image. This is done automatically since PLUMED 2.5, -by considering the ordered list of atoms and rebuilding PBCs with a procedure +by considering the ordered list of atoms and rebuilding molecules with a procedure that is equivalent to that done in \ref WHOLEMOLECULES . Notice that rebuilding is local to this action. This is different from \ref WHOLEMOLECULES which actually modifies the coordinates stored in PLUMED. @@ -58,9 +58,9 @@ p1: PATHMSD REFERENCE=file.pdb LAMBDA=500.0 NEIGH_STRIDE=4 NEIGH_SIZE=8 PRINT ARG=p1.sss,p1.zzz STRIDE=1 FILE=colvar FMT=%8.4f \endplumedfile -note that NEIGH_STRIDE=4 NEIGH_SIZE=8 control the neighborlist parameter (optional but +note that NEIGH_STRIDE=4 NEIGH_SIZE=8 control the neighbor list parameter (optional but recommended for performance) and states that the neighbor list will be calculated every 4 -timesteps and consider only the closest 8 member to the actual md snapshots. +steps and consider only the closest 8 member to the actual md snapshots. In the REFERENCE PDB file the frames must be separated either using END or ENDMDL. 
diff --git a/src/colvar/PropertyMap.cpp b/src/colvar/PropertyMap.cpp index 0e5f640b4..2e258f09a 100644 --- a/src/colvar/PropertyMap.cpp +++ b/src/colvar/PropertyMap.cpp @@ -35,7 +35,7 @@ This Colvar calculates the property maps according to the work of Spiwok \cite S Basically it calculates -\f{eqnarray} +\f{eqnarray*}{ X=\frac{\sum_i X_i*\exp(-\lambda D_i(x))}{\sum_i \exp(-\lambda D_i(x))} \\ Y=\frac{\sum_i Y_i*\exp(-\lambda D_i(x))}{\sum_i \exp(-\lambda D_i(x))} \\ \cdots\\ @@ -43,12 +43,12 @@ zzz=-\frac{1}{\lambda}\log(\sum_i \exp(-\lambda D_i(x))) \f} where the parameters \f$X_i\f$ and \f$Y_i\f$ are provided in the input pdb (allv.pdb in this case) and - \f$D_i(x)\f$ is the MSD after optimal alignment calculated on the pdb frames you input (see Kearsley). + \f$D_i(x)\f$ is the mean squared displacement after optimal alignment calculated on the pdb frames you input (see Kearsley). When running with periodic boundary conditions, the atoms should be in the proper periodic image. This is done automatically since PLUMED 2.5, -by considering the ordered list of atoms and rebuilding PBCs with a procedure +by considering the ordered list of atoms and rebuilding molecules using a procedure that is equivalent to that done in \ref WHOLEMOLECULES . Notice that rebuilding is local to this action. This is different from \ref WHOLEMOLECULES which actually modifies the coordinates stored in PLUMED. @@ -64,9 +64,9 @@ p3: PROPERTYMAP REFERENCE=../../trajectories/path_msd/allv.pdb PROPERTY=X,Y LAMB PRINT ARG=p3.X,p3.Y,p3.zzz STRIDE=1 FILE=colvar FMT=%8.4f \endplumedfile -note that NEIGH_STRIDE=4 NEIGH_SIZE=8 control the neighborlist parameter (optional but +note that NEIGH_STRIDE=4 NEIGH_SIZE=8 control the neighbor list parameter (optional but recommended for performance) and states that the neighbor list will be calculated every 4 -timesteps and consider only the closest 8 member to the actual md snapshots. +steps and consider only the closest 8 member to the actual md snapshots. In this case the input line instructs plumed to look for two properties X and Y with attached values in the REMARK line of the reference pdb (Note: No spaces from X and = and 1 !!!!). diff --git a/src/colvar/Puckering.cpp b/src/colvar/Puckering.cpp index a91ee42fc..5daf96931 100644 --- a/src/colvar/Puckering.cpp +++ b/src/colvar/Puckering.cpp @@ -43,7 +43,7 @@ namespace colvar { For 5-membered rings the implementation is the one discussed in \cite huang2014improvement . This implementation is simple and can be used in RNA to distinguish C2'-endo and C3'-endo conformations. - Both the polar coordinates (phs and amp) and the cartesian coordinates (Zx and Zy) are provided. + Both the polar coordinates (phs and amp) and the Cartesian coordinates (Zx and Zy) are provided. C2'-endo conformations have negative Zx, whereas C3'-endo conformations have positive Zy. Notation is consistent with \cite huang2014improvement . The five atoms should be provided as C4',O4',C1',C2',C3'. 
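As a minimal illustration of the atom ordering described above (the atom indices here are purely hypothetical placeholders for the C4',O4',C1',C2',C3' atoms of one sugar ring), the pseudorotation phase and amplitude could be monitored with:

\plumedfile
# the five ring atoms given in the order C4',O4',C1',C2',C3'
puck: PUCKERING ATOMS=1,2,3,4,5
# print the pseudorotation phase and amplitude every 10 steps
PRINT ARG=puck.phs,puck.amp FILE=colvar STRIDE=10
\endplumedfile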
@@ -91,8 +91,8 @@ void Puckering::registerKeywords(Keywords& keys) { keys.add("atoms","ATOMS","the five or six atoms of the sugar ring in the proper order"); keys.addOutputComponent("phs","default","Pseudorotation phase (5 membered rings)"); keys.addOutputComponent("amp","default","Pseudorotation amplitude (5 membered rings)"); - keys.addOutputComponent("Zx","default","Pseudorotation x cartesian component (5 membered rings)"); - keys.addOutputComponent("Zy","default","Pseudorotation y cartesian component (5 membered rings)"); + keys.addOutputComponent("Zx","default","Pseudorotation x Cartesian component (5 membered rings)"); + keys.addOutputComponent("Zy","default","Pseudorotation y Cartesian component (5 membered rings)"); keys.addOutputComponent("phi","default","Pseudorotation phase (6 membered rings)"); keys.addOutputComponent("theta","default","Theta angle (6 membered rings)"); keys.addOutputComponent("amplitude","default","Pseudorotation amplitude (6 membered rings)"); diff --git a/src/colvar/RMSD.cpp b/src/colvar/RMSD.cpp index 3d36b37f6..164dca04f 100644 --- a/src/colvar/RMSD.cpp +++ b/src/colvar/RMSD.cpp @@ -93,11 +93,11 @@ to this action that you set using REFERENCE=whatever.pdb). This input reference containing the set of atoms for which you want to calculate the RMSD displacement and their positions in the reference configuration. It is important to note that the indices in this pdb need to be set correctly. The indices in this file determine the indices of the instantaneous atomic positions that are used by PLUMED when calculating this colvar. As such if you want to calculate the RMSD distance -moved by the 1st, 4th, 6th and 28th atoms in the MD codes input file then the indices of the corresponding refernece positions in this pdb +moved by the first, fourth, sixth and twenty eighth atoms in the MD codes input file then the indices of the corresponding reference positions in this pdb file should be set equal to 1, 4, 6 and 28. The pdb input file should also contain the values of \f$w\f$ and \f$w'\f$. In particular, the OCCUPANCY column (the first column after the coordinates) -is used provides the values of \f$ w'\f$ that are used to calculate the position of the centre of mass. The BETA column (the second column +is used provides the values of \f$ w'\f$ that are used to calculate the position of the center of mass. The BETA column (the second column after the Cartesian coordinates) is used to provide the \f$ w \f$ values which are used in the the calculation of the displacement. Please note that it is possible to use fractional values for beta and for the occupancy. However, we recommend you only do this when you really know what you are doing however as the results can be rather strange. @@ -105,7 +105,7 @@ you really know what you are doing however as the results can be rather strange. In PDB files the atomic coordinates and box lengths should be in Angstroms unless you are working with natural units. If you are working with natural units then the coordinates should be in your natural length unit. For more details on the PDB file format visit http://www.wwpdb.org/docs.html. -Make sure your PDB file is correclty formatted as explained \ref pdbreader "in this page". +Make sure your PDB file is correctly formatted as explained \ref pdbreader "in this page". A different method is used to calculate the RMSD distance when you use TYPE=OPTIMAL on the input line. 
In this case the root mean square deviation is calculated after the positions of geometric centers in the reference and instantaneous configurations are aligned AND after @@ -117,7 +117,7 @@ d(X,X') = \sqrt{ \sum_i \sum_\alpha^{x,y,z} \frac{w_i}{\sum_j w_j}[ X_{i,\alpha \f] where \f$ M(X,X',w') \f$ is the optimal alignment matrix which is calculated using the Kearsley \cite kearsley algorithm. Again different sets of -weights are used for the alignment (\f$w'\f$) and for the displacement calcuations (\f$w\f$). +weights are used for the alignment (\f$w'\f$) and for the displacement calculations (\f$w\f$). This gives a great deal of flexibility as it allows you to use a different sets of atoms (which may or may not overlap) for the alignment and displacement parts of the calculation. This may be very useful when you want to calculate how a ligand moves about in a protein cavity as you can use the protein as a reference system and do no alignment of the ligand. @@ -130,7 +130,7 @@ that are available in plumed. More information on these various methods can be When running with periodic boundary conditions, the atoms should be in the proper periodic image. This is done automatically since PLUMED 2.5, -by considering the ordered list of atoms and rebuilding PBCs with a procedure +by considering the ordered list of atoms and rebuilding molecules using a procedure that is equivalent to that done in \ref WHOLEMOLECULES . Notice that rebuilding is local to this action. This is different from \ref WHOLEMOLECULES which actually modifies the coordinates stored in PLUMED. @@ -143,7 +143,7 @@ periodic image. The following tells plumed to calculate the RMSD distance between the positions of the atoms in the reference file and their instantaneous -position. The Kearseley algorithm is used so this is done optimally. +position. The Kearsley algorithm is used so this is done optimally. \plumedfile RMSD REFERENCE=file.pdb TYPE=OPTIMAL @@ -160,7 +160,7 @@ void RMSD::registerKeywords(Keywords& keys) { Colvar::registerKeywords(keys); keys.add("compulsory","REFERENCE","a file in pdb format containing the reference structure and the atoms involved in the CV."); keys.add("compulsory","TYPE","SIMPLE","the manner in which RMSD alignment is performed. Should be OPTIMAL or SIMPLE."); - keys.addFlag("SQUARED",false," This should be setted if you want MSD instead of RMSD "); + keys.addFlag("SQUARED",false," This should be set if you want mean squared displacement instead of RMSD "); } RMSD::RMSD(const ActionOptions&ao): diff --git a/src/colvar/Torsion.cpp b/src/colvar/Torsion.cpp index 0d1fed867..edc620765 100644 --- a/src/colvar/Torsion.cpp +++ b/src/colvar/Torsion.cpp @@ -62,7 +62,7 @@ PRINT ARG=t1,t2 FILE=colvar STRIDE=10 \endplumedfile Here, \@phi-3 tells plumed that you would like to calculate the \f$\phi\f$ angle in the third residue of the protein. -Similarly \@psi-4 tells plumed that you want to calculate the \f$\psi\f$ angle of the 4th residue of the protein. +Similarly \@psi-4 tells plumed that you want to calculate the \f$\psi\f$ angle of the fourth residue of the protein. Both of the previous examples specify that the torsion angle should be calculated based on the position of four atoms. 
For the first example in particular the assumption when the torsion is specified in this way is that there are chemical diff --git a/src/core/ActionSetup.cpp b/src/core/ActionSetup.cpp index 58da48a9d..311ebb8bf 100644 --- a/src/core/ActionSetup.cpp +++ b/src/core/ActionSetup.cpp @@ -32,7 +32,7 @@ ActionSetup::ActionSetup(const ActionOptions&ao): { const ActionSet& actionset(plumed.getActionSet()); for(const auto & p : actionset) { -// check that all the preceeding actions are ActionSetup +// check that all the preceding actions are ActionSetup if( !dynamic_cast<ActionSetup*>(p.get()) && !dynamic_cast<ActionAnyorder*>(p.get()) ) error("Action " + getLabel() + " is a setup action, and should be only preceeded by other setup actions or by actions that can be used in any order."); } } diff --git a/src/core/ActionWithArguments.cpp b/src/core/ActionWithArguments.cpp index 8a9d3d9fa..fb2bd2617 100644 --- a/src/core/ActionWithArguments.cpp +++ b/src/core/ActionWithArguments.cpp @@ -37,8 +37,8 @@ void ActionWithArguments::registerKeywords(Keywords& keys) { keys.reserve("numbered","ARG","the input for this action is the scalar output from one or more other actions. The particular scalars that you will use " "are referenced using the label of the action. If the label appears on its own then it is assumed that the Action calculates " "a single scalar value. The value of this scalar is thus used as the input to this new action. If * or *.* appears the " - "scalars calculated by all the proceding actions in the input file are taken. Some actions have multi-component outputs and " - "each component of the output has a specific label. For example a \\ref DISTANCE action labelled dist may have three componets " + "scalars calculated by all the proceeding actions in the input file are taken. Some actions have multi-component outputs and " + "each component of the output has a specific label. For example a \\ref DISTANCE action labelled dist may have three components " "x, y and z. To take just the x component you should use dist.x, if you wish to take all three components then use dist.*." "More information on the referencing of Actions can be found in the section of the manual on the PLUMED \\ref Syntax. " "Scalar values can also be " diff --git a/src/core/ActionWithValue.cpp b/src/core/ActionWithValue.cpp index 85b50b3cc..b3f224e4c 100644 --- a/src/core/ActionWithValue.cpp +++ b/src/core/ActionWithValue.cpp @@ -30,7 +30,7 @@ void ActionWithValue::registerKeywords(Keywords& keys) { keys.setComponentsIntroduction("By default the value of the calculated quantity can be referenced elsewhere in the " "input file by using the label of the action. Alternatively this Action can be used " "to calculate the following quantities by employing the keywords listed " - "below. These quanties can be referenced elsewhere in the input by using this Action's " + "below. These quantities can be referenced elsewhere in the input by using this Action's " "label followed by a dot and the name of the quantity required from the list below."); keys.addFlag("NUMERICAL_DERIVATIVES", false, "calculate the derivatives for these quantities numerically"); } @@ -41,14 +41,14 @@ void ActionWithValue::noAnalyticalDerivatives(Keywords& keys) { } void ActionWithValue::componentsAreNotOptional(Keywords& keys) { - keys.setComponentsIntroduction("By default this Action calculates the following quantities. These quanties can " + keys.setComponentsIntroduction("By default this Action calculates the following quantities. 
These quantities can " "be referenced elsewhere in the input by using this Action's label followed by a " "dot and the name of the quantity required from the list below."); } void ActionWithValue::useCustomisableComponents(Keywords& keys) { keys.setComponentsIntroduction("The names of the components in this action can be customized by the user in the " - "actions input file. However, in addition to these customizable components the " + "actions input file. However, in addition to the components that can be customized the " "following quantities will always be output"); } diff --git a/src/crystallization/BondOrientation.cpp b/src/crystallization/BondOrientation.cpp index 4c822dade..fa0ce104d 100644 --- a/src/crystallization/BondOrientation.cpp +++ b/src/crystallization/BondOrientation.cpp @@ -59,14 +59,14 @@ void BondOrientation::registerKeywords( Keywords& keys ) { keys.reset_style("ATOMS","atoms"); keys.add("atoms-1","GROUP","Calculate the distance between each distinct pair of atoms in the group"); keys.add("atoms-2","GROUPA","Calculate the distances between all the atoms in GROUPA and all " - "the atoms in GROUPB. This must be used in conjuction with GROUPB."); + "the atoms in GROUPB. This must be used in conjunction with GROUPB."); keys.add("atoms-2","GROUPB","Calculate the distances between all the atoms in GROUPA and all the atoms " - "in GROUPB. This must be used in conjuction with GROUPA."); + "in GROUPB. This must be used in conjunction with GROUPA."); keys.add("compulsory","NN","12","The n parameter of the switching function "); keys.add("compulsory","MM","0","The m parameter of the switching function; 0 implies 2*NN"); keys.add("compulsory","D_0","0.0","The d_0 parameter of the switching function"); keys.add("compulsory","R_0","The r_0 parameter of the switching function"); - keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous swiching function defined above. " + keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous switching function defined above. " "The following provides information on the \\ref switchingfunction that are available. " "When this keyword is present you no longer need the NN, MM, D_0 and R_0 keywords."); keys.use("VMEAN"); keys.use("VSUM"); diff --git a/src/crystallization/CubicHarmonicBase.cpp b/src/crystallization/CubicHarmonicBase.cpp index 38f326598..2268dac80 100644 --- a/src/crystallization/CubicHarmonicBase.cpp +++ b/src/crystallization/CubicHarmonicBase.cpp @@ -37,7 +37,7 @@ void CubicHarmonicBase::registerKeywords( Keywords& keys ) { keys.add("compulsory","MM","0","The m parameter of the switching function; 0 implies 2*NN"); keys.add("compulsory","D_0","0.0","The d_0 parameter of the switching function"); keys.add("compulsory","R_0","The r_0 parameter of the switching function"); - keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous swiching function defined above. " + keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous switching function defined above. " "The following provides information on the \\ref switchingfunction that are available. 
" "When this keyword is present you no longer need the NN, MM, D_0 and R_0 keywords."); keys.add("compulsory","PHI","0.0","The Euler rotational angle phi"); diff --git a/src/crystallization/Gradient.cpp b/src/crystallization/Gradient.cpp index e59b78908..eed9ed209 100644 --- a/src/crystallization/Gradient.cpp +++ b/src/crystallization/Gradient.cpp @@ -64,7 +64,7 @@ PLUMED_REGISTER_ACTION(Gradient,"GRADIENT") void Gradient::registerKeywords( Keywords& keys ) { VolumeGradientBase::registerKeywords( keys ); keys.add("atoms","ORIGIN","we will use the position of this atom as the origin in our calculation"); - keys.add("compulsory","DIR","xyz","the directions in which we are calculating the graident. Should be x, y, z, xy, xz, yz or xyz"); + keys.add("compulsory","DIR","xyz","the directions in which we are calculating the gradient. Should be x, y, z, xy, xz, yz or xyz"); keys.add("compulsory","NBINS","number of bins to use in each direction for the calculation of the gradient"); keys.add("compulsory","SIGMA","1.0","the width of the function to be used for kernel density estimation"); keys.add("compulsory","KERNEL","gaussian","the type of kernel function to be used"); diff --git a/src/crystallization/InterMolecularTorsions.cpp b/src/crystallization/InterMolecularTorsions.cpp index ff2fff175..a3c31c576 100644 --- a/src/crystallization/InterMolecularTorsions.cpp +++ b/src/crystallization/InterMolecularTorsions.cpp @@ -30,7 +30,7 @@ //+PLUMEDOC MCOLVARF INTERMOLECULARTORSIONS /* -Calculate torsions between vectors on adjacent molecules +Calculate torsion angles between vectors on adjacent molecules This variable can be used to calculate the average torsional angles between vectors. In other words, it can be used to compute quantities like this: @@ -56,8 +56,8 @@ that are close together. The example input below is necessarily but gives you an idea of what can be achieved using this action. The orientations and positions of four molecules are defined using the \ref MOLECULES action as the position of the -centeres of mass of the two atoms specified and the direction of the vector connecting the two atoms that were specified. -The torsional angles between the molecules are then calculated by the \ref INTERMOLECULARTORSIONS command labelled tt_p. +centers of mass of the two atoms specified and the direction of the vector connecting the two atoms that were specified. +The torsional angles between the molecules are then calculated by the \ref INTERMOLECULARTORSIONS command labelled torsion_p. We then compute a \ref HISTOGRAM that shows the distribution that these torsional angles take in the structure. The weight a given torsional angle contributes to this \ref HISTOGRAM is determined using a \ref switchingfunction that acts on the distance between the two molecules. 
As such the torsional angles between molecules that are close together contribute a high weight to the @@ -66,8 +66,8 @@ averaged over the whole trajectory and output once all the trajectory frames hav \plumedfile m1: MOLECULES MOL1=1,2 MOL2=3,4 MOL3=5,6 MOL4=7,8 -tt_p: INTERMOLECULARTORSIONS MOLS=m1 SWITCH={RATIONAL R_0=0.25 D_0=2.0 D_MAX=3.0} -htt_p: HISTOGRAM DATA=tt_p GRID_MIN=-pi GRID_MAX=pi BANDWIDTH=0.1 GRID_BIN=200 STRIDE=1 +torsion_p: INTERMOLECULARTORSIONS MOLS=m1 SWITCH={RATIONAL R_0=0.25 D_0=2.0 D_MAX=3.0} +htt_p: HISTOGRAM DATA=torsion_p GRID_MIN=-pi GRID_MAX=pi BANDWIDTH=0.1 GRID_BIN=200 STRIDE=1 DUMPGRID GRID=htt_p FILE=myhist.out \endplumedfile @@ -98,15 +98,15 @@ PLUMED_REGISTER_ACTION(InterMolecularTorsions,"INTERMOLECULARTORSIONS") void InterMolecularTorsions::registerKeywords( Keywords& keys ) { MultiColvarBase::registerKeywords( keys ); keys.add("atoms","MOLS","The molecules you would like to calculate the torsional angles between. This should be the label/s of \\ref MOLECULES or \\ref PLANES actions"); - keys.add("atoms-1","MOLSA","In this version of the input the torsional angles between all pairs of atoms including one atom from MOLA one atom from MOLB will be computed. " + keys.add("atoms-1","MOLSA","In this version of the input the torsional angles between all pairs of atoms including one atom from MOLSA and one atom from MOLSB will be computed. " "This should be the label/s of \\ref MOLECULES or \\ref PLANES actions"); - keys.add("atoms-1","MOLSB","In this version of the input the torsional angles between all pairs of atoms including one atom from MOLA one atom from MOLB will be computed. " + keys.add("atoms-1","MOLSB","In this version of the input the torsional angles between all pairs of atoms including one atom from MOLSA and one atom from MOLSB will be computed. " "This should be the label/s of \\ref MOLECULES or \\ref PLANES actions"); keys.add("compulsory","NN","6","The n parameter of the switching function "); keys.add("compulsory","MM","0","The m parameter of the switching function; 0 implies 2*NN"); keys.add("compulsory","D_0","0.0","The d_0 parameter of the switching function"); keys.add("compulsory","R_0","The r_0 parameter of the switching function"); - keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous swiching function defined above. " + keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous switching function defined above. " "The following provides information on the \\ref switchingfunction that are available. " "When this keyword is present you no longer need the NN, MM, D_0 and R_0 keywords."); // Use actionWithDistributionKeywords diff --git a/src/crystallization/MoleculePlane.cpp b/src/crystallization/MoleculePlane.cpp index 1376d7710..8f8bbc5d0 100644 --- a/src/crystallization/MoleculePlane.cpp +++ b/src/crystallization/MoleculePlane.cpp @@ -54,7 +54,7 @@ void MoleculePlane::registerKeywords( Keywords& keys ) { "second atoms and the vector connecting the second and third atoms. If four atoms are specified the " "orientation of the molecule is taken as the normal to the plane containing the vector connecting the " "first and second atoms and the vector connecting the third and fourth atoms.
The molecule is always " - "assumed to lie at the geometric centre for the three/four atoms."); + "assumed to lie at the geometric center for the three/four atoms."); keys.reset_style("MOL","atoms"); } diff --git a/src/crystallization/OrientationSphere.cpp b/src/crystallization/OrientationSphere.cpp index e1af69d80..e932f0527 100644 --- a/src/crystallization/OrientationSphere.cpp +++ b/src/crystallization/OrientationSphere.cpp @@ -34,7 +34,7 @@ void OrientationSphere::registerKeywords( Keywords& keys ) { keys.add("compulsory","MM","0","The m parameter of the switching function; 0 implies 2*NN"); keys.add("compulsory","D_0","0.0","The d_0 parameter of the switching function"); keys.add("compulsory","R_0","The r_0 parameter of the switching function"); - keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous swiching function defined above. " + keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous switching function defined above. " "The following provides information on the \\ref switchingfunction that are available. " "When this keyword is present you no longer need the NN, MM, D_0 and R_0 keywords."); // Use actionWithDistributionKeywords diff --git a/src/crystallization/PolymerAngles.cpp b/src/crystallization/PolymerAngles.cpp index fb8d19b7c..c409ef02d 100644 --- a/src/crystallization/PolymerAngles.cpp +++ b/src/crystallization/PolymerAngles.cpp @@ -27,7 +27,7 @@ Calculate a function to investigate the relative orientations of polymer angles This CV takes the vectors calculated by a \ref PLANES action as input and computes the following function -of the relative angles, \f$\theta\f$, between the normals of pairs of input vectors: +of the relative angles, \f$\theta\f$, between the vectors that are normal to the pairs of input vectors: \f[ s = \frac{ 3 \cos^2 \theta - 1 }{ 2 } diff --git a/src/crystallization/Q3.cpp b/src/crystallization/Q3.cpp index e9deaddd5..1fbfa0a5f 100644 --- a/src/crystallization/Q3.cpp +++ b/src/crystallization/Q3.cpp @@ -73,9 +73,9 @@ Q3 SPECIES=1-64 D_0=1.3 R_0=0.2 HISTOGRAM={GAUSSIAN LOWER=0.0 UPPER=1.0 NBINS=20 PRINT ARG=q3.* FILE=colvar \endplumedfile -The following command could be used to measure the Q3 paramters that describe the arrangement of chlorine ions around the -sodium atoms in NaCl. The imagined system here is composed of 64 NaCl formula units and the atoms are arranged in the input -with the 64 Na\f$^+\f$ ions followed by the 64 Cl\f$-\f$ ions. Once again the average Q3 paramter is calculated and output to a +The following command could be used to measure the Q3 parameters that describe the arrangement of chlorine ions around the +sodium atoms in sodium chloride. The imagined system here is composed of 64 NaCl formula units and the atoms are arranged in the input +with the 64 Na\f$^+\f$ ions followed by the 64 Cl\f$-\f$ ions. Once again the average Q3 parameter is calculated and output to a file called colvar \plumedfile @@ -88,7 +88,7 @@ PRINT ARG=q3.mean FILE=colvar //+PLUMEDOC MCOLVARF LOCAL_Q3 /* -Calculate the local degree of order around an atoms by taking the average dot product between the \f$q_3\f$ vector on the central atom and the \f$q_3\f$ vector +Calculate the local degree of order around an atoms by taking the average dot product between the \f$q_3\f$ vector on the central atom and the \f$q_3\f$ vector on the atoms in the first coordination sphere. 
The \ref Q3 command allows one to calculate one complex vectors for each of the atoms in your system that describe the degree of order in the coordination sphere @@ -115,7 +115,7 @@ biased dynamics what is really required is an order parameter that measures: - Whether or not the coordination spheres around atoms are ordered - Whether or not the atoms that are ordered are clustered together in a crystalline nucleus -\ref LOCAL_AVERAGE and \ref NLINKS are variables that can be combined with the Steinhardt parameteters allow to calculate variables that satisfy these requirements. +\ref LOCAL_AVERAGE and \ref NLINKS are variables that can be combined with the Steinhardt parameters allow to calculate variables that satisfy these requirements. LOCAL_Q3 is another variable that can be used in these sorts of calculations. The LOCAL_Q3 parameter for a particular atom is a number that measures the extent to which the orientation of the atoms in the first coordination sphere of an atom match the orientation of the central atom. It does this by calculating the following quantity for each of the atoms in the system: @@ -124,7 +124,7 @@ quantity for each of the atoms in the system: s_i = \frac{ \sum_j \sigma( r_{ij} ) \sum_{m=-3}^3 q_{3m}^{*}(i)q_{3m}(j) }{ \sum_j \sigma( r_{ij} ) } \f] -where \f$q_{3m}(i)\f$ and \f$q_{3m}(j)\f$ are the 3rd order Steinhardt vectors calculated for atom \f$i\f$ and atom \f$j\f$ respectively and the asterix denotes complex +where \f$q_{3m}(i)\f$ and \f$q_{3m}(j)\f$ are the 3rd order Steinhardt vectors calculated for atom \f$i\f$ and atom \f$j\f$ respectively and the asterisk denotes complex conjugation. The function \f$\sigma( r_{ij} )\f$ is a \ref switchingfunction that acts on the distance between atoms \f$i\f$ and \f$j\f$. The parameters of this function should be set so that it the function is equal to one when atom \f$j\f$ is in the first coordination sphere of atom \f$i\f$ and is zero otherwise. The sum in the numerator diff --git a/src/crystallization/Q4.cpp b/src/crystallization/Q4.cpp index 3f453a325..989289465 100644 --- a/src/crystallization/Q4.cpp +++ b/src/crystallization/Q4.cpp @@ -25,9 +25,9 @@ //+PLUMEDOC MCOLVAR Q4 /* -Calculate 4th order Steinhardt parameters. +Calculate fourth order Steinhardt parameters. -The 4th order Steinhardt parameters allow us to measure the degree to which the first coordination shell +The fourth order Steinhardt parameters allow us to measure the degree to which the first coordination shell around an atom is ordered. The Steinhardt parameter for atom, \f$i\f$ is complex vector whose components are calculated using the following formula: @@ -35,7 +35,7 @@ calculated using the following formula: q_{4m}(i) = \frac{\sum_j \sigma( r_{ij} ) Y_{4m}(\mathbf{r}_{ij}) }{\sum_j \sigma( r_{ij} ) } \f] -where \f$Y_{4m}\f$ is one of the 4th order spherical harmonics so \f$m\f$ is a number that runs from \f$-4\f$ to +where \f$Y_{4m}\f$ is one of the fourth order spherical harmonics so \f$m\f$ is a number that runs from \f$-4\f$ to \f$+4\f$. The function \f$\sigma( r_{ij} )\f$ is a \ref switchingfunction that acts on the distance between atoms \f$i\f$ and \f$j\f$. The parameters of this function should be set so that it the function is equal to one when atom \f$j\f$ is in the first coordination sphere of atom \f$i\f$ and is zero otherwise. 
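By way of illustration only (the species range and switching function parameters below are placeholders rather than recommended values), the mean fourth order parameter for a group of 64 atoms could be requested as:

\plumedfile
# compute q4 for each of the 64 atoms using a rational switching function
# and output the average over all of them
q4: Q4 SPECIES=1-64 SWITCH={RATIONAL D_0=1.3 R_0=0.2} MEAN
PRINT ARG=q4.mean FILE=colvar
\endplumedfile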
@@ -73,9 +73,9 @@ Q4 SPECIES=1-64 D_0=1.3 R_0=0.2 HISTOGRAM={GAUSSIAN LOWER=0.0 UPPER=1.0 NBINS=20 PRINT ARG=q4.* FILE=colvar \endplumedfile -The following command could be used to measure the Q4 paramters that describe the arrangement of chlorine ions around the -sodium atoms in NaCl. The imagined system here is composed of 64 NaCl formula units and the atoms are arranged in the input -with the 64 Na\f$^+\f$ ions followed by the 64 Cl\f$-\f$ ions. Once again the average Q4 paramter is calculated and output to a +The following command could be used to measure the Q4 parameters that describe the arrangement of chlorine ions around the +sodium atoms in sodium chloride. The imagined system here is composed of 64 NaCl formula units and the atoms are arranged in the input +with the 64 Na\f$^+\f$ ions followed by the 64 Cl\f$-\f$ ions. Once again the average Q4 parameter is calculated and output to a file called colvar \plumedfile @@ -88,7 +88,7 @@ PRINT ARG=q4.mean FILE=colvar //+PLUMEDOC MCOLVARF LOCAL_Q4 /* -Calculate the local degree of order around an atoms by taking the average dot product between the \f$q_4\f$ vector on the central atom and the \f$q_4\f$ vector +Calculate the local degree of order around an atoms by taking the average dot product between the \f$q_4\f$ vector on the central atom and the \f$q_4\f$ vector on the atoms in the first coordination sphere. The \ref Q4 command allows one to calculate one complex vectors for each of the atoms in your system that describe the degree of order in the coordination sphere @@ -115,7 +115,7 @@ biased dynamics what is really required is an order parameter that measures: - Whether or not the coordination spheres around atoms are ordered - Whether or not the atoms that are ordered are clustered together in a crystalline nucleus -\ref LOCAL_AVERAGE and \ref NLINKS are variables that can be combined with the Steinhardt parameteters allow to calculate variables that satisfy these requirements. +\ref LOCAL_AVERAGE and \ref NLINKS are variables that can be combined with the Steinhardt parameters allow to calculate variables that satisfy these requirements. LOCAL_Q4 is another variable that can be used in these sorts of calculations. The LOCAL_Q4 parameter for a particular atom is a number that measures the extent to which the orientation of the atoms in the first coordination sphere of an atom match the orientation of the central atom. It does this by calculating the following quantity for each of the atoms in the system: @@ -124,7 +124,7 @@ quantity for each of the atoms in the system: s_i = \frac{ \sum_j \sigma( r_{ij} ) \sum_{m=-4}^4 q_{4m}^{*}(i)q_{4m}(j) }{ \sum_j \sigma( r_{ij} ) } \f] -where \f$q_{4m}(i)\f$ and \f$q_{4m}(j)\f$ are the 4th order Steinhardt vectors calculated for atom \f$i\f$ and atom \f$j\f$ respectively and the asterix denotes +where \f$q_{4m}(i)\f$ and \f$q_{4m}(j)\f$ are the fourth order Steinhardt vectors calculated for atom \f$i\f$ and atom \f$j\f$ respectively and the asterisk denotes complex conjugation. The function \f$\sigma( r_{ij} )\f$ is a \ref switchingfunction that acts on the distance between atoms \f$i\f$ and \f$j\f$. The parameters of this function should be set so that it the function is equal to one when atom \f$j\f$ is in the first coordination sphere of atom \f$i\f$ and is zero otherwise. 
The sum in the numerator diff --git a/src/crystallization/Q6.cpp b/src/crystallization/Q6.cpp index 431f67ed0..7d17b85ed 100644 --- a/src/crystallization/Q6.cpp +++ b/src/crystallization/Q6.cpp @@ -25,9 +25,9 @@ //+PLUMEDOC MCOLVAR Q6 /* -Calculate 6th order Steinhardt parameters. +Calculate sixth order Steinhardt parameters. -The 6th order Steinhardt parameters allow us to measure the degree to which the first coordination shell +The sixth order Steinhardt parameters allow us to measure the degree to which the first coordination shell around an atom is ordered. The Steinhardt parameter for atom, \f$i\f$ is complex vector whose components are calculated using the following formula: @@ -35,7 +35,7 @@ calculated using the following formula: q_{6m}(i) = \frac{\sum_j \sigma( r_{ij} ) Y_{6m}(\mathbf{r}_{ij}) }{\sum_j \sigma( r_{ij} ) } \f] -where \f$Y_{6m}\f$ is one of the 6th order spherical harmonics so \f$m\f$ is a number that runs from \f$-6\f$ to +where \f$Y_{6m}\f$ is one of the sixth order spherical harmonics so \f$m\f$ is a number that runs from \f$-6\f$ to \f$+6\f$. The function \f$\sigma( r_{ij} )\f$ is a \ref switchingfunction that acts on the distance between atoms \f$i\f$ and \f$j\f$. The parameters of this function should be set so that it the function is equal to one when atom \f$j\f$ is in the first coordination sphere of atom \f$i\f$ and is zero otherwise. @@ -73,9 +73,9 @@ Q6 SPECIES=1-64 D_0=1.3 R_0=0.2 HISTOGRAM={GAUSSIAN LOWER=0.0 UPPER=1.0 NBINS=20 PRINT ARG=q6.* FILE=colvar \endplumedfile -The following command could be used to measure the Q6 paramters that describe the arrangement of chlorine ions around the -sodium atoms in NaCl. The imagined system here is composed of 64 NaCl formula units and the atoms are arranged in the input -with the 64 Na\f$^+\f$ ions followed by the 64 Cl\f$-\f$ ions. Once again the average Q6 paramter is calculated and output to a +The following command could be used to measure the Q6 parameters that describe the arrangement of chlorine ions around the +sodium atoms in sodium chloride. The imagined system here is composed of 64 NaCl formula units and the atoms are arranged in the input +with the 64 Na\f$^+\f$ ions followed by the 64 Cl\f$-\f$ ions. Once again the average Q6 parameter is calculated and output to a file called colvar \plumedfile @@ -88,7 +88,7 @@ PRINT ARG=q6.mean FILE=colvar //+PLUMEDOC MCOLVARF LOCAL_Q6 /* -Calculate the local degree of order around an atoms by taking the average dot product between the \f$q_6\f$ vector on the central atom and the \f$q_6\f$ vector +Calculate the local degree of order around an atoms by taking the average dot product between the \f$q_6\f$ vector on the central atom and the \f$q_6\f$ vector on the atoms in the first coordination sphere. The \ref Q6 command allows one to calculate one complex vectors for each of the atoms in your system that describe the degree of order in the coordination sphere @@ -115,7 +115,7 @@ biased dynamics what is really required is an order parameter that measures: - Whether or not the coordination spheres around atoms are ordered - Whether or not the atoms that are ordered are clustered together in a crystalline nucleus -\ref LOCAL_AVERAGE and \ref NLINKS are variables that can be combined with the Steinhardt parameteters allow to calculate variables that satisfy these requirements. +\ref LOCAL_AVERAGE and \ref NLINKS are variables that can be combined with the Steinhardt parameters allow to calculate variables that satisfy these requirements. 
LOCAL_Q6 is another variable that can be used in these sorts of calculations. The LOCAL_Q6 parameter for a particular atom is a number that measures the extent to which the orientation of the atoms in the first coordination sphere of an atom match the orientation of the central atom. It does this by calculating the following quantity for each of the atoms in the system: @@ -124,7 +124,7 @@ quantity for each of the atoms in the system: s_i = \frac{ \sum_j \sigma( r_{ij} ) \sum_{m=-6}^6 q_{6m}^{*}(i)q_{6m}(j) }{ \sum_j \sigma( r_{ij} ) } \f] -where \f$q_{6m}(i)\f$ and \f$q_{6m}(j)\f$ are the 6th order Steinhardt vectors calculated for atom \f$i\f$ and atom \f$j\f$ respectively and the asterix denotes +where \f$q_{6m}(i)\f$ and \f$q_{6m}(j)\f$ are the sixth order Steinhardt vectors calculated for atom \f$i\f$ and atom \f$j\f$ respectively and the asterisk denotes complex conjugation. The function \f$\sigma( r_{ij} )\f$ is a \ref switchingfunction that acts on the distance between atoms \f$i\f$ and \f$j\f$. The parameters of this function should be set so that it the function is equal to one when atom \f$j\f$ is in the first coordination sphere of atom \f$i\f$ and is zero otherwise. The sum in the numerator diff --git a/src/crystallization/SMAC.cpp b/src/crystallization/SMAC.cpp index adfc323ba..d49c7975d 100644 --- a/src/crystallization/SMAC.cpp +++ b/src/crystallization/SMAC.cpp @@ -49,7 +49,7 @@ including \f$\psi\f$ in the numerator is there to ensure that only those molecul number of molecules. It is important to include this "more than" switching function when you are simulating nucleation from solution with this CV. Lastly, the $K_n functions are \ref kernelfunctions that take the torsion angle, \f$\theta_{ij}\f$, between the internal orientation vectors for molecules \f$i\f$ and \f$j\f$ as input. These kernel functions should be set so that they are -equal to one when the relative orientation of the moleclues are as they are in the solid and equal to zero otherwise. +equal to one when the relative orientation of the molecules are as they are in the solid and equal to zero otherwise. The final \f$s_i\f$ quantity thus measures whether (on average) the molecules in the first coordination sphere around molecule \f$i\f$ are oriented as they would be in the solid. Furthermore, this Action is a multicolvar so you can calculate the \f$s_i\f$ values for all the molecules in your system simultaneously and then determine the average, the number less than and so on. diff --git a/src/crystallization/SimpleCubic.cpp b/src/crystallization/SimpleCubic.cpp index e57347296..e6d55fc8e 100644 --- a/src/crystallization/SimpleCubic.cpp +++ b/src/crystallization/SimpleCubic.cpp @@ -32,7 +32,7 @@ namespace crystallization { //+PLUMEDOC MCOLVAR SIMPLECUBIC /* -Calculate whether or not the coordination spheres of atoms are arranged as they would be in a simple +Calculate whether or not the coordination spheres of atoms are arranged as they would be in a simple cubic structure. 
We can measure how similar the environment around atom \f$i\f$ is to a simple cubic structure is by evaluating diff --git a/src/crystallization/Steinhardt.cpp b/src/crystallization/Steinhardt.cpp index 30c31b671..63e057b16 100644 --- a/src/crystallization/Steinhardt.cpp +++ b/src/crystallization/Steinhardt.cpp @@ -32,7 +32,7 @@ void Steinhardt::registerKeywords( Keywords& keys ) { keys.add("compulsory","MM","0","The m parameter of the switching function; 0 implies 2*NN"); keys.add("compulsory","D_0","0.0","The d_0 parameter of the switching function"); keys.add("compulsory","R_0","The r_0 parameter of the switching function"); - keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous swiching function defined above. " + keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous switching function defined above. " "The following provides information on the \\ref switchingfunction that are available. " "When this keyword is present you no longer need the NN, MM, D_0 and R_0 keywords."); keys.use("SPECIES"); keys.use("SPECIESA"); keys.use("SPECIESB"); diff --git a/src/crystallization/Tetrahedral.cpp b/src/crystallization/Tetrahedral.cpp index 3deac51d3..5298d548a 100644 --- a/src/crystallization/Tetrahedral.cpp +++ b/src/crystallization/Tetrahedral.cpp @@ -34,8 +34,8 @@ namespace crystallization { /* Calculate the degree to which the environment about ions has a tetrahedral order. -We can measure the degree to which the first coordination shell around any atom, \f$i\f$ is -tetrahedrally ordered using the following function. +We can measure the degree to which the atoms in the first coordination shell around any atom, \f$i\f$, +are arranged like a tetrahedron using the following function. \f[ s(i) = \frac{1}{\sum_j \sigma( r_{ij} )} \sum_j \sigma( r_{ij} )\left[ \frac{(x_{ij} + y_{ij} + z_{ij})^3}{r_{ij}^3} + @@ -44,14 +44,14 @@ tetrahedrally ordered using the following function. \frac{(-x_{ij} - y_{ij} + z_{ij})^3}{r_{ij}^3} \right] \f] -Here \f$r_{ij}\f$ is the magnitude fo the vector connecting atom \f$i\f$ to atom \f$j\f$ and \f$x_{ij}\f$, \f$y_{ij}\f$ and \f$z_{ij}\f$ +Here \f$r_{ij}\f$ is the magnitude of the vector connecting atom \f$i\f$ to atom \f$j\f$ and \f$x_{ij}\f$, \f$y_{ij}\f$ and \f$z_{ij}\f$ are its three components. The function \f$\sigma( r_{ij} )\f$ is a \ref switchingfunction that acts on the distance between atoms \f$i\f$ and \f$j\f$. The parameters of this function should be set so that the function is equal to one when atom \f$j\f$ is in the first coordination sphere of atom \f$i\f$ and is zero otherwise. \par Examples -The following command calculates the average value of the tetrahedrality parameter for a set of 64 atoms all of the same type +The following command calculates the average value of the TETRAHEDRAL parameter for a set of 64 atoms all of the same type and outputs this quantity to a file called colvar. \plumedfile tt: TETRAHEDRAL SPECIES=1-64 SWITCH={RATIONAL D_0=1.3 R_0=0.2} MEAN PRINT ARG=tt.mean FILE=colvar \endplumedfile -The following command calculates the number of tetrahedrality parameters that are greater than 0.8 in a set of 10 atoms. +The following command calculates the number of TETRAHEDRAL parameters that are greater than 0.8 in a set of 10 atoms. In this calculation it is assumed that there are two atom types A and B and that the first coordination sphere of the 10 atoms of type A contains atoms of type B.
The formula above is thus calculated for ten different A atoms and within it the sum over \f$j\f$ runs over 40 atoms of type B that could be in the first coordination sphere. diff --git a/src/crystallization/VectorMean.cpp b/src/crystallization/VectorMean.cpp index b151dbd8f..964d009f9 100644 --- a/src/crystallization/VectorMean.cpp +++ b/src/crystallization/VectorMean.cpp @@ -49,7 +49,7 @@ void VectorMean::registerKeywords( Keywords& keys ) { void VectorMean::reserveKeyword( Keywords& keys ) { keys.reserve("vessel","VMEAN","calculate the norm of the mean vector."); - keys.addOutputComponent("vmean","VMEAN","the norm of the mean vector. The output component can be refererred to elsewhere in the input " + keys.addOutputComponent("vmean","VMEAN","the norm of the mean vector. The output component can be referred to elsewhere in the input " "file by using the label.vmean"); } diff --git a/src/crystallization/VectorMultiColvar.cpp b/src/crystallization/VectorMultiColvar.cpp index 5564647e0..aa58d4489 100644 --- a/src/crystallization/VectorMultiColvar.cpp +++ b/src/crystallization/VectorMultiColvar.cpp @@ -38,11 +38,11 @@ void VectorMultiColvar::registerKeywords( Keywords& keys ) { "with different parameters. In this case the quantities calculated can be referenced elsewhere in the " "input by using the name of the quantity followed by a numerical identifier " "e.g. <em>label</em>.lessthan-1, <em>label</em>.lessthan-2 etc. When doing this and, for clarity we have " - "made the label of the components customizable. As such by using the LABEL keyword in the description of the keyword " + "made it so that the user can set the label for the components. As such by using the LABEL keyword in the description of the keyword " "input you can customize the component name. In addition, you can calculate all of these scalar functions for " "one particular component of the calculated vector by making use of the COMPONENT keyword. The first component is used to " "refer to the norm of the vector. The individual components can then be referenced using the numbers 2, 3, and so on. So " - "as an example MEAN1={COMPONET=1} calculates the average vector norm. MEAN2={COMPONENT=2} by contrast calculates the mean " + "as an example MEAN1={COMPONENT=1} calculates the average vector norm. MEAN2={COMPONENT=2} by contrast calculates the mean " "for all of the first components of the vectors."); } diff --git a/src/crystallization/VectorSum.cpp b/src/crystallization/VectorSum.cpp index 15c99a895..52fc82583 100644 --- a/src/crystallization/VectorSum.cpp +++ b/src/crystallization/VectorSum.cpp @@ -49,7 +49,7 @@ void VectorSum::registerKeywords( Keywords& keys ) { void VectorSum::reserveKeyword( Keywords& keys ) { keys.reserve("vessel","VSUM","calculate the norm of the sum of vectors."); - keys.addOutputComponent("vsum","VSUM","the norm of sum of vectors. The output component can be refererred to elsewhere in the input " + keys.addOutputComponent("vsum","VSUM","the norm of sum of vectors. The output component can be referred to elsewhere in the input " "file by using the label.vsum"); } diff --git a/src/dimred/ClassicalMultiDimensionalScaling.cpp b/src/dimred/ClassicalMultiDimensionalScaling.cpp index ff7b58514..0aa17378d 100644 --- a/src/dimred/ClassicalMultiDimensionalScaling.cpp +++ b/src/dimred/ClassicalMultiDimensionalScaling.cpp @@ -25,7 +25,7 @@ //+PLUMEDOC DIMRED CLASSICAL_MDS /* Create a low-dimensional projection of a trajectory using the classical multidimensional -scaling algorithm. + scaling algorithm. 
Multidimensional scaling (MDS) is similar to what is done when you make a map. You start with distances between London, Belfast, Paris and Dublin and then you try to arrange points on a piece of paper so that the (suitably scaled) @@ -42,7 +42,7 @@ Euclidean distances between pairs of them, \f$d_{ij}\f$, resemble the dissimilar \f] where \f$D_{ij}\f$ is the distance between point \f$X^{i}\f$ and point \f$X^{j}\f$ and \f$d_{ij}\f$ is the distance between the projection -of \f$X^{i}\f$, \f$x^i\f$, and the projection of \f$X^{j}\f$, \f$x^j\f$. A tutorial on this approach can be used to analyse simulations +of \f$X^{i}\f$, \f$x^i\f$, and the projection of \f$X^{j}\f$, \f$x^j\f$. A tutorial on how this approach can be used to analyze simulations can be found in the tutorial \ref belfast-3 and in the following <a href="https://www.youtube.com/watch?v=ofC2qz0_9_A&feature=youtu.be" > short video.</a> \par Examples @@ -62,7 +62,7 @@ CLASSICAL_MDS ... The following section is for people who are interested in how this method works in detail. A solid understanding of this material is not necessary to use MDS. -\section dim-sec Method of optimisation +\section dim-sec Method of optimization The stress function can be minimized using a standard optimization algorithm such as conjugate gradients or steepest descent. However, it is more common to do this minimization using a technique known as classical scaling. Classical scaling works by @@ -151,8 +151,8 @@ Much as in PCA there are generally a small number of large eigenvalues in \f$\La We can safely use only the large eigenvalues and their corresponding eigenvectors to express the relationship between the coordinates \f$\mathbf{X}\f$. This gives us our set of low-dimensional projections. -This derivation makes a number of assumptions about the how the low dimensional points should best be arranged to minimise -the stress. If you use an interative optimization algorithm such as SMACOF you may thus be able to find a better +This derivation makes a number of assumptions about how the low dimensional points should best be arranged to minimize +the stress. If you use an iterative optimization algorithm such as SMACOF you may thus be able to find a better (lower-stress) projection of the points. For more details on the assumptions made see <a href="http://quest4rigor.com/tag/multidimensional-scaling/"> this website.</a> */ diff --git a/src/dimred/PCA.cpp b/src/dimred/PCA.cpp index a0c9b640b..91f778b45 100644 --- a/src/dimred/PCA.cpp +++ b/src/dimred/PCA.cpp @@ -49,7 +49,7 @@ calculate the average structure and the amount the system fluctuates around this \f$x\f$, \f$y\f$ and \f$z\f$ coordinates of a molecule are used as input is that the majority of the changes in the positions of the atoms comes from the translational and rotational degrees of freedom of the molecule. The first six principal components will thus, most likely, be uninteresting. Consequently, to remedy this problem PLUMED provides the functionality to perform an RMSD alignment of the all the structures -to be analysed to the first frame in the trajectory. This can be used to effectively remove translational and/or rotational motions from +to be analyzed to the first frame in the trajectory. This can be used to effectively remove translational and/or rotational motions from consideration. The resulting principal components thus describe vibrational motions of the molecule.
If you wish to calculate the projection of a trajectory on a set of principal components calculated from this PCA action then the output can be @@ -59,19 +59,19 @@ used as input for the \ref PCAVARS action. The following input instructs PLUMED to perform a principal component analysis in which the covariance matrix is calculated from changes in the positions of the first 22 atoms. The TYPE=OPTIMAL instruction ensures that translational and rotational degrees of freedom are removed from consideration. -The first two principal components will be output to a file called pca-comp.pdb. Trajectory frames will be collected on every step and the PCA calculation +The first two principal components will be output to a file called PCA-comp.pdb. Trajectory frames will be collected on every step and the PCA calculation will be performed at the end of the simulation. \plumedfile -PCA METRIC=OPTIMAL ATOMS=1-22 STRIDE=1 NLOW_DIM=2 OFILE=pca-comp.pdb +PCA METRIC=OPTIMAL ATOMS=1-22 STRIDE=1 NLOW_DIM=2 OFILE=PCA-comp.pdb \endplumedfile -The following input instructs PLUMED to perform a principal component analysis in which the covariance matrix is calculated from chnages in the six distances -seen in the previous lines. Notice that here the TYPE=EUCLIDEAN keyword is used to indicate that no alighment has to be done when calculating the various -elements of the covariance matrix from the input vectors. In this calculation the first two principal components will be output to a file called pca-comp.pdb. +The following input instructs PLUMED to perform a principal component analysis in which the covariance matrix is calculated from changes in the six distances +seen in the previous lines. Notice that here the TYPE=EUCLIDEAN keyword is used to indicate that no alignment has to be done when calculating the various +elements of the covariance matrix from the input vectors. In this calculation the first two principal components will be output to a file called PCA-comp.pdb. Trajectory frames will be collected every five steps and the PCA calculation is performed every 1000 steps. Consequently, if you run a 2000 step simulation the PCA analysis will be performed twice. The REWEIGHT_BIAS keyword in this input tells PLUMED that rather that ascribing a weight of one to each of the frames -when calculating averages and covariances a reweighting should be performed based and each frames' weight in these calculations should be determined based on +when calculating averages and covariance matrices a reweighting should be performed based and each frames' weight in these calculations should be determined based on the current value of the instantaneous bias (see \ref REWEIGHT_BIAS). \plumedfile @@ -82,7 +82,7 @@ d4: DISTANCE ATOMS=2,3 d5: DISTANCE ATOMS=2,4 d6: DISTANCE ATOMS=3,4 -PCA ARG=d1,d2,d3,d4,d5,d6 METRIC=EUCLIDEAN STRIDE=5 RUN=1000 NLOW_DIM=2 REWEIGHT_BIAS OFILE=pca-comp.pdb +PCA ARG=d1,d2,d3,d4,d5,d6 METRIC=EUCLIDEAN STRIDE=5 RUN=1000 NLOW_DIM=2 REWEIGHT_BIAS OFILE=PCA-comp.pdb \endplumedfile */ diff --git a/src/dimred/ProjectNonLandmarkPoints.cpp b/src/dimred/ProjectNonLandmarkPoints.cpp index 636ff38c1..4e4c5939a 100644 --- a/src/dimred/ProjectNonLandmarkPoints.cpp +++ b/src/dimred/ProjectNonLandmarkPoints.cpp @@ -31,7 +31,7 @@ //+PLUMEDOC DIMRED PROJECT_ALL_ANALYSIS_DATA /* -Find projections of all non-landmark points using the embedding calculated by a dimensionality reduction optimisation calculation. 
+Find projections of all non-landmark points using the embedding calculated by a dimensionality reduction optimization calculation. \par Examples @@ -47,7 +47,7 @@ private: double cgtol; /// Number of diemsions in low dimensional space unsigned nlow; -/// The class that calcualtes the projection of the data that is required +/// The class that calculates the projection of the data that is required DimensionalityReductionBase* mybase; /// Generate a projection of the ith data point - this is called in two routine void generateProjection( const unsigned& idat, std::vector<double>& point ); @@ -71,7 +71,7 @@ PLUMED_REGISTER_ACTION(ProjectNonLandmarkPoints,"PROJECT_ALL_ANALYSIS_DATA") void ProjectNonLandmarkPoints::registerKeywords( Keywords& keys ) { analysis::AnalysisBase::registerKeywords( keys ); keys.add("compulsory","PROJECTION","the projection that you wish to generate out-of-sample projections with"); - keys.add("compulsory","CGTOL","1E-6","the tolerance for the conjugate gradient optimisation"); + keys.add("compulsory","CGTOL","1E-6","the tolerance for the conjugate gradient optimization"); keys.addOutputComponent("coord","default","the low-dimensional projections of the various input configurations"); } diff --git a/src/dimred/SketchMap.cpp b/src/dimred/SketchMap.cpp index d0713aa69..b217d6a9c 100644 --- a/src/dimred/SketchMap.cpp +++ b/src/dimred/SketchMap.cpp @@ -50,9 +50,9 @@ void SketchMap::registerKeywords( Keywords& keys ) { keys.add("compulsory","LOW_DIM_FUNCTION","the parameters of the switching function in the low dimensional space"); keys.add("compulsory","ANNEAL_RATE","0.5","the rate at which to do the annealing"); keys.add("compulsory","ANNEAL_STEPS","10","the number of steps of annealing to do"); - keys.add("compulsory","CGTOL","1E-6","the tolerance for the conjugate gradient minimisation"); + keys.add("compulsory","CGTOL","1E-6","the tolerance for the conjugate gradient minimization"); // Smap pointwise input - keys.add("compulsory","NCYCLES","5","the number of cycles of global optimisation to attempt"); + keys.add("compulsory","NCYCLES","5","the number of cycles of global optimization to attempt"); keys.add("compulsory","BUFFER","1.1","grid extent for search is (max projection - minimum projection) multiplied by this value"); keys.add("compulsory","CGRID_SIZE","10","number of points to use in each grid direction"); keys.add("compulsory","FGRID_SIZE","0","interpolate the grid onto this number of points -- only works in 2D"); diff --git a/src/dimred/SketchMapBase.cpp b/src/dimred/SketchMapBase.cpp index 181747dd1..3f5546385 100644 --- a/src/dimred/SketchMapBase.cpp +++ b/src/dimred/SketchMapBase.cpp @@ -29,7 +29,7 @@ void SketchMapBase::registerKeywords( Keywords& keys ) { keys.remove("NLOW_DIM"); keys.add("compulsory","HIGH_DIM_FUNCTION","as in input action","the parameters of the switching function in the high dimensional space"); keys.add("compulsory","LOW_DIM_FUNCTION","as in input action","the parameters of the switching function in the low dimensional space"); - keys.add("compulsory","MIXPARAM","0.0","the ammount of the pure distances to mix into the stress function"); + keys.add("compulsory","MIXPARAM","0.0","the amount of the pure distances to mix into the stress function"); } SketchMapBase::SketchMapBase( const ActionOptions& ao ): diff --git a/src/dimred/SketchMapConjGrad.cpp b/src/dimred/SketchMapConjGrad.cpp index 7283dacba..ea4bb8509 100644 --- a/src/dimred/SketchMapConjGrad.cpp +++ b/src/dimred/SketchMapConjGrad.cpp @@ -25,7 +25,7 @@ //+PLUMEDOC DIMRED 
SKETCHMAP_CONJGRAD /* -Optimise the sketch-map stress function using conjugate gradients. +Optimize the sketch-map stress function using conjugate gradients. \par Examples @@ -48,7 +48,7 @@ PLUMED_REGISTER_ACTION(SketchMapConjGrad,"SKETCHMAP_CONJGRAD") void SketchMapConjGrad::registerKeywords( Keywords& keys ) { SketchMapBase::registerKeywords( keys ); - keys.add("compulsory","CGTOL","1E-6","the tolerance for the conjugate gradient minimisation"); + keys.add("compulsory","CGTOL","1E-6","the tolerance for the conjugate gradient minimization"); } SketchMapConjGrad::SketchMapConjGrad( const ActionOptions& ao ): diff --git a/src/dimred/SketchMapPointwise.cpp b/src/dimred/SketchMapPointwise.cpp index 08a3451a4..f46d14fc0 100644 --- a/src/dimred/SketchMapPointwise.cpp +++ b/src/dimred/SketchMapPointwise.cpp @@ -26,7 +26,7 @@ //+PLUMEDOC DIMRED SKETCHMAP_POINTWISE /* -Optimise the sketch-map stress function using a pointwise global optimisation algorithm. +Optimize the sketch-map stress function using a pointwise global optimization algorithm. \par Examples @@ -51,8 +51,8 @@ PLUMED_REGISTER_ACTION(SketchMapPointwise,"SKETCHMAP_POINTWISE") void SketchMapPointwise::registerKeywords( Keywords& keys ) { SketchMapBase::registerKeywords( keys ); - keys.add("compulsory","NCYCLES","5","the number of cycles of global optimisation to attempt"); - keys.add("compulsory","CGTOL","1E-6","the tolerance for the conjugate gradient minimisation"); + keys.add("compulsory","NCYCLES","5","the number of cycles of global optimization to attempt"); + keys.add("compulsory","CGTOL","1E-6","the tolerance for the conjugate gradient minimization"); keys.add("compulsory","BUFFER","1.1","grid extent for search is (max projection - minimum projection) multiplied by this value"); keys.add("compulsory","CGRID_SIZE","10","number of points to use in each grid direction"); keys.add("compulsory","FGRID_SIZE","0","interpolate the grid onto this number of points -- only works in 2D"); @@ -71,7 +71,7 @@ SketchMapPointwise::SketchMapPointwise( const ActionOptions& ao ): parseVector("FGRID_SIZE",nfgrid); if( nfgrid[0]!=0 && nlow!=2 ) error("interpolation only works in two dimensions"); - log.printf(" doing %u cycles of global optimisation sweeps\n",ncycles); + log.printf(" doing %u cycles of global optimization sweeps\n",ncycles); log.printf(" using coarse grid of points that is %u",npoints[0]); log.printf(" and that is %f larger than the difference between the position of the minimum and maximum projection \n",gbuf); for(unsigned j=1; j<npoints.size(); ++j) log.printf(" by %u",npoints[j]); diff --git a/src/dimred/SketchMapSmacof.cpp b/src/dimred/SketchMapSmacof.cpp index ca5cee4a9..f490a167f 100644 --- a/src/dimred/SketchMapSmacof.cpp +++ b/src/dimred/SketchMapSmacof.cpp @@ -25,7 +25,7 @@ //+PLUMEDOC DIMRED SKETCHMAP_SMACOF /* -Optimise the sketch-map stress function using the SMACOF algorithm. +Optimize the sketch-map stress function using the SMACOF algorithm. \par Examples diff --git a/src/dimred/SmacoffMDS.cpp b/src/dimred/SmacoffMDS.cpp index cc974798a..a8917cbac 100644 --- a/src/dimred/SmacoffMDS.cpp +++ b/src/dimred/SmacoffMDS.cpp @@ -25,7 +25,7 @@ //+PLUMEDOC DIMRED SMACOF_MDS /* -Optimise the multidimensional scaling stress function using the SMACOF algorithm. +Optimize the multidimensional scaling stress function using the SMACOF algorithm. 
\par Examples @@ -50,7 +50,7 @@ PLUMED_REGISTER_ACTION(SmacofMDS,"SMACOF_MDS") void SmacofMDS::registerKeywords( Keywords& keys ) { DimensionalityReductionBase::registerKeywords( keys ); keys.remove("NLOW_DIM"); - keys.add("compulsory","SMACOF_TOL","1E-4","tolerance for the SMACOF optimization algorith"); + keys.add("compulsory","SMACOF_TOL","1E-4","tolerance for the SMACOF optimization algorithm"); keys.add("compulsory","SMACOF_MAXCYC","1000","maximum number of optimization cycles for SMACOF algorithm"); } @@ -58,7 +58,7 @@ SmacofMDS::SmacofMDS( const ActionOptions& ao): Action(ao), DimensionalityReductionBase(ao) { - if( !dimredbase ) error("SMACOF must be initialised using output from dimensionality reduction object"); + if( !dimredbase ) error("SMACOF must be initialized using output from dimensionality reduction object"); parse("SMACOF_TOL",tol); parse("SMACOF_MAXCYC",maxloops); log.printf(" running smacof to convergence at %f or for a maximum of %u steps \n",tol,maxloops); diff --git a/src/drr/DynamicReferenceRestraining.cpp b/src/drr/DynamicReferenceRestraining.cpp index 73a516103..d865a761a 100644 --- a/src/drr/DynamicReferenceRestraining.cpp +++ b/src/drr/DynamicReferenceRestraining.cpp @@ -45,13 +45,13 @@ namespace drr { //+PLUMEDOC EABFMOD_BIAS DRR /* -Used to performed extended-system adaptive biasing force(eABF) \cite Lelievre2007 method -on one or more collective variables. This method is also +Used to performed extended-system adaptive biasing force(eABF) \cite Lelievre2007 method +on one or more collective variables. This method is also called dynamic reference restraining(DRR) \cite Zheng2012 . For each collective variable \f$\xi_i\f$, a fictitious variable \f$\lambda_i\f$ is attached through a spring. The fictitious variable \f$\lambda_i\f$ undergoes -overdamped langevin dynamics jusk like \ref EXTENDED_LAGRANGIAN. The ABF +overdamped Langevin dynamics just like \ref EXTENDED_LAGRANGIAN. The ABF algorithm applies bias force on \f$\lambda_i\f$. The bias force acts on \f$\lambda_i\f$ is the negative average spring force on \f$\lambda_i\f$, which enhances the sampling of \f$\lambda_i\f$. @@ -79,7 +79,7 @@ A'(\xi^*)=\frac{{\sum_\lambda}N\left(\xi^*,\lambda\right)\left[\frac{\xi^*-\lang \f] The code performing UI(colvar_UIestimator.h) is contributed by Haohao Fu \cite Fu2016 . -It may be slow. I only change the boltzmann constant and output +It may be slow. I only change the Boltzmann constant and output precision in it. For new version and issues, please see: https://github.com/fhh2626/colvars @@ -300,14 +300,14 @@ void DynamicReferenceRestraining::registerKeywords(Keywords &keys) { componentsAreNotOptional(keys); keys.addOutputComponent( "_fict", "default", - "one or multiple instances of this quantity will be refereceable " + "one or multiple instances of this quantity can be referenced " "elsewhere in the input file. " "These quantities will named with the arguments of the bias followed by " "the character string _tilde. It is possible to add forces on these " "variable."); keys.addOutputComponent( "_vfict", "default", - "one or multiple instances of this quantity will be refereceable " + "one or multiple instances of this quantity can be referenced " "elsewhere in the input file. " "These quantities will named with the arguments of the bias followed by " "the character string _tilde. 
It is NOT possible to add forces on these " diff --git a/src/eds/EDS.cpp b/src/eds/EDS.cpp index 16a7ea7ca..dfc68dfe9 100644 --- a/src/eds/EDS.cpp +++ b/src/eds/EDS.cpp @@ -41,7 +41,7 @@ namespace eds { Add a linear bias on a set of observables. This force is the same as the linear part of the bias in \ref -RESTRAINT, but this bias has the ability to compute prefactors +RESTRAINT, but this bias has the ability to compute the prefactors adaptively using the scheme of White and Voth \cite white2014efficient in order to match target observable values for a set of CVs. Further updates to the algorithm are described in \cite hocky2017cgds. @@ -211,7 +211,7 @@ void EDS::registerKeywords(Keywords& keys) { "CENTER_ARG is for calculated centers, e.g. from a CV or analysis. "); keys.add("optional","PERIOD","Steps over which to adjust bias for adaptive or ramping"); - keys.add("compulsory","RANGE","25.0","The (starting) maximum increase in coupling constant per PERIOD (in kBT/[BIAS_SCALE unit]) for each CV based"); + keys.add("compulsory","RANGE","25.0","The (starting) maximum increase in coupling constant per PERIOD (in \\f$k_B T\\f$/[BIAS_SCALE unit]) for each CV based"); keys.add("compulsory","INCREASE_FACTOR","1.0","Factor by which to increase RANGE every time coupling exceeds RANGE. RANGE is the max prefactor for increasing coupling in a given PERIOD."); keys.add("compulsory","SEED","0","Seed for random order of changing bias"); keys.add("compulsory","INIT","0","Starting value for coupling constant"); @@ -223,7 +223,7 @@ void EDS::registerKeywords(Keywords& keys) { "Must be in interval [1,0), where 1 indicates all and any other indicates a stochastic update. " "If not set, default is 1 / N, where N is the number of CVs. "); - keys.addFlag("LM",false,"Use Levenberg-Marquadt algorithm along with simulatneous keyword. Otherwise use gradient descent."); + keys.addFlag("LM",false,"Use Levenberg-Marquadt algorithm along with simultaneous keyword. Otherwise use gradient descent."); keys.addFlag("LM_MIXING","1","Initial mixing parameter when using Levenberg-Marquadt minimization."); keys.add("optional","RESTART_FMT","the format that should be used to output real numbers in EDS restarts"); diff --git a/src/function/Combine.cpp b/src/function/Combine.cpp index 97e2bdf51..d8ca42063 100644 --- a/src/function/Combine.cpp +++ b/src/function/Combine.cpp @@ -99,7 +99,7 @@ void Combine::registerKeywords(Keywords& keys) { keys.add("compulsory","COEFFICIENTS","1.0","the coefficients of the arguments in your function"); keys.add("compulsory","PARAMETERS","0.0","the parameters of the arguments in your function"); keys.add("compulsory","POWERS","1.0","the powers to which you are raising each of the arguments in your function"); - keys.addFlag("NORMALIZE",false,"normalize all the coefficents so that in total they are equal to one"); + keys.addFlag("NORMALIZE",false,"normalize all the coefficients so that in total they are equal to one"); } Combine::Combine(const ActionOptions&ao): diff --git a/src/function/FuncPathMSD.cpp b/src/function/FuncPathMSD.cpp index 4788f7ec3..9a11bada9 100644 --- a/src/function/FuncPathMSD.cpp +++ b/src/function/FuncPathMSD.cpp @@ -41,13 +41,13 @@ This is the Path Collective Variables implementation ( see \cite brand07 ). This variable computes the progress along a given set of frames that is provided in input ("s" component) and the distance from them ("z" component). 
-It is a function of MSD that are obtained by the joint use of MSD variable and SQUARED flag +It is a function of the mean squared displacements that are obtained by the joint use of mean squared displacement variables with the SQUARED flag (see below). \par Examples Here below is a case where you have defined three frames and you want to -calculate the progress alng the path and the distance from it in p1 +calculate the progress along the path and the distance from it in p1 \plumedfile t1: RMSD REFERENCE=frame_1.dat TYPE=OPTIMAL SQUARED diff --git a/src/function/FuncSumHills.cpp b/src/function/FuncSumHills.cpp index 28d18ccc6..d5a65a96f 100644 --- a/src/function/FuncSumHills.cpp +++ b/src/function/FuncSumHills.cpp @@ -40,9 +40,9 @@ namespace function { //+PLUMEDOC FUNCTION FUNCSUMHILLS /* -This function is intended to be called by the command line tool sum_hills -and it is meant to integrate a HILLS file or an HILLS file interpreted as -a histogram i a variety of ways. Therefore it is not expected that you use this +This function is intended to be called by the command line tool sum_hills +and it is meant to integrate a HILLS file or a HILLS file interpreted as +a histogram in a variety of ways. Therefore it is not expected that you use this during your dynamics (it will crash!) In the future one could implement periodic integration during the metadynamics @@ -211,20 +211,20 @@ void FuncSumHills::registerKeywords(Keywords& keys) { keys.add("optional","HILLSFILES"," source file for hills creation(may be the same as HILLS)"); // this can be a vector! keys.add("optional","HISTOFILES"," source file for histogram creation(may be the same as HILLS)"); // also this can be a vector! keys.add("optional","HISTOSIGMA"," sigmas for binning when the histogram correction is needed "); - keys.add("optional","PROJ"," only with sumhills: the projection on the cvs"); - keys.add("optional","KT"," only with sumhills: the kt factor when projection on cvs"); + keys.add("optional","PROJ"," only with sumhills: the projection on the CVs"); + keys.add("optional","KT"," only with sumhills: the kt factor when projection on CVs"); keys.add("optional","GRID_MIN","the lower bounds for the grid"); keys.add("optional","GRID_MAX","the upper bounds for the grid"); keys.add("optional","GRID_BIN","the number of bins for the grid"); keys.add("optional","GRID_SPACING","the approximate grid spacing (to be used as an alternative or together with GRID_BIN)"); - keys.add("optional","INTERVAL","set monodimensional INTERVAL"); + keys.add("optional","INTERVAL","set one dimensional INTERVAL"); keys.add("optional","OUTHILLS"," output file for hills "); keys.add("optional","OUTHISTO"," output file for histogram "); keys.add("optional","INITSTRIDE"," stride if you want an initial dump "); keys.add("optional","STRIDE"," stride when you do it on the fly "); - keys.addFlag("ISCLTOOL",true,"use via plumed commandline: calculate at read phase and then go"); + keys.addFlag("ISCLTOOL",true,"use via plumed command line: calculate at read phase and then go"); keys.addFlag("PARALLELREAD",false,"read parallel HILLS file"); - keys.addFlag("NEGBIAS",false,"dump negative bias ( -bias ) instead of the free energy: needed in welltempered with flexible hills "); + keys.addFlag("NEGBIAS",false,"dump negative bias ( -bias ) instead of the free energy: needed in well tempered with flexible hills "); keys.addFlag("NOHISTORY",false,"to be used with INITSTRIDE: it splits the bias/histogram in pieces without previous history ");
keys.addFlag("MINTOZERO",false,"translate the resulting bias/histogram to have the minimum to zero "); keys.add("optional","FMT","the format that should be used to output real numbers"); diff --git a/src/function/LocalEnsemble.cpp b/src/function/LocalEnsemble.cpp index e52940aac..91a6a9204 100644 --- a/src/function/LocalEnsemble.cpp +++ b/src/function/LocalEnsemble.cpp @@ -38,8 +38,8 @@ are averaged separately. The average is stored in a component labelled <em>label \par Examples The following input tells plumed to calculate the chemical shifts for four -different proteins in the same simulation box then average them, calcualated -the sum of the squared deviation with respect to the experiemntal values and +different proteins in the same simulation box then average them, calculated +the sum of the squared deviation with respect to the experimental values and applies a linear restraint. \plumedfile MOLINFO STRUCTURE=data/template.pdb diff --git a/src/function/Matheval.cpp b/src/function/Matheval.cpp index 76331265f..e608d2707 100644 --- a/src/function/Matheval.cpp +++ b/src/function/Matheval.cpp @@ -33,7 +33,7 @@ namespace function { /* Calculate a combination of variables using a matheval expression. -This action computes an arbitrary function of one or more precomputed +This action computes an arbitrary function of one or more collective variables. Arguments are chosen with the ARG keyword, and the function is provided with the FUNC string. Notice that this string should contain no space. Within FUNC, one can refer to the diff --git a/src/function/Piecewise.cpp b/src/function/Piecewise.cpp index a98b94b80..84f7727f6 100644 --- a/src/function/Piecewise.cpp +++ b/src/function/Piecewise.cpp @@ -31,7 +31,7 @@ namespace function { //+PLUMEDOC FUNCTION PIECEWISE /* -Compute a piecewise straight line through its arguments that passes through +Compute a piece wise straight line through its arguments that passes through a set of ordered control points. For variables less than the first @@ -89,10 +89,10 @@ void Piecewise::registerKeywords(Keywords& keys) { keys.add("numbered","POINT","This keyword is used to specify the various points in the function above."); keys.reset_style("POINT","compulsory"); componentsAreNotOptional(keys); - keys.addOutputComponent("_pfunc","default","one or multiple instances of this quantity will be referenceable elsewhere " + keys.addOutputComponent("_pfunc","default","one or multiple instances of this quantity can be referenced elsewhere " "in the input file. These quantities will be named with the arguments of the " "function followed by the character string _pfunc. These quantities tell the " - "user the values of the piecewise functions of each of the arguments."); + "user the values of the piece wise functions of each of the arguments."); } Piecewise::Piecewise(const ActionOptions&ao): diff --git a/src/function/Stats.cpp b/src/function/Stats.cpp index 49825ddfe..59ba5b117 100644 --- a/src/function/Stats.cpp +++ b/src/function/Stats.cpp @@ -30,7 +30,7 @@ namespace function { //+PLUMEDOC FUNCTION STATS /* Calculates statistical properties of a set of collective variables with respect to a set of reference values. -In particular it calculates and store as components the sum of the squared deviations, the correlation, the +In particular it calculates and store as components the sum of the squared deviations, the correlation, the slope and the intercept of a linear fit. 
The reference values can be either provided as values using PARAMETERS or using value without derivatives diff --git a/src/function/Target.cpp b/src/function/Target.cpp index 6fb0207d2..e8d2da8f2 100644 --- a/src/function/Target.cpp +++ b/src/function/Target.cpp @@ -35,8 +35,7 @@ namespace function { //+PLUMEDOC DCOLVAR TARGET /* -This function measures the pythagorean distance from a particular structure measured in the space defined by some -set of collective variables. +This function measures the Pythagorean distance from a particular structure measured in the space defined by some set of collective variables. This collective variable can be used to calculate something akin to: @@ -44,7 +43,7 @@ This collective variable can be used to calculate something akin to: d(X,X') = \vert X - X' \vert \f] -where \f$ X \f$ is the instaneous values for a set of collective variables for the system and +where \f$ X \f$ is the instantaneous values for a set of collective variables for the system and \f$ X' \f$ is the values that these self-same set of collective variables take in some reference structure provided as input. If we call our set of collective variables \f$\{s_i\}\f$ then this CV computes: @@ -61,29 +60,29 @@ we can compute: d = \sqrt{ \sum_{i=1}^N \sigma_i (s_i - s_i^{(ref)})^2 } \f] -where \f$\sigma_i\f$ is a vector of weights. Lastly, by using the METRIC=MAHALONOBIS we can compute mahalonobis distances using: +where \f$\sigma_i\f$ is a vector of weights. Lastly, by using the METRIC=MAHALONOBIS we can compute Mahalonobis distances using: \f[ d = \left( \mathbf{s} - \mathbf{s}^{(ref)} \right)^T \mathbf{\Sigma} \left( \mathbf{s} - \mathbf{s}^{(ref)} \right) \f] where \f$\mathbf{s}\f$ is a column vector containing the values of all the CVs and \f$\mathbf{s}^{(ref)}\f$ is a column vector -containg the values of the CVs in the reference configuration. \f$\mathbf{\Sigma}\f$ is then an \f$N \times N\f$ matrix that is +containing the values of the CVs in the reference configuration. \f$\mathbf{\Sigma}\f$ is then an \f$N \times N\f$ matrix that is specified in the input. \par Examples -The following input calculates the distance between a reference configuration and the instaneous position of the system in the trajectory. +The following input calculates the distance between a reference configuration and the instantaneous position of the system in the trajectory. The position of the reference configuration is specified by providing the values of the distance between atoms 1 and 2 and atoms 3 and 4. \plumedfile d1: DISTANCE ATOMS=1,2 d2: DISTANCE ATOMS=3,4 -t1: TARGET REFERENCE=myref.pdb TYPE=EUCLIDEAN +t1: TARGET REFERENCE=reference.pdb TYPE=EUCLIDEAN PRINT ARG=t1 FILE=colvar \endplumedfile -The contents of the file containing the reference structure (myref.pdb) is shown below. As you can see you must provide information on the +The contents of the file containing the reference structure (reference.pdb) is shown below. As you can see you must provide information on the labels of the CVs that are being used to define the position of the reference configuration in this file together with the values that these quantities take in the reference configuration. 
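As a rough, hypothetical sketch (the argument labels d1 and d2 and the numerical values below are illustrative and are not taken from an actual reference file), such a file might contain something along the lines of:
\verbatim
REMARK ARG=d1,d2
REMARK d1=1.0 d2=2.0
END
\endverbatim
The labels in the REMARK records would have to match the labels of the collective variables that are given in input to TARGET.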
diff --git a/src/generic/Debug.cpp b/src/generic/Debug.cpp index 077c62c3b..812650d40 100644 --- a/src/generic/Debug.cpp +++ b/src/generic/Debug.cpp @@ -70,7 +70,7 @@ void Debug::registerKeywords( Keywords& keys ) { keys.add("compulsory","STRIDE","1","the frequency with which this action is to be performed"); keys.addFlag("logActivity",false,"write in the log which actions are inactive and which are inactive"); keys.addFlag("logRequestedAtoms",false,"write in the log which atoms have been requested at a given time"); - keys.addFlag("NOVIRIAL",false,"switch off the virial contribution for the entirity of the simulation"); + keys.addFlag("NOVIRIAL",false,"switch off the virial contribution for the entirety of the simulation"); keys.addFlag("DETAILED_TIMERS",false,"switch on detailed timers"); keys.add("optional","FILE","the name of the file on which to output these quantities"); } diff --git a/src/generic/DumpAtoms.cpp b/src/generic/DumpAtoms.cpp index 9fdb22e53..543d732b6 100644 --- a/src/generic/DumpAtoms.cpp +++ b/src/generic/DumpAtoms.cpp @@ -49,7 +49,7 @@ namespace generic { Dump selected atoms on a file. This command can be used to output the positions of a particular set of atoms. -The atoms required are ouput in a xyz or gro formatted file. +The atoms required are output in a xyz or gro formatted file. If PLUMED has been compiled with xdrfile support, then also xtc and trr files can be written. To this aim one should install xdrfile library (http://www.gromacs.org/Developer_Zone/Programming_Guide/XTC_Library). If the xdrfile library is installed properly the PLUMED configure script should be able to @@ -82,7 +82,7 @@ following input COM ATOMS=11-20 LABEL=c1 DUMPATOMS STRIDE=10 FILE=file.xyz ATOMS=1-10,c1 UNITS=A \endplumedfile -As an alternative, you might want to set all the lentght used by PLUMED to Angstrom using the \ref UNITS +As an alternative, you might want to set all the length used by PLUMED to Angstrom using the \ref UNITS action. However, this latter choice will affect all your input and output. The following input is very similar but dumps a .gro (gromacs) file, diff --git a/src/generic/FitToTemplate.cpp b/src/generic/FitToTemplate.cpp index 90e32efe8..de9abb9a4 100644 --- a/src/generic/FitToTemplate.cpp +++ b/src/generic/FitToTemplate.cpp @@ -51,7 +51,7 @@ This action is used to align a molecule to a template. This can be used to move the coordinates stored in plumed so as to be aligned with a provided template in PDB format. Pdb should contain also weights for alignment (see the format of PDB files used e.g. for \ref RMSD). -Make sure your PDB file is correclty formatted as explained \ref pdbreader "in this page". +Make sure your PDB file is correctly formatted as explained \ref pdbreader "in this page". Weights for displacement are ignored, since no displacement is computed here. Notice that all atoms (not only those in the template) are aligned. To see what effect try @@ -80,7 +80,7 @@ this action is performed at every MD step. When running with periodic boundary conditions, the atoms should be in the proper periodic image. This is done automatically since PLUMED 2.5, -by considering the ordered list of atoms and rebuilding PBCs with a procedure +by considering the ordered list of atoms and rebuilding the molecules using a procedure that is equivalent to that done in \ref WHOLEMOLECULES . Notice that rebuilding is local to this action. This is different from \ref WHOLEMOLECULES which actually modifies the coordinates stored in PLUMED. 
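A minimal sketch of the basic usage might look as follows; the template file name and the atom index here are placeholders rather than values taken from the text above:
\plumedfile
# align the coordinates stored in PLUMED to the template at every step
FIT_TO_TEMPLATE STRIDE=1 REFERENCE=template.pdb TYPE=OPTIMAL
# this position is then measured in the frame defined by the template
p: POSITION ATOM=5
PRINT ARG=p.x,p.y,p.z FILE=colvar
\endplumedfile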
@@ -126,7 +126,7 @@ frame of an aligned molecule. It could be for instance the center of mass of a ligand with respect to a protein \plumedfile # center of the ligand: -ce: CENTER ATOMS=100-110 +center: CENTER ATOMS=100-110 FIT_TO_TEMPLATE REFERENCE=protein.pdb TYPE=OPTIMAL @@ -134,20 +134,20 @@ FIT_TO_TEMPLATE REFERENCE=protein.pdb TYPE=OPTIMAL fix: FIXEDATOM AT=1.0,1.1,1.0 # take the distance between the fixed atom and the center of the ligand -d: DISTANCE ATOMS=ce,fix +d: DISTANCE ATOMS=center,fix # apply a restraint RESTRAINT ARG=d AT=0.0 KAPPA=100.0 \endplumedfile Notice that you could have obtained an (almost) identical result adding a fictitious -atom to `ref.pdb` with the serial number corresponding to the `ce` atom (there is no automatic way +atom to `ref.pdb` with the serial number corresponding to the atom labelled `center` (there is no automatic way to get it, but in this example it should be the number of atoms of the system plus one), and properly setting the weights for alignment and displacement in \ref RMSD. There are two differences to be expected: (ab) \ref FIT_TO_TEMPLATE might be slower since it has to rototranslate all the available atoms and -(b) variables employing PBCs (such as \ref DISTANCE without `NOPBC`, as in the example above) - are allowed after \ref FIT_TO_TEMPLATE, whereas \ref RMSD expects PBCs to be already solved. +(b) variables employing periodic boundary conditions (such as \ref DISTANCE without `NOPBC`, as in the example above) + are allowed after \ref FIT_TO_TEMPLATE, whereas \ref RMSD expects the issues related to the periodic boundary conditions to be already solved. The latter means that before the \ref RMSD statement one should use \ref WRAPAROUND or \ref WHOLEMOLECULES to properly place the ligand. diff --git a/src/generic/Flush.cpp b/src/generic/Flush.cpp index 1a2460cf1..0379c15e8 100644 --- a/src/generic/Flush.cpp +++ b/src/generic/Flush.cpp @@ -35,7 +35,7 @@ This command instructs plumed to flush all the open files with a user specified Notice that all files are flushed anyway every 10000 steps. This -is useful for preventing data loss that would otherwise arrise as a consequence of the code +is useful for preventing data loss that would otherwise arise as a consequence of the code storing data for printing in the buffers. Notice that wherever it is written in the plumed input file, it will flush all the open files. diff --git a/src/generic/Group.cpp b/src/generic/Group.cpp index e540acfb2..5b7c0e962 100644 --- a/src/generic/Group.cpp +++ b/src/generic/Group.cpp @@ -36,7 +36,7 @@ namespace generic { //+PLUMEDOC GENERIC GROUP /* -Define a group of atoms so that a particular list of atoms can be referenced with a single label +Define a group of atoms so that a particular list of atoms can be referenced with a single label in definitions of CVs or virtual atoms. Atoms can be listed as comma separated numbers (i.e. `1,2,3,10,45,7,9`) , simple positive ranges @@ -50,11 +50,11 @@ It is also possible to remove atoms from a list and or sort them using keywords The flow is the following: - If `ATOMS` is present, then take the ordered list of atoms from the `ATOMS` keyword as a starting list. - If `NDX_FILE` is present, then append to it the list obtained from the gromacs group. -- If `REMOVE` is present, then remove the first occurence of each of these atoms from the list. +- If `REMOVE` is present, then remove the first occurrence of each of these atoms from the list. 
If one tries to remove an atom that was not listed plumed adds a notice in the output. An atom that is present twice in the original list might be removed twice. - If `SORT` is present, then the resulting list is sorted by increasing serial number. -- If `UNIQUE` is present, then the resuling list is sorted by increasing serial number _and_ duplicate elements are removed. +- If `UNIQUE` is present, then the resulting list is sorted by increasing serial number _and_ duplicate elements are removed. Notice that this command just creates a shortcut, and does not imply any real calculation. So, having a huge group defined does not slow down your calculation in any way. @@ -104,7 +104,7 @@ DUMPATOMS ATOMS=pro FILE=traj.gro A list can be edited with `REMOVE`. For instance, if you are using a water model with three atoms per molecule, you can -easily construct the list of hydrogens in this manner +easily construct the list of hydrogen atoms in this manner \plumedfile # take one atom every three, that is oxygens ox: GROUP ATOMS=1-90:3 diff --git a/src/generic/Plumed.cpp b/src/generic/Plumed.cpp index 238b21e82..b1c1c4072 100644 --- a/src/generic/Plumed.cpp +++ b/src/generic/Plumed.cpp @@ -51,7 +51,7 @@ However, most of the features are expected to work correctly. Notes: - The \ref LOAD action will not work correctly since registers will be shared among the two instances. - In particular, the loaded actions will be visible to both guest and host irrespectively of where they are loaded from. + In particular, the loaded actions will be visible to both guest and host irrespective of where they are loaded from. This can be fixed and will probably be fixed in a later version. - `CHDIR` is not thread safe. However, in most implementations there will be a single process running PLUMED, with perhaps multiple OpenMP threads @@ -112,12 +112,12 @@ The files are long and complex and there are some clashes in the name of the var are used in both files, same files are written, etc). In addition, files might have been written using different units (see \ref UNITS`). If you want to run a single simulation with a bias potential that is the sum of the two bias potentials, you can: -- Place the two input files, as well as all the files required by plumed, in separate directories `dir1` and `dir2`. +- Place the two input files, as well as all the files required by plumed, in separate directories `directory1` and `directory2`. - Run with the following input file in the parent directory: \plumedfile # plumed.dat -PLUMED FILE=plumed.dat CHDIR=dir1 -PLUMED FILE=plumed.dat CHDIR=dir2 +PLUMED FILE=plumed.dat CHDIR=directory1 +PLUMED FILE=plumed.dat CHDIR=directory2 \endplumedfile */ @@ -179,7 +179,7 @@ void Plumed::registerKeywords( Keywords& keys ) { keys.add("compulsory","STRIDE","1","stride different from 1 are not supported yet"); keys.add("optional","FILE","input file for the guest PLUMED instance"); keys.add("optional","KERNEL","kernel to be used for the guest PLUMED instance (USE WITH CAUTION!)"); - keys.add("optional","LOG","logfile for the guest PLUMED instance. By default the host log is used"); + keys.add("optional","LOG","log file for the guest PLUMED instance. 
By default the host log is used"); keys.add("optional","CHDIR","run guest in a separate directory"); keys.addFlag("NOREPLICAS",false,"run multiple replicas as isolated ones, without letting them know that the host has multiple replicas"); keys.addOutputComponent("bias","default","the instantaneous value of the bias potential"); diff --git a/src/generic/Read.cpp b/src/generic/Read.cpp index cb16d6a1f..16b472f50 100644 --- a/src/generic/Read.cpp +++ b/src/generic/Read.cpp @@ -41,9 +41,9 @@ an MD simulation \par Description of components The READ command will read those fields that are labelled with the text string given to the -VALUE keyword. It will also read in any fields that are labelleled with the text string +VALUE keyword. It will also read in any fields that are labeled with the text string given to the VALUE keyword followed by a dot and a further string. If a single Value is read in -this value can be referenced using the label of the Action. Alternatively, if multiple quanties +this value can be referenced using the label of the Action. Alternatively, if multiple quantities are read in, they can be referenced elsewhere in the input by using the label for the Action followed by a dot and the character string that appeared after the dot in the title of the field. @@ -99,13 +99,13 @@ void Read::registerKeywords(Keywords& keys) { ActionPilot::registerKeywords(keys); ActionWithValue::registerKeywords(keys); keys.add("compulsory","STRIDE","1","the frequency with which the file should be read."); - keys.add("compulsory","EVERY","1","only read every ith line of the colvar file. This should be used if the colvar was written more frequently than the trajectory."); + keys.add("compulsory","EVERY","1","only read every \\f$n\\f$th line of the colvar file. This should be used if the colvar was written more frequently than the trajectory."); keys.add("compulsory","VALUES","the values to read from the file"); keys.add("compulsory","FILE","the name of the file from which to read these quantities"); keys.addFlag("IGNORE_TIME",false,"ignore the time in the colvar file. When this flag is not present read will be quite strict " "about the start time of the simulation and the stride between frames"); keys.addFlag("IGNORE_FORCES",false,"use this flag if the forces added by any bias can be safely ignored. As an example forces can be " - "safely ignored if you are doing postprocessing that does not involve outputting forces"); + "safely ignored if you are doing post processing that does not involve outputting forces"); keys.remove("NUMERICAL_DERIVATIVES"); keys.use("UPDATE_FROM"); keys.use("UPDATE_UNTIL"); diff --git a/src/generic/Time.cpp b/src/generic/Time.cpp index 309d10359..966609377 100644 --- a/src/generic/Time.cpp +++ b/src/generic/Time.cpp @@ -31,7 +31,7 @@ namespace generic { //+PLUMEDOC GENERIC TIME /* -retrieve the time of the simulation to be used elsewere +retrieve the time of the simulation to be used elsewhere \par Examples diff --git a/src/generic/WholeMolecules.cpp b/src/generic/WholeMolecules.cpp index 10087ee8f..0055f3084 100644 --- a/src/generic/WholeMolecules.cpp +++ b/src/generic/WholeMolecules.cpp @@ -40,7 +40,7 @@ namespace generic { //+PLUMEDOC GENERIC WHOLEMOLECULES /* -This action is used to rebuild molecules that can become split by the periodic +This action is used to rebuild molecules that can become split by the periodic boundary conditions. 
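A minimal sketch of the kind of input this enables is shown below; the atom ranges are illustrative only:
\plumedfile
# make each of the two chains whole before any collective variable is computed
WHOLEMOLECULES ENTITY0=1-100 ENTITY1=101-200
c1: COM ATOMS=1-100
c2: COM ATOMS=101-200
d: DISTANCE ATOMS=c1,c2
PRINT ARG=d FILE=colvar
\endplumedfile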
It is similar to the ALIGN_ATOMS keyword of plumed1, and is needed since some @@ -66,8 +66,8 @@ The way WHOLEMOLECULES modifies each of the listed entities is this: In this way, if an entity consists of a list of atoms such that consecutive atoms in the list are always closer than half a box side the entity will become whole. -This can be usually achieved selecting consecute atoms (1-100), but it is also possible -to skip some atoms, provided consecute chosen atoms are close enough. +This can be usually achieved selecting consecutive atoms (1-100), but it is also possible +to skip some atoms, provided consecutive chosen atoms are close enough. \par Examples diff --git a/src/generic/WrapAround.cpp b/src/generic/WrapAround.cpp index d06914fed..9fbb289b5 100644 --- a/src/generic/WrapAround.cpp +++ b/src/generic/WrapAround.cpp @@ -61,7 +61,7 @@ it is required during the simulation if collective variables need atoms to be in Consider that the computational cost grows with the product of the size of the two lists (ATOMS and AROUND), so that this action can become very expensive. -If you are using it to analyse a trajectory this is usually not a big problem. If you use it to +If you are using it to analyze a trajectory this is usually not a big problem. If you use it to analyze a simulation on the fly, e.g. with \ref DUMPATOMS to store a properly wrapped trajectory, consider the possibility of using the STRIDE keyword here (with great care). \par Examples @@ -129,8 +129,8 @@ each of size GROUPBY. The first atom of the group will be brought close to the AROUND atoms. The following atoms of the group will be just brought close to the first atom of the group. Assuming that oxygen is the first atom of each water molecules, -in the following examples all the water oxygens will be brought -close to the solute, and all the hydrogens will be kept close +in the following examples all the water oxygen atoms will be brought +close to the solute, and all the hydrogen atoms will be kept close to their related oxygen. \plumedfile diff --git a/src/gridtools/ActionWithGrid.cpp b/src/gridtools/ActionWithGrid.cpp index 8ea392d67..0849ef454 100644 --- a/src/gridtools/ActionWithGrid.cpp +++ b/src/gridtools/ActionWithGrid.cpp @@ -28,7 +28,7 @@ namespace gridtools { void ActionWithGrid::registerKeywords( Keywords& keys ) { vesselbase::ActionWithAveraging::registerKeywords( keys ); - keys.add("compulsory","BANDWIDTH","the bandwidths for kernel density esimtation"); + keys.add("compulsory","BANDWIDTH","the bandwidths for kernel density estimation"); keys.add("compulsory","KERNEL","gaussian","the kernel function you are using. 
More details on the kernels available " "in plumed plumed can be found in \\ref kernelfunctions."); keys.add("optional","CONCENTRATION","the concentration parameter for Von Mises-Fisher distributions"); diff --git a/src/gridtools/ActionWithInputGrid.cpp b/src/gridtools/ActionWithInputGrid.cpp index 8a46e7293..fca41fa44 100644 --- a/src/gridtools/ActionWithInputGrid.cpp +++ b/src/gridtools/ActionWithInputGrid.cpp @@ -29,7 +29,7 @@ namespace gridtools { void ActionWithInputGrid::registerKeywords( Keywords& keys ) { ActionWithGrid::registerKeywords( keys ); keys.add("compulsory","GRID","the action that creates the input grid you would like to use"); - keys.add("optional","COMPONENT","if your input is a vector field use this to specifiy the component of the input vector field for which you wish to use"); + keys.add("optional","COMPONENT","if your input is a vector field use this to specify the component of the input vector field for which you wish to use"); } ActionWithInputGrid::ActionWithInputGrid(const ActionOptions&ao): diff --git a/src/gridtools/ConvertToFES.cpp b/src/gridtools/ConvertToFES.cpp index 0eac068c0..2f9b0ac60 100644 --- a/src/gridtools/ConvertToFES.cpp +++ b/src/gridtools/ConvertToFES.cpp @@ -39,7 +39,7 @@ The free energy calculated on a grid is output by this action and can be printed \par Examples -This is a typical example showing how CONVERT_TO_FES might be used when postprocessing a trajectory. +This is a typical example showing how CONVERT_TO_FES might be used when post processing a trajectory. The input below calculates the free energy as a function of the distance between atom 1 and atom 2. This is done by accumulating a histogram as a function of this distance using kernel density estimation and the HISTOGRAM action. All the data within this trajectory is used in the construction of this diff --git a/src/gridtools/DumpCube.cpp b/src/gridtools/DumpCube.cpp index 7238f0435..d7a97bf4c 100644 --- a/src/gridtools/DumpCube.cpp +++ b/src/gridtools/DumpCube.cpp @@ -40,7 +40,7 @@ action thus allows you to output a function evaluated on a grid to a Gaussian cu \par Examples -The input below can be used to postprocess a trajectory. A histogram as a function of the distance +The input below can be used to post process a trajectory. A histogram as a function of the distance between atoms 1 and 2, the distance between atom 1 and 3 and the angle between the vector connecting atoms 1 and 2 and 1 and 3 is computed using kernel density estimation. Once all the data contained in the trajectory has been read in and all the kernels have been added the resulting histogram is output to a file called histoA1.cube. This file has the @@ -74,7 +74,7 @@ PLUMED_REGISTER_ACTION(DumpCube,"DUMPCUBE") void DumpCube::registerKeywords( Keywords& keys ) { GridPrintingBase::registerKeywords( keys ); - keys.add("optional","COMPONENT","if your input is a vector field use this to specifiy the component of the input vector field for which you wish to output"); + keys.add("optional","COMPONENT","if your input is a vector field use this to specify the component of the input vector field for which you wish to output"); } DumpCube::DumpCube(const ActionOptions&ao): diff --git a/src/gridtools/DumpGrid.cpp b/src/gridtools/DumpGrid.cpp index 60924878c..cc31f557f 100644 --- a/src/gridtools/DumpGrid.cpp +++ b/src/gridtools/DumpGrid.cpp @@ -31,7 +31,7 @@ namespace gridtools { Output the function on the grid to a file with the PLUMED grid format. 
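A minimal, illustrative sketch of how such a grid might be produced and then written out is shown below; the atom indices, grid bounds, bandwidth and temperature are placeholder values rather than settings taken from the text above:
\plumedfile
d1: DISTANCE ATOMS=1,2
# accumulate a histogram of d1 using kernel density estimation
hh: HISTOGRAM ARG=d1 STRIDE=10 GRID_MIN=0.0 GRID_MAX=3.0 GRID_BIN=100 BANDWIDTH=0.05
# convert the accumulated histogram to a free energy surface
fes: CONVERT_TO_FES GRID=hh TEMP=300
# write the result in the PLUMED grid format at the end of the calculation
DUMPGRID GRID=fes FILE=fes.dat
\endplumedfile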
PLUMED provides a number of actions that calculate the values of functions on grids. -For instance, whenver you calculate a free energy as a function of a collective variable using +For instance, whenever you calculate a free energy as a function of a collective variable using \ref HISTOGRAM and \ref CONVERT_TO_FES you will generally want to output the value of the free energy at a number of points on a discrete grid that covers the CV space uniformly. Alternatively you may want to calculate what value some symmetry function takes at different points inside your simulation cell using \ref MULTICOLVARDENS. @@ -54,7 +54,7 @@ information about the function plotted and that looks something like this: \endverbatim The header shown here tells us that we have grid showing the values that a function with two arguments x and y -takes at various points in our cell. The lines beheath the first line then tell us a little bit about these two +takes at various points in our cell. The lines beneath the first line then tell us a little bit about these two input arguments. The remaining lines of the file give us information on the positions of our grid points and the value the function and @@ -66,7 +66,7 @@ for y. This block is then followed by a blank line again and this pattern conti \par Examples The following input monitors two torsional angles during a simulation -and outputs a continuos histogram as a function of them at the end of the simulation. +and outputs a continuous histogram as a function of them at the end of the simulation. \plumedfile TORSION ATOMS=1,2,3,4 LABEL=r1 TORSION ATOMS=2,3,4,5 LABEL=r2 diff --git a/src/gridtools/FindContour.cpp b/src/gridtools/FindContour.cpp index e11ed7e4a..f4e88a07a 100644 --- a/src/gridtools/FindContour.cpp +++ b/src/gridtools/FindContour.cpp @@ -31,7 +31,7 @@ Find an isocontour in a smooth function. As discussed in the part of the manual on \ref Analysis PLUMED contains a number of tools that allow you to calculate a function on a grid. The function on this grid might be a \ref HISTOGRAM as a function of a few collective variables -or it might be a phase field that has been calcualted using \ref MULTICOLVARDENS. If this function has one or two input +or it might be a phase field that has been calculated using \ref MULTICOLVARDENS. If this function has one or two input arguments it is relatively straightforward to plot the function. If by contrast the data has a three or more dimensions it can be difficult to visualize. diff --git a/src/gridtools/FindContourSurface.cpp b/src/gridtools/FindContourSurface.cpp index 075c292c3..c8fb7e68d 100644 --- a/src/gridtools/FindContourSurface.cpp +++ b/src/gridtools/FindContourSurface.cpp @@ -28,12 +28,12 @@ Find an isocontour by searching along either the x, y or z direction. As discussed in the part of the manual on \ref Analysis PLUMED contains a number of tools that allow you to calculate a function on a grid. The function on this grid might be a \ref HISTOGRAM as a function of a few collective variables -or it might be a phase field that has been calcualted using \ref MULTICOLVARDENS. If this function has one or two input +or it might be a phase field that has been calculated using \ref MULTICOLVARDENS. If this function has one or two input arguments it is relatively straightforward to plot the function. If by contrast the data has a three dimensions it can be difficult to visualize. This action provides one tool for visualizing these functions. 
It can be used to search for a set of points on a contour -wher the function takes a particular value. In other words, for the function \f$f(x,y,z)\f$ this action would find a set +where the function takes a particular value. In other words, for the function \f$f(x,y,z)\f$ this action would find a set of points \f$\{x_c,y_c,z_c\}\f$ that have: \f[ @@ -44,14 +44,14 @@ where \f$c\f$ is some constant value that is specified by the user. The points that run parallel to the \f$x\f$, \f$y\f$ or \f$z\f$ axis of the simulation cell. The result is, therefore, a two dimensional function evaluated on a grid that gives us the height of the interface as a function of two coordinates. -It is important to note that this action can only be used to detect countours in three dimensional functions. In addition, this action will fail to +It is important to note that this action can only be used to detect contours in three dimensional functions. In addition, this action will fail to find the full set of contour points if the contour does not have the same topology as an infinite plane. If you are uncertain that the isocontours in your function have the appropriate topology you should use \ref FIND_CONTOUR in place of \ref FIND_CONTOUR_SURFACE. \par Examples -The input shown below was used to analyse the results from a simulation of an interface between solid and molten Lennard Jones. The interface between +The input shown below was used to analyze the results from a simulation of an interface between solid and molten Lennard Jones. The interface between the solid and the liquid was set up in the plane perpendicular to the \f$z\f$ direction of the simulation cell. The input below calculates something akin to a Willard-Chandler dividing surface \cite wcsurface between the solid phase and the liquid phase. There are two of these interfaces within the simulation box because of the periodic boundary conditions but we were able to determine that one of these two surfaces lies in a particular part of the diff --git a/src/gridtools/FindSphericalContour.cpp b/src/gridtools/FindSphericalContour.cpp index e57d2fce0..09ade930e 100644 --- a/src/gridtools/FindSphericalContour.cpp +++ b/src/gridtools/FindSphericalContour.cpp @@ -29,12 +29,12 @@ Find an isocontour in a three dimensional grid by searching over a Fibonacci sph As discussed in the part of the manual on \ref Analysis PLUMED contains a number of tools that allow you to calculate a function on a grid. The function on this grid might be a \ref HISTOGRAM as a function of a few collective variables -or it might be a phase field that has been calcualted using \ref MULTICOLVARDENS. If this function has one or two input +or it might be a phase field that has been calculated using \ref MULTICOLVARDENS. If this function has one or two input arguments it is relatively straightforward to plot the function. If by contrast the data has a three dimensions it can be difficult to visualize. This action provides one tool for visualizing these functions. It can be used to search for a set of points on a contour -wher the function takes a particular value. In other words, for the function \f$f(x,y,z)\f$ this action would find a set +where the function takes a particular value. In other words, for the function \f$f(x,y,z)\f$ this action would find a set of points \f$\{x_c,y_c,z_c\}\f$ that have: \f[ @@ -42,8 +42,8 @@ f(x_c,y_c,z_c) - c = 0 \f] where \f$c\f$ is some constant value that is specified by the user. 
The points on this contour are find by searching along a -set of equally spaced radii of a sphere that centered at on particular, user-speciified atom or virtual atom. To ensure that -these search radii are equally spaced on the surface of the sphere the search directions are generated by using a fibonacci +set of equally spaced radii of a sphere that centered at on particular, user-specified atom or virtual atom. To ensure that +these search radii are equally spaced on the surface of the sphere the search directions are generated by using a Fibonacci spiral projected on a sphere. In other words, the search directions are given by: \f[ @@ -65,21 +65,21 @@ where \f$y\f$ is the quantity second component of the vector defined above, \f$n where \f$R\f$ is a random variable between 0 and \f$n-1\f$ that is generated during the read in of the input file and that is fixed during the whole calculation. -It is important to note that this action can only be used to detect countours in three dimensional functions. In addition, this action will fail to +It is important to note that this action can only be used to detect contours in three dimensional functions. In addition, this action will fail to find the full set of contour points if the contour does not have the same topology as a sphere. If you are uncertain that the isocontours in your function have a spherical topology you should use \ref FIND_CONTOUR in place of \ref FIND_SPHERICAL_CONTOUR. \par Examples The following input demonstrates how this action can be used. The input here is used to study the shape of a droplet that has been formed during the -condensation of Lennard Jones from the vapour. The input below achieves this by calculating the coordination numbers of all the atoms within the gas. +condensation of Lennard Jones from the vapor. The input below achieves this by calculating the coordination numbers of all the atoms within the gas. Obviously, those atoms within the droplet will have a large value for the coordination number while the isolated atoms in the gas will have a low value. As such we can detect the sizes of the droplets by constructing a \ref CONTACT_MATRIX whose \f$ij\f$ element tells us whether atom \f$i\f$ and atom \f$j\f$ have coordination number that is greater that two. The atoms within the various droplets within the system can then be found by performing a \ref DFSCLUSTERING on this matrix to detect the connected components. We can take the largest of these connected components and find the center of the droplet by exploiting the functionality within \ref CENTER_OF_MULTICOLVAR. We can then construct a phase field based on the positions of the atoms in the largest cluster and the values of the coordination numbers of these atoms. The final line in the input then finds the a set of points on the dividing surface that separates -teh droplet from the surrounding gas. The value of the phase field on this isocontour is equal to 0.75. +the droplet from the surrounding gas. The value of the phase field on this isocontour is equal to 0.75. \plumedfile # Calculate coordination numbers diff --git a/src/gridtools/FourierTransform.cpp b/src/gridtools/FourierTransform.cpp index b3d151005..bfcbd8d8f 100644 --- a/src/gridtools/FourierTransform.cpp +++ b/src/gridtools/FourierTransform.cpp @@ -56,7 +56,7 @@ The default values of these parameters are: \f$a=1\f$ and \f$b=1\f$. 
\par Examples -The following example tells Plumed to compute the complex 2D 'backward' Discrete Fourier Transform by taking the data saved on a grid called 'density', and normalizing the output by \f$ \frac{1}{\sqrt{N_x\, N_y}}\f$, where \f$N_x\f$ and \f$N_y\f$ are the number of data on the grid (it can be the case that \f$N_x \neq N_y\f$): +The following example tells Plumed to compute the complex 2D 'backward' Discrete Fourier Transform by taking the data saved on a grid called 'density', and normalizing the output by \f$ \frac{1}{\sqrt{N_x\, N_y}}\f$, where \f$N_x\f$ and \f$N_y\f$ are the number of data on the grid (it can be the case that \f$N_x\neq N_y\f$): \plumedfile FOURIER_TRANSFORM STRIDE=1 GRID=density FT_TYPE=complex FOURIER_PARAMETERS=0,-1 FILE=fourier.dat diff --git a/src/gridtools/GridToXYZ.cpp b/src/gridtools/GridToXYZ.cpp index fd22d2a88..2a5117001 100644 --- a/src/gridtools/GridToXYZ.cpp +++ b/src/gridtools/GridToXYZ.cpp @@ -51,7 +51,7 @@ PLUMED_REGISTER_ACTION(GridToXYZ,"GRID_TO_XYZ") void GridToXYZ::registerKeywords( Keywords& keys ) { GridPrintingBase::registerKeywords( keys ); - keys.add("optional","COMPONENT","if your input is a vector field use this to specifiy the component of the input vector field for which you wish to output"); + keys.add("optional","COMPONENT","if your input is a vector field use this to specify the component of the input vector field for which you wish to output"); keys.add("compulsory","UNITS","PLUMED","the units in which to print out the coordinates. PLUMED means internal PLUMED units"); keys.add("optional", "PRECISION","The number of digits in trajectory file"); keys.remove("FMT"); diff --git a/src/gridtools/InterpolateGrid.cpp b/src/gridtools/InterpolateGrid.cpp index b45c3dd9a..2077c54bf 100644 --- a/src/gridtools/InterpolateGrid.cpp +++ b/src/gridtools/InterpolateGrid.cpp @@ -32,7 +32,7 @@ function on to a finer grained grid. The interpolation within this algorithm is \par Examples -The input below can be used to postprocess a trajectory. It calculates a \ref HISTOGRAM as a function the +The input below can be used to post process a trajectory. It calculates a \ref HISTOGRAM as a function the distance between atoms 1 and 2 using kernel density estimation. During the calculation the values of the kernels are evaluated at 100 points on a uniform grid between 0.0 and 3.0. Prior to outputting this function at the end of the simulation this function is interpolated onto a finer grid of 200 points between 0.0 and 3.0. diff --git a/src/isdb/CS2Backbone.cpp b/src/isdb/CS2Backbone.cpp index d6527120e..670c50a0c 100644 --- a/src/isdb/CS2Backbone.cpp +++ b/src/isdb/CS2Backbone.cpp @@ -52,12 +52,12 @@ Calculates the backbone chemical shifts for a protein. The functional form is that of CamShift \cite Kohlhoff:2009us. The chemical shift of the selected nuclei can be saved as components. Alternatively one can calculate either -the CAMSHIFT score (usefull as a collective variable \cite Granata:2013dkor as a scoring +the CAMSHIFT score (useful as a collective variable \cite Granata:2013dkor as a scoring function \cite Robustelli:2010dn) or a \ref METAINFERENCE score (using DOSCORE). For these two latter cases experimental chemical shifts must be provided. CS2BACKBONE calculation can be relatively heavy because it often uses a large number of atoms, it can -be parallelised using MPI and \ref Openmp. +be run in parallel using MPI and \ref Openmp. 
As a general rule, when using \ref CS2BACKBONE or other experimental restraints it may be better to increase the accuracy of the constraint algorithm due to the increased strain on the bonded structure. @@ -68,7 +68,7 @@ ATOMS and a TEMPLATE pdb file for the same atoms should be provided as well in t The system is made automatically whole unless NOPBC is used, in particular if the system is made by multiple chains it is usually better to use NOPBC and make the molecule whole \ref WHOLEMOLECULES selecting an appropriate order of the atoms. The pdb file is needed to the generate a simple topology of the protein. -For histidines in protonation states different from D the HIE/HSE HIP/HSP name should be used. +For histidine residues in protonation states different from D the HIE/HSE HIP/HSP name should be used. GLH and ASH can be used for the alternative protonation of GLU and ASP. Non-standard amino acids and other molecules are not yet supported, but in principle they can be named UNK. If multiple chains are present the chain identifier must be in the standard PDB format, together with the TER keyword at the end of each chain. @@ -81,7 +81,7 @@ add only the files for the nuclei you need, but each file should include all pro A chemical shift for a nucleus is calculated if a value greater than 0 is provided. For practical purposes the value can correspond to the experimental value. Residues numbers should match that used in the pdb file, but must be positive, so double check the pdb. -The first and last residue of each chain should be preceeded by a # character. +The first and last residue of each chain should be preceded by a # character. \verbatim CAshifts.dat: @@ -96,9 +96,9 @@ CAshifts.dat: #last of second chain \endverbatim -The default behaviour is to store the values for the active nuclei in components (ca_#, cb_#, +The default behavior is to store the values for the active nuclei in components (ca_#, cb_#, co_#, ha_#, hn_#, nh_# and expca_#, expcb_#, expco_#, expha_#, exphn_#, exp_nh#) with NOEXP it is possible -to only store the backcalculated values, where # includes a chain and residue number. +to only store the back-calculated values, where # includes a chain and residue number. One additional file is always needed in the folder DATADIR: camshift.db. This file includes all the parameters needed to calculate the chemical shifts and can be found in regtest/isdb/rt-cs2backbone/data/ . @@ -500,10 +500,10 @@ void CS2Backbone::registerKeywords( Keywords& keys ) { keys.addFlag("SERIAL",false,"Perform the calculation in serial - for debug purpose"); keys.add("atoms","ATOMS","The atoms to be included in the calculation, e.g. 
the whole protein."); keys.add("compulsory","DATADIR","data/","The folder with the experimental chemical shifts."); - keys.add("compulsory","TEMPLATE","template.pdb","A PDB file of the protein system to initialise ALMOST."); - keys.add("compulsory","NEIGH_FREQ","20","Period in step for neighbour list update."); + keys.add("compulsory","TEMPLATE","template.pdb","A PDB file of the protein system to initialize ALMOST."); + keys.add("compulsory","NEIGH_FREQ","20","Period in step for neighbor list update."); keys.addFlag("CAMSHIFT",false,"Set to TRUE if you to calculate a single CamShift score."); - keys.addFlag("NOEXP",false,"Set to TRUE if you don't want to have fixed components with the experimetnal values."); + keys.addFlag("NOEXP",false,"Set to TRUE if you don't want to have fixed components with the experimental values."); keys.addOutputComponent("ha","default","the calculated Ha hydrogen chemical shifts"); keys.addOutputComponent("hn","default","the calculated H hydrogen chemical shifts"); keys.addOutputComponent("nh","default","the calculated N nitrogen chemical shifts"); diff --git a/src/isdb/Caliber.cpp b/src/isdb/Caliber.cpp index 527b341dc..7361d8237 100644 --- a/src/isdb/Caliber.cpp +++ b/src/isdb/Caliber.cpp @@ -33,7 +33,7 @@ namespace isdb { //+PLUMEDOC ISDB_BIAS CALIBER /* Add a time-dependent, harmonic restraint on one or more variables. -This allows implementing a maximum caliber restraint on one or more experimental time serie by replica-averaged restrained simulations. +This allows implementing a maximum caliber restraint on one or more experimental time series by replica-averaged restrained simulations. See \cite Capelli:2018jt . The time resolved experiments are read from a text file and intermediate values are obtained by splines. @@ -80,10 +80,10 @@ CALIBER ... ... CALIBER \endplumedfile -In particular the file expsaxs.dat contains the time traces for the 15 intensities at the selected scattering lengths, organised as time, q_1, etc. -The strenght of the bias is automatically evaluated from the standard error of the mean over AVERAGING steps and multiplied by KAPPA. This is usefull when working with multiple experimental data -Because \ref SAXS is usually defined irrespectively of a scaling factor the scaling is evaluated from a linear fit every REGRES_ZERO step. Alternatively it can be given as a fixed constant as SCALE. -The bias is here applied every 10th steps. +In particular the file expsaxs.dat contains the time traces for the 15 intensities at the selected scattering lengths, organized as time, q_1, etc. +The strength of the bias is automatically evaluated from the standard error of the mean over AVERAGING steps and multiplied by KAPPA. This is useful when working with multiple experimental data +Because \ref SAXS is usually defined in a manner that is irrespective of a scaling factor the scaling is evaluated from a linear fit every REGRES_ZERO step. Alternatively it can be given as a fixed constant as SCALE. +The bias is here applied every tenth step. 
*/ //+ENDPLUMEDOC @@ -128,7 +128,7 @@ void Caliber::registerKeywords( Keywords& keys ) { keys.use("ARG"); keys.addFlag("NOENSEMBLE",false,"don't perform any replica-averaging"); keys.add("compulsory","FILE","the name of the file containing the time-resolved values"); - keys.add("compulsory","KAPPA","a force constant, this can be use to scale a constant estimanted on-the-fly using AVERAGING"); + keys.add("compulsory","KAPPA","a force constant, this can be used to scale a constant estimated on-the-fly using AVERAGING"); keys.add("optional","AVERAGING", "Stride for calculation of the optimum kappa, if 0 only KAPPA is used."); keys.add("compulsory","TSCALE","1.0","Apply a time scaling on the experimental time scale"); keys.add("compulsory","SCALE","1.0","Apply a constant scaling on the data provided as arguments"); diff --git a/src/isdb/EMMI.cpp b/src/isdb/EMMI.cpp index 1f73ff4bc..675737632 100644 --- a/src/isdb/EMMI.cpp +++ b/src/isdb/EMMI.cpp @@ -51,7 +51,7 @@ GMM_FILE. We are currently working on a web server to perform this operation. In the meantime, the user can request a stand-alone version of the GMM code at massimiliano.bonomi_AT_gmail.com. When run in single-replica mode, this action allows atomistic, flexible refinement of an individual structure into a density map. -Combined with a multi-replica framework (such as the -multi option in GROMACS), the user can model an esemble of structures using +Combined with a multi-replica framework (such as the -multi option in GROMACS), the user can model an ensemble of structures using the Metainference approach \cite Bonomi:2016ip . \warning diff --git a/src/isdb/Jcoupling.cpp b/src/isdb/Jcoupling.cpp index 1666abc13..8e42a80ac 100644 --- a/src/isdb/Jcoupling.cpp +++ b/src/isdb/Jcoupling.cpp @@ -60,7 +60,7 @@ of \ref METAINFERENCE . \par Examples In the following example we calculate the Ha-N J-coupling from a set of atoms involved in -dihedral \f$\psi\f$ angles in the peptide backbone. We also add the experimental datapoints and compute +dihedral \f$\psi\f$ angles in the peptide backbone. We also add the experimental data points and compute the correlation and other measures and finally print the results. \plumedfile diff --git a/src/isdb/Metainference.cpp b/src/isdb/Metainference.cpp index 73fe5aee4..e89192078 100644 --- a/src/isdb/Metainference.cpp +++ b/src/isdb/Metainference.cpp @@ -53,10 +53,10 @@ can be given either from fixed components of other actions using PARARG or as nu PARAMETERS. The default behavior is that of averaging the data over the available replicas, if this is not wanted the keyword NOENSEMBLE prevent this averaging. -Metadynamic Metainference \cite Bonomi:2016ge or more in general biased Metainference requires the knowledge of +Metadynamics Metainference \cite Bonomi:2016ge or more in general biased Metainference requires the knowledge of biasing potential in order to calculate the weighted average. In this case the value of the bias can be provided as the last argument in ARG and adding the keyword REWEIGHT. To avoid the noise -resulting from the instantaneus value of the bias the weight of each replica can be averaged +resulting from the instantaneous value of the bias the weight of each replica can be averaged over a give time using the keyword AVERAGING. The data can be averaged by using multiple replicas and weighted for a bias if present.
@@ -66,7 +66,7 @@ the arguments as a single gaussian common to all the data points, a gaussian per point, a single long-tailed gaussian common to all the data points, a log-tailed gaussian per data point or using two distinct noises as for the most general formulation of Metainference. In this latter case the noise of the replica-averaging is gaussian (one per data point) and the noise for -the comparison with the experiemntal data can chosen using the keywork LIKELIHOOD +the comparison with the experimental data can be chosen using the keyword LIKELIHOOD between gaussian or log-normal (one per data point), furthermore the evolution of the estimated average over an infinite number of replicas is driven by DFTILDE. @@ -74,7 +74,7 @@ As for Metainference theory there are two sigma values: SIGMA_MEAN represent the error of calculating an average quantity using a finite set of replica and should be set as small as possible following the guidelines for replica-averaged simulations in the framework of the Maximum Entropy Principle. Alternatively, this can be obtained -automatically using the internal sigma mean optimisation as introduced in \cite Lohr:2017gc +automatically using the internal sigma mean optimization as introduced in \cite Lohr:2017gc (OPTSIGMAMEAN=SEM), in this second case sigma_mean is estimated from the maximum standard error of the mean either over the simulation or over a defined time using the keyword AVERAGING. SIGMA_BIAS is an uncertainty parameter, sampled by a MC algorithm in the bounded interval @@ -298,11 +298,11 @@ void Metainference::registerKeywords(Keywords& keys) { keys.add("optional","DSIGMA","maximum MC move of the uncertainty parameter"); keys.add("compulsory","OPTSIGMAMEAN","NONE","Set to NONE/SEM to manually set sigma mean, or to estimate it on the fly"); keys.add("optional","SIGMA_MEAN0","starting value for the uncertainty in the mean estimate"); - keys.add("optional","TEMP","the system temperature - this is only needed if code doesnt' pass the temperature to plumed"); + keys.add("optional","TEMP","the system temperature - this is only needed if code doesn't pass the temperature to plumed"); keys.add("optional","MC_STEPS","number of MC steps"); keys.add("optional","MC_STRIDE","MC stride"); keys.add("optional","MC_CHUNKSIZE","MC chunksize"); - keys.add("optional","STATUS_FILE","write a file with all the data usefull for restart/continuation of Metainference"); + keys.add("optional","STATUS_FILE","write a file with all the data useful for restart/continuation of Metainference"); keys.add("compulsory","WRITE_STRIDE","10000","write the status to a file every N steps, this can be used for restart/continuation"); keys.add("optional","SELECTOR","name of selector"); keys.add("optional","NSELECT","range of values for selector [0, N-1]"); @@ -313,7 +313,7 @@ void Metainference::registerKeywords(Keywords& keys) { keys.addOutputComponent("acceptSigma", "default", "MC acceptance"); keys.addOutputComponent("acceptScale", "SCALEDATA", "MC acceptance"); keys.addOutputComponent("weight", "REWEIGHT", "weights of the weighted average"); - keys.addOutputComponent("biasDer", "REWEIGHT", "derivatives wrt the bias"); + keys.addOutputComponent("biasDer", "REWEIGHT", "derivatives with respect to the bias"); keys.addOutputComponent("scale", "default", "scale parameter"); keys.addOutputComponent("offset", "default", "offset parameter"); keys.addOutputComponent("ftilde", "GENERIC", "ensemble average estimator"); diff --git a/src/isdb/MetainferenceBase.cpp b/src/isdb/MetainferenceBase.cpp
index eaaeeed6f..ef326eb08 100644 --- a/src/isdb/MetainferenceBase.cpp +++ b/src/isdb/MetainferenceBase.cpp @@ -67,11 +67,11 @@ void MetainferenceBase::registerKeywords( Keywords& keys ) { keys.add("optional","DSIGMA","maximum MC move of the uncertainty parameter"); keys.add("compulsory","OPTSIGMAMEAN","NONE","Set to NONE/SEM to manually set sigma mean, or to estimate it on the fly"); keys.add("optional","SIGMA_MEAN0","starting value for the uncertainty in the mean estimate"); - keys.add("optional","TEMP","the system temperature - this is only needed if code doesnt' pass the temperature to plumed"); + keys.add("optional","TEMP","the system temperature - this is only needed if code doesn't pass the temperature to plumed"); keys.add("optional","MC_STEPS","number of MC steps"); keys.add("optional","MC_STRIDE","MC stride"); keys.add("optional","MC_CHUNKSIZE","MC chunksize"); - keys.add("optional","STATUS_FILE","write a file with all the data usefull for restart/continuation of Metainference"); + keys.add("optional","STATUS_FILE","write a file with all the data useful for restart/continuation of Metainference"); keys.add("compulsory","WRITE_STRIDE","10000","write the status to a file every N steps, this can be used for restart/continuation"); keys.add("optional","SELECTOR","name of selector"); keys.add("optional","NSELECT","range of values for selector [0, N-1]"); @@ -82,7 +82,7 @@ void MetainferenceBase::registerKeywords( Keywords& keys ) { keys.addOutputComponent("acceptSigma", "default", "MC acceptance"); keys.addOutputComponent("acceptScale", "SCALEDATA", "MC acceptance"); keys.addOutputComponent("weight", "REWEIGHT", "weights of the weighted average"); - keys.addOutputComponent("biasDer", "REWEIGHT", "derivatives wrt the bias"); + keys.addOutputComponent("biasDer", "REWEIGHT", "derivatives with respect to the bias"); keys.addOutputComponent("scale", "SCALEDATA", "scale parameter"); keys.addOutputComponent("offset", "ADDOFFSET", "offset parameter"); keys.addOutputComponent("ftilde", "GENERIC", "ensemble average estimator"); diff --git a/src/isdb/NOE.cpp b/src/isdb/NOE.cpp index e86af90b4..d5d35b120 100644 --- a/src/isdb/NOE.cpp +++ b/src/isdb/NOE.cpp @@ -36,7 +36,7 @@ namespace isdb { //+PLUMEDOC ISDB_COLVAR NOE /* Calculates NOE intensities as sums of 1/r^6, also averaging over multiple equivalent atoms -or ambiguous NOE. + or ambiguous NOE. Each NOE is defined by two groups containing the same number of atoms, distances are calculated in pairs, transformed in 1/r^6, summed and saved as components. diff --git a/src/isdb/PRE.cpp b/src/isdb/PRE.cpp index 5c9e8e949..443015ff1 100644 --- a/src/isdb/PRE.cpp +++ b/src/isdb/PRE.cpp @@ -35,12 +35,12 @@ namespace isdb { //+PLUMEDOC ISDB_COLVAR PRE /* -Calculates the Paramegnetic Resonance Enhancement intensity ratio between a spinlabel atom and a list of atoms . +Calculates the Paramagnetic Resonance Enhancement intensity ratio between a spin label atom and a list of atoms . The reference atom for the spin label is added with SPINLABEL, the affected atom(s) are give as numbered GROUPA1, GROUPA2, ... The additional parameters needed for the calculation are given as INEPT, the inept -time, TAUC the correlation time, OMEGA, the larmor frequency and RTWO for the relaxation +time, TAUC the correlation time, OMEGA, the Larmor frequency and RTWO for the relaxation time. \ref METAINFERENCE can be activated using DOSCORE and the other relevant keywords. @@ -48,8 +48,8 @@ time. 
\par Examples In the following example five PRE intensities are calculated using the distance between the -oxigen of the spin label and the backbone hydrogens. Omega is the NMR frequency, RTWO the -R2 for the hydrogens, INEPT of 8 ms for the experiment and a TAUC of 1.21 ns +oxygen of the spin label and the backbone hydrogen atoms. Omega is the NMR frequency, RTWO the +R2 for the hydrogen atoms, INEPT of 8 ms for the experiment and a TAUC of 1.21 ns \plumedfile PRE ... @@ -107,7 +107,7 @@ void PRE::registerKeywords( Keywords& keys ) { keys.reset_style("GROUPA","atoms"); keys.add("numbered","RTWO","The relaxation of the atom/atoms in the corresponding GROUPA of atoms. " "Keywords like RTWO1, RTWO2, RTWO3,... should be listed."); - keys.addFlag("ADDEXP",false,"Set to TRUE if you want to have fixed components with the experimetnal values."); + keys.addFlag("ADDEXP",false,"Set to TRUE if you want to have fixed components with the experimental values."); keys.add("numbered","PREINT","Add an experimental value for each PRE."); keys.addOutputComponent("pre","default","the # PRE"); keys.addOutputComponent("exp","ADDEXP","the # PRE experimental intensity"); diff --git a/src/isdb/RDC.cpp b/src/isdb/RDC.cpp index 735a4a675..2d426edc3 100644 --- a/src/isdb/RDC.cpp +++ b/src/isdb/RDC.cpp @@ -79,7 +79,7 @@ This collective variable can then be use to break the rotational symmetry of a s Alternatively if the molecule is rigid it is possible to use the experimental data to calculate the alignment tensor and the use that to back calculate the RDCs, this is what is usually call the Single Value Decomposition approach. In this case the code rely on the a set of function from the GNU Scientific Library (GSL). (With SVD forces are not currently implemented). -Replica-Averaged simulations can be perfomed using RDCs, \ref ENSEMBLE, \ref STATS and \ref RESTRAINT . +Replica-Averaged simulations can be performed using RDCs, \ref ENSEMBLE, \ref STATS and \ref RESTRAINT . \ref METAINFERENCE can be activated using DOSCORE and the other relevant keywords. Additional material and examples can be also found in the tutorial \ref belfast-9 @@ -88,7 +88,7 @@ Additional material and examples can be also found in the tutorial \ref belfast- In the following example five N-H RDCs are defined and averaged over multiple replicas, their correlation is then calculated with respect to a set of experimental data and restrained. In addition, and only for analysis purposes, the same RDCs each single conformation are calculated -using a Single Value Decomposition algorithm, then averaged and again compared with the experimenta data. +using a Single Value Decomposition algorithm, then averaged and again compared with the experimental data. \plumedfile RDC ... @@ -132,23 +132,23 @@ PRINT ARG=st.corr,st_svd.corr,rdce.bias FILE=colvar //+PLUMEDOC ISDB_COLVAR PCS /* -Calculates the Pseudocontact shift of a nucleus determined by the presence of a metal ion susceptible to anisotropic magnetization. +Calculates the Pseudo-contact shift of a nucleus determined by the presence of a metal ion susceptible to anisotropic magnetization. The PCS of an atomic nucleus depends on the \f$\theta\f$ angle between the vector from the spin-label to the nucleus and the external magnetic field and the module of the vector itself \cite Camilloni:2015jf . 
While in principle the averaging -resulting from the tumbling should remove the pseudocontact shift, in presence of the NMR magnetic field the magnatically anisotropic molecule bound to system will break the rotational symmetry does resulting in measurable PCSs and RDCs. +resulting from the tumbling should remove the pseudo-contact shift, in the presence of the NMR magnetic field the magnetically anisotropic molecule bound to the system will break the rotational symmetry, thus resulting in measurable values for the PCS and RDC. -PCSs can also be calculated using a Single Value Decomposition approach, in this case the code rely on the +PCS values can also be calculated using a Single Value Decomposition approach, in this case the code relies on a set of function from the GNU Scientific Library (GSL). (With SVD forces are not currently implemented). -Replica-Averaged simulations can be perfomed using PCSs, \ref ENSEMBLE, \ref STATS and \ref RESTRAINT . +Replica-Averaged simulations can be performed using PCS values, \ref ENSEMBLE, \ref STATS and \ref RESTRAINT . Metainference simulations can be performed with this CV and \ref METAINFERENCE . \par Examples -In the following example five PCSs are defined and their correlation with +In the following example five PCS values are defined and their correlation with respect to a set of experimental data is calculated and restrained. In addition, -and only for analysis purposes, the same PCSs are calculated using a Single Value +and only for analysis purposes, the same PCS values are calculated using a Single Value Decomposition algorithm. \plumedfile @@ -206,9 +206,9 @@ void RDC::registerKeywords( Keywords& keys ) { keys.reset_style("ATOMS","atoms"); keys.add("compulsory","GYROM","1.","Add the product of the gyromagnetic constants for the bond. "); keys.add("compulsory","SCALE","1.","Add the scaling factor to take into account concentration and other effects. "); - keys.addFlag("SVD",false,"Set to TRUE if you want to backcalculate using Single Value Decomposition (need GSL at compilation time)."); - keys.addFlag("ADDCOUPLINGS",false,"Set to TRUE if you want to have fixed components with the experimetnal values."); - keys.add("numbered","COUPLING","Add an experimental value for each coupling (needed by SVD and usefull for \ref STATS)."); + keys.addFlag("SVD",false,"Set to TRUE if you want to back calculate using Single Value Decomposition (need GSL at compilation time)."); + keys.addFlag("ADDCOUPLINGS",false,"Set to TRUE if you want to have fixed components with the experimental values."); + keys.add("numbered","COUPLING","Add an experimental value for each coupling (needed by SVD and useful for \\ref STATS)."); keys.addOutputComponent("rdc","default","the calculated # RDC"); keys.addOutputComponent("exp","SVD/ADDCOUPLINGS","the experimental # RDC"); } diff --git a/src/isdb/Rescale.cpp b/src/isdb/Rescale.cpp index 3cb72995c..4bd8b95bf 100644 --- a/src/isdb/Rescale.cpp +++ b/src/isdb/Rescale.cpp @@ -39,7 +39,7 @@ namespace isdb { //+PLUMEDOC ISDB_BIAS RESCALE /* -Rescales the value of an another action, being a Collective Variable or a Bias. +Scales the value of another action, being a Collective Variable or a Bias. The rescaling factor is determined by a parameter defined on a logarithmic grid of dimension NBIN in the range from 1 to MAX_RESCALE.
The current value of the rescaling parameter is stored and shared across @@ -50,9 +50,9 @@ The well-tempered metadynamics bias potential is written to the file BFILE every when restarting the simulation using the directive \ref RESTART. \note -Additional arguments not to be rescaled, one for each bin in the rescaling parameter ladder, can be +Additional arguments not to be scaled, one for each bin in the rescaling parameter ladder, can be provided at the end of the ARG list. The number of such arguments is specified by the option NOT_RESCALED. -These arguments will be not be rescaled, but they will be +These arguments will not be scaled, but they will be considered as bias potentials and used in the computation of the Metropolis acceptance probability when proposing a move in the rescaling parameter. See example below. @@ -64,7 +64,7 @@ the arguments will be summed across replicas, unless the NOT_SHARED option is us \par Examples In this example we use \ref RESCALE to implement a simulated-tempering like approach. -The total potential energy of the system is rescaled by a parameter defined on a logarithmic grid +The total potential energy of the system is scaled by a parameter defined on a logarithmic grid of 5 bins in the range from 1 to 1.5. A well-tempered metadynamics bias potential is used to ensure diffusion in the space of the rescaling parameter. @@ -87,7 +87,7 @@ In this second example, we add to the simulated-tempering approach introduced ab one Parallel Bias metadynamics simulation (see \ref PBMETAD) for each value of the rescaling parameter. At each moment of the simulation, only one of the \ref PBMETAD actions is activated, based on the current value of the associated \ref SELECTOR. -The \ref PBMETAD bias potentials are not rescaled, but just used in the calculation of +The \ref PBMETAD bias potentials are not scaled, but just used in the calculation of the Metropolis acceptance probability when proposing a move in the rescaling parameter.
\plumedfile @@ -176,7 +176,7 @@ void Rescale::registerKeywords(Keywords& keys) { keys.add("compulsory","BSTRIDE", "stride for writing bias"); keys.add("compulsory","BFILE", "file name for bias"); keys.add("optional","NOT_SHARED", "list of arguments (from 1 to N) not summed across replicas"); - keys.add("optional","NOT_RESCALED", "these last N arguments will not be rescaled"); + keys.add("optional","NOT_RESCALED", "these last N arguments will not be scaled"); keys.add("optional","MC_STEPS","number of MC steps"); keys.add("optional","MC_STRIDE","MC stride"); keys.add("optional","PACE", "Pace for adding bias, in MC stride unit"); @@ -215,14 +215,14 @@ Rescale::Rescale(const ActionOptions&ao): // number of bias parse("NOT_RESCALED", nores_); - if(nores_>0 && nores_!=nbin) error("The number of non rescaled arguments must be equal to either 0 or the number of bins"); + if(nores_>0 && nores_!=nbin) error("The number of non scaled arguments must be equal to either 0 or the number of bins"); // maximum value of rescale vector<double> max_rescale; parseVector("MAX_RESCALE", max_rescale); // check dimension of max_rescale if(max_rescale.size()!=(getNumberOfArguments()-nores_)) - error("Size of MAX_RESCALE array must be equal to the number of arguments that will to be rescaled"); + error("Size of MAX_RESCALE array must be equal to the number of arguments that will be scaled"); // calculate exponents double igamma_max = static_cast<double>(nbin); @@ -277,7 +277,7 @@ Rescale::Rescale(const ActionOptions&ao): log.printf(" temperature of the system in energy unit %f\n",kbt_); log.printf(" name of the SELECTOR use for this action %s\n",selector_.c_str()); log.printf(" number of bins in grid %u\n",nbin); - log.printf(" number of arguments that will not be rescaled %u\n",nores_); + log.printf(" number of arguments that will not be scaled %u\n",nores_); if(nrep_>1) log.printf(" number of arguments that will not be summed across replicas %u\n",not_shared.size()); log.printf(" biasfactor %f\n",biasf_); log.printf(" initial hills height %f\n",w0_); diff --git a/src/isdb/SAXS.cpp b/src/isdb/SAXS.cpp index 5de552c90..4712736b1 100644 --- a/src/isdb/SAXS.cpp +++ b/src/isdb/SAXS.cpp @@ -59,19 +59,19 @@ namespace isdb { /* Calculates SAXS scattered intensity using either the Debye equation or the harmonic sphere approximation. -Intensities are calculated for a set of scattering lenght set using QVALUES numbered keywords, QVALUE cannot be 0. +Intensities are calculated for a set of scattering lengths set using QVALUE keywords that are numbered starting from 0. Structure factors can be either assigned using a polynomial expansion to any order using the PARAMETERS keywords; automatically assigned to atoms using the ATOMISTIC flag reading a PDB file, a correction for the water density is automatically added, with water density that by default is 0.334 but that can be set otherwise using WATERDENS; -automatically assigned to Martini pseudoatoms using the MARTINI flag. +automatically assigned to Martini pseudo atoms using the MARTINI flag. The calculated intensities can be scaled using the SCALEINT keywords. This is applied by rescaling the structure factors. Experimental reference intensities can be added using the ADDEXP and EXPINT flag and keywords. By default SAXS is calculated using Debye on CPU, by adding the GPU flag it is possible to solve the equation on a GPU -if the arrayfire libraries are installed and correctly linked (). Alternatively we an implementation based on Bessel functions, -BESSEL flag.
This is very fast for small q values because a short expasion is enough. -An automatic choice is made for which q bessel are used and for which the calculation is done by Debye. If one wants to force +if the ARRAYFIRE libraries are installed and correctly linked (). Alternatively we use an implementation based on Bessel functions, +BESSEL flag. This is very fast for small q values because a short expansion is enough. +An automatic choice is made for which q Bessel are used and for which the calculation is done by Debye. If one wants to force all q values to be calculated using Bessel function this can be done using FORCE_BESSEL. -Irrespectively of the method employed, \ref METAINFERENCE can be activated using DOSCORE and the other relevant keywords. +Irrespective of the method employed, \ref METAINFERENCE can be activated using DOSCORE and the other relevant keywords. \par Examples in the following example the saxs intensities for a martini model are calculated. structure factors @@ -157,18 +157,18 @@ void SAXS::registerKeywords(Keywords& keys) { keys.addFlag("NOPBC",false,"ignore the periodic boundary conditions when calculating distances"); keys.addFlag("SERIAL",false,"Perform the calculation in serial - for debug purpose"); keys.addFlag("BESSEL",false,"Perform the calculation using the adaptive spherical harmonic approximation"); - keys.addFlag("FORCE_BESSEL",false,"Perform the calculation using the adaptive spherical harmonic approximation, without adaptive algorithm, usefull for debug only"); + keys.addFlag("FORCE_BESSEL",false,"Perform the calculation using the adaptive spherical harmonic approximation, without adaptive algorithm, useful for debug only"); keys.add("compulsory","DEVICEID","0","Identifier of the GPU to be used"); - keys.addFlag("GPU",false,"calculate SAXS using ARRAYFIRE on an accellerator device"); + keys.addFlag("GPU",false,"calculate SAXS using ARRAYFIRE on an accelerator device"); keys.addFlag("ATOMISTIC",false,"calculate SAXS for an atomistic model"); keys.addFlag("MARTINI",false,"calculate SAXS for a Martini model"); keys.add("atoms","ATOMS","The atoms to be included in the calculation, e.g. the whole protein."); - keys.add("numbered","QVALUE","Selected scattering lenghts in Angstrom are given as QVALUE1, QVALUE2, ... ."); - keys.add("numbered","PARAMETERS","Used parameter Keywords like PARAMETERS1, PARAMETERS2. These are used to calculate the structure factor for the i-th atom/bead."); + keys.add("numbered","QVALUE","Selected scattering lengths in Angstrom are given as QVALUE1, QVALUE2, ... ."); + keys.add("numbered","PARAMETERS","Used parameter Keywords like PARAMETERS1, PARAMETERS2. These are used to calculate the structure factor for the \\f$i\\f$th atom/bead."); keys.add("compulsory","WATERDENS","0.334","Density of the water to be used for the correction of atomistic structure factors."); keys.addFlag("ADDEXP",false,"Set to TRUE if you want to have fixed components with the experimental values."); keys.add("numbered","EXPINT","Add an experimental value for each q value."); - keys.add("compulsory","SCALEINT","1.0","SCALING value of the calculated data. Usefull to simplify the comparison."); + keys.add("compulsory","SCALEINT","1.0","SCALING value of the calculated data.
Useful to simplify the comparison."); keys.addOutputComponent("q","default","the # SAXS of q"); keys.addOutputComponent("exp","ADDEXP","the # experimental intensity"); } diff --git a/src/isdb/Select.cpp b/src/isdb/Select.cpp index 7decf3112..b05a0c3ba 100644 --- a/src/isdb/Select.cpp +++ b/src/isdb/Select.cpp @@ -39,7 +39,7 @@ Selects an argument based on the value of a \ref SELECTOR. \par Examples In this example we use a simulated-tempering like approach activated by the \ref RESCALE action. -For each value of the rescale parameter, we perform an independent Parallel Bias Metadynamics +For each value of the scale parameter, we perform an independent Parallel Bias Metadynamics simulation (see \ref PBMETAD). At each moment of the simulation, only one of the \ref PBMETAD actions is activated, based on the current value of the associated \ref SELECTOR. The \ref SELECT action can then be used to print out the value of the (active) \ref PBMETAD bias potential. diff --git a/src/isdb/Selector.cpp b/src/isdb/Selector.cpp index 5b0287540..38d13aeb5 100644 --- a/src/isdb/Selector.cpp +++ b/src/isdb/Selector.cpp @@ -38,9 +38,9 @@ A \ref SELECTOR can be used for example to activate or modify a bias based on it \par Examples A typical example is the simulated-tempering like approach activated by \ref RESCALE. -In this example the total potential energy of the system is rescaled +In this example the total potential energy of the system is scaled by a parameter defined on a grid of dimension NBIN in the range from 1 to MAX_RESCALE. -The value of the rescale parameter is determined by the current value of the \ref SELECTOR GAMMA. +The value of the scaling parameter is determined by the current value of the \ref SELECTOR GAMMA. The value of the \ref SELECTOR is updated by a MC protocol inside the \ref RESCALE class. A well-tempered metadynamics potential is used to enhance sampling in the \ref SELECTOR space. diff --git a/src/manyrestraints/UWalls.cpp b/src/manyrestraints/UWalls.cpp index 5776d94a9..9c9a2e05b 100644 --- a/src/manyrestraints/UWalls.cpp +++ b/src/manyrestraints/UWalls.cpp @@ -42,10 +42,10 @@ keyword and places a restraint on each quantity, \f$x\f$, with the following fun \par Examples The following set of commands can be used to stop a cluster composed of 20 atoms subliming. The position of -the centre of mass of the cluster is calculated by the \ref COM command labelled c1. The \ref DISTANCES +the center of mass of the cluster is calculated by the \ref COM command labelled c1. The \ref DISTANCES command labelled d1 is then used to calculate the distance between each of the 20 atoms in the cluster and the center of mass of the cluster. These distances are then passed to the UWALLS command, which adds -a \ref UPPER_WALLS restraint on each of them and thereby prevents each of them from moving very far from the centre +a \ref UPPER_WALLS restraint on each of them and thereby prevents each of them from moving very far from the center of mass of the cluster. \plumedfile diff --git a/src/mapping/AdaptivePath.cpp b/src/mapping/AdaptivePath.cpp index e79fdec4f..2b9d6ac58 100644 --- a/src/mapping/AdaptivePath.cpp +++ b/src/mapping/AdaptivePath.cpp @@ -54,7 +54,7 @@ To learn more about how the path is adapted we strongly recommend reading this p \par Examples -The input below provides an example of how the adaptive path works in practise. The path is updated every 50 steps of +The input below provides an example that shows how the adaptive path works. 
The path is updated every 50 steps of MD based on the data accumulated during the preceding 50 time steps. \plumedfile diff --git a/src/mapping/PCAVars.cpp b/src/mapping/PCAVars.cpp index d9dccf083..b47824648 100644 --- a/src/mapping/PCAVars.cpp +++ b/src/mapping/PCAVars.cpp @@ -33,7 +33,7 @@ Projection on principal component eigenvectors or other high dimensional linear subspace The collective variables described in \ref dists allow one to calculate the distance between the -instaneous structure adopted by the system and some high-dimensional, reference configuration. The +instantaneous structure adopted by the system and some high-dimensional, reference configuration. The problem with doing this is that, as one gets further and further from the reference configuration, the distance from it becomes a progressively poorer and poorer collective variable. This happens because the ``number" of structures at a distance \f$d\f$ from a reference configuration is proportional to \f$d^N\f$ in @@ -64,7 +64,7 @@ of the metrics detailed on \ref dists to calculate the displacement, \f$\mathbf{ The matrix \f$A\f$ can be found by various means including principal component analysis and normal mode analysis. In both these methods the rows of \f$A\f$ would be the principle eigenvectors of a square matrix. For PCA the covariance while for normal modes the Hessian. -\bug It is not possible to use the \ref DRMSD metric with this variable. You can get around this by listing the set of distances you wish to calculate for your DRMSD in the plumed file explicitally and using the EUCLIDEAN metric. MAHALONOBIS and NORM-EUCLIDEAN also do not work with this variable but using these options makes little sense when projecting on a linear subspace. +\bug It is not possible to use the \ref DRMSD metric with this variable. You can get around this by listing the set of distances you wish to calculate for your DRMSD in the plumed file explicitly and using the EUCLIDEAN metric. MAHALONOBIS and NORM-EUCLIDEAN also do not work with this variable but using these options makes little sense when projecting on a linear subspace. \par Examples @@ -81,7 +81,7 @@ PRINT ARG=pca2.* FILE=colvar2 \endplumedfile The reference configurations can be specified using a pdb file. The first configuration that you provide is the reference configuration, -which is refered to in the above as \f$X^{ref}\f$ subsequent configurations give the directions of row vectors that are contained in +which is referred to in the above as \f$X^{ref}\f$ subsequent configurations give the directions of row vectors that are contained in the matrix \f$A\f$ above. These directions can be specified by specifying a second configuration - in this case a vector will be constructed by calculating the displacement of this second configuration from the reference configuration. A pdb input prepared in this way would look as follows: @@ -105,7 +105,7 @@ ATOM 21 HH32 NME 3 18.572 -13.148 -16.346 1.00 1.00 END \endverbatim -Alternatively, the second configuration can specify the components of \f$A\f$ explicitally. In this case you need to include the +Alternatively, the second configuration can specify the components of \f$A\f$ explicitly. In this case you need to include the keyword TYPE=DIRECTION in the remarks to the pdb as shown below. 
\verbatim diff --git a/src/mapping/Path.cpp b/src/mapping/Path.cpp index d502800af..88e6dbc52 100644 --- a/src/mapping/Path.cpp +++ b/src/mapping/Path.cpp @@ -43,8 +43,8 @@ z = -\frac{1}{\lambda} \ln\left[ \sum_{i=1}^N \exp( -\lambda R[X - X_i] ) \right In these expressions \f$N\f$ high-dimensional frames (\f$X_i\f$) are used to describe the path in the high-dimensional space. The two expressions above are then functions of the distances from each of the high-dimensional frames \f$R[X - X_i]\f$. Within PLUMED there are multiple ways to define the distance from a high-dimensional configuration. You could calculate -the RMSD distance or you could calculate the ammount by which a set of collective variables change. As such this implementation -of the path cv allows one to use all the difference distance metrics that are discussed in \ref dists. This is as opposed to +the RMSD distance or you could calculate the amount by which a set of collective variables change. As such this implementation +of the path CV allows one to use all the different distance metrics that are discussed in \ref dists. This is as opposed to the alternative implementation of path (\ref PATHMSD) which is a bit faster but which only allows one to use the RMSD distance. The \f$s\f$ and \f$z\f$ variables are calculated using the above formulas by default. However, there is an alternative method @@ -80,7 +80,7 @@ PRINT ARG=p1.sss,p1.zzz STRIDE=1 FILE=colvar FMT=%8.4f In the example below the path is defined using the values of two torsional angles (t1 and t2). In addition, the \f$s\f$ and \f$z\f$ are calculated using the geometric expressions described -above rather than the alegebraic expressions that are used by default. +above rather than the algebraic expressions that are used by default. \plumedfile t1: TORSION ATOMS=5,7,9,15 diff --git a/src/mapping/PathTools.cpp b/src/mapping/PathTools.cpp index ec92f48ab..6ad236bb9 100644 --- a/src/mapping/PathTools.cpp +++ b/src/mapping/PathTools.cpp @@ -42,7 +42,7 @@ namespace mapping { pathtools can be used to construct paths from pdb data The path CVs in PLUMED are curvilinear coordinates through a high dimensional vector space. -Enhanced sampling calculations are ofen run using the progress along the paths and the distance from the path as CVs +Enhanced sampling calculations are often run using the progress along the paths and the distance from the path as CVs as this provides a convenient way of defining a reaction coordinate for a complicated process. This method is explained in the documentation for \ref PATH. @@ -63,12 +63,12 @@ PLUMED. The way you do this with each of these tools described above is explaine \par Examples -The example below shows how you can take a set of unequally spaced frames from a pdb file named inpath.pdb +The example below shows how you can take a set of unequally spaced frames from a pdb file named in_path.pdb and use pathtools to make them equally spaced so that they can be used as the basis for a path CV. The file -containing this final path is named outpath.pdb. +containing this final path is named final_path.pdb. \verbatim -plumed pathtools --path inpath.pdb --metric EUCLIDEAN --out outpath.pdb +plumed pathtools --path in_path.pdb --metric EUCLIDEAN --out final_path.pdb \endverbatim The example below shows how can create an initial linear path connecting the two pdb frames in start.pdb and
plumed pathtools --start start.pdb --end end.pdb --nframes 4 --metric OPTIMAL --out path.pdb \endverbatim -Often the idea with path cvs is to create a path connecting some initial state A to some final state B. You would +Often the idea with path collective variables is to create a path connecting some initial state A to some final state B. You would in this case have representative configurations from your A and B states defined in the input files to pathtools that we have called start.pdb and end.pdb in the example above. Furthermore, it may be useful to have a few frames before your start frame and after your end frame. You can use path tools to create these extended paths as shown below. @@ -92,7 +92,7 @@ frame just after end.pdb. All these frames would be equally spaced. plumed pathtools --start start.pdb --end end.pdb --nframes 4 --metric OPTIMAL --out path.pdb --nframes-before-start 2 --nframes-after-end 2 \endverbatim -Notice also that when you reparameterise paths you must choose two frames to fix. Generally you chose to fix the states +Notice also that when you re-parameterize paths you must choose two frames to fix. Generally you choose to fix the states that are representative of your states A and B. By default pathtools will fix the first and last frames. You can, however, change the states to fix by taking advantage of the fixed flag as shown below. @@ -126,7 +126,7 @@ void PathTools::registerKeywords( Keywords& keys ) { keys.add("compulsory","--metric","the measure to use to calculate the distance between frames"); keys.add("compulsory","--out","the name of the file on which to output your path"); keys.add("compulsory","--arg-fmt","%f","the format to use for argument values in your frames"); - keys.add("compulsory","--tolerance","1E-4","the tolerance to use for the path reparameterization algorithm"); + keys.add("compulsory","--tolerance","1E-4","the tolerance to use for the algorithm that is used to re-parameterize the path"); keys.add("compulsory","--nframes-before-start","1","the number of frames to include in the path before the first frame"); keys.add("compulsory","--nframes","1","the number of frames between the start and end frames in your path"); keys.add("compulsory","--nframes-after-end","1","the number of frames to put after the last frame of your path"); diff --git a/src/mapping/PropertyMap.cpp b/src/mapping/PropertyMap.cpp index 0f95c6e1b..dcfd56643 100644 --- a/src/mapping/PropertyMap.cpp +++ b/src/mapping/PropertyMap.cpp @@ -36,8 +36,8 @@ X=\frac{\sum_i X_i*\exp(-\lambda D_i(x))}{\sum_i \exp(-\lambda D_i(x))} \f] Within PLUMED there are multiple ways to define the distance from a high-dimensional configuration, \f$D_i\f$. You could calculate -the RMSD distance or you could calculate the ammount by which a set of collective variables change. As such this implementation -of the propertymap allows one to use all the different distance metric that are discussed in \ref dists. This is as opposed to +the RMSD distance or you could calculate the amount by which a set of collective variables change. As such this implementation +of the property map allows one to use all the different distance metrics that are discussed in \ref dists. This is as opposed to the alternative implementation \ref PROPERTYMAP which is a bit faster but which only allows one to use the RMSD distance.
\par Examples diff --git a/src/multicolvar/AlphaBeta.cpp b/src/multicolvar/AlphaBeta.cpp index 72af69623..799439f4f 100644 --- a/src/multicolvar/AlphaBeta.cpp +++ b/src/multicolvar/AlphaBeta.cpp @@ -71,7 +71,7 @@ LABEL=ab PRINT ARG=ab FILE=colvar STRIDE=10 \endplumedfile -Writing out the atoms involved in all the torsions in this way can be rather tedious. Thankfully if you are working with protein you +Writing out the atoms involved in all the torsion angles in this way can be rather tedious. Thankfully if you are working with protein you can avoid this by using the \ref MOLINFO command. PLUMED uses the pdb file that you provide to this command to learn about the topology of the protein molecule. This means that you can specify torsion angles using the following syntax: @@ -87,7 +87,7 @@ PRINT ARG=ab FILE=colvar STRIDE=10 \endplumedfile Here, \@phi-3 tells plumed that you would like to calculate the \f$\phi\f$ angle in the third residue of the protein. -Similarly \@psi-4 tells plumed that you want to calculate the \f$\psi\f$ angle of the 4th residue of the protein. +Similarly \@psi-4 tells plumed that you want to calculate the \f$\psi\f$ angle of the fourth residue of the protein. */ @@ -115,9 +115,9 @@ void AlphaBeta::registerKeywords( Keywords& keys ) { "action will depend on what functions of the distribution you choose to calculate."); keys.reset_style("ATOMS","atoms"); keys.add("numbered","REFERENCE","the reference values for each of the torsional angles. If you use a single REFERENCE value the " - "same reference value is used for all torsions"); + "same reference value is used for all torsional angles"); keys.add("numbered","COEFFICIENT","the coefficient for each of the torsional angles. If you use a single COEFFICIENT value the " - "same reference value is used for all torsions"); + "same reference value is used for all torsional angles"); keys.reset_style("REFERENCE","compulsory"); keys.reset_style("COEFFICIENT","optional"); } diff --git a/src/multicolvar/Angles.cpp b/src/multicolvar/Angles.cpp index 7aedd6cda..fe7bcb243 100644 --- a/src/multicolvar/Angles.cpp +++ b/src/multicolvar/Angles.cpp @@ -74,7 +74,7 @@ PRINT ARG=a1.between FILE=colvar \endplumedfile This final example instructs plumed to calculate all the angles in the first coordination -spheres of the atoms. A discretized-normalized histogram of the distribution is then output +spheres of the atoms. The bins for a normalized histogram of the distribution are then output \plumedfile ANGLES GROUP=1-38 HISTOGRAM={GAUSSIAN LOWER=0.0 UPPER=pi NBINS=20} SWITCH={GAUSSIAN R_0=1.0} LABEL=a1 @@ -115,10 +115,10 @@ void Angles::registerKeywords( Keywords& keys ) { keys.reset_style("ATOMS","atoms"); keys.add("atoms-1","GROUP","Calculate angles for each distinct set of three atoms in the group"); keys.add("atoms-2","GROUPA","A group of central atoms about which angles should be calculated"); - keys.add("atoms-2","GROUPB","When used in conjuction with GROUPA this keyword instructs plumed " + keys.add("atoms-2","GROUPB","When used in conjunction with GROUPA this keyword instructs plumed " "to calculate all distinct angles involving one atom from GROUPA " "and two atoms from GROUPB. The atom from GROUPA is the central atom."); - keys.add("atoms-3","GROUPC","This must be used in conjuction with GROUPA and GROUPB. All angles " + keys.add("atoms-3","GROUPC","This must be used in conjunction with GROUPA and GROUPB. All angles " "involving one atom from GROUPA, one atom from GROUPB and one atom from " "GROUPC are calculated.
The GROUPA atoms are assumed to be the central " "atoms"); diff --git a/src/multicolvar/Bridge.cpp b/src/multicolvar/Bridge.cpp index f3fdae077..8e2a14fde 100644 --- a/src/multicolvar/Bridge.cpp +++ b/src/multicolvar/Bridge.cpp @@ -48,7 +48,7 @@ where the sum over \f$i\f$ is over all the ``bridging atoms" and \par Examples The following example instructs plumed to calculate the number of water molecules -that are bridging betweeen atoms 1-10 and atoms 11-20 and to print the value +that are bridging between atoms 1-10 and atoms 11-20 and to print the value to a file \plumedfile diff --git a/src/multicolvar/CenterOfMultiColvar.cpp b/src/multicolvar/CenterOfMultiColvar.cpp index 1cd66672b..90fac3bf1 100644 --- a/src/multicolvar/CenterOfMultiColvar.cpp +++ b/src/multicolvar/CenterOfMultiColvar.cpp @@ -49,9 +49,9 @@ the positions (in scaled coordinates) associated with each of the multicolvars c \par Examples Lets suppose that you are examining the formation of liquid droplets from gas. You may want to -determine the center of mass of any of the droplets formed. In doing this calculation you recognise that +determine the center of mass of any of the droplets formed. In doing this calculation you recognize that the atoms in the liquid droplets will have a higher coordination number than those in the surrounding gas. -As you want to calculate the position of the droplets you thus recognise that these atoms with high coordination +As you want to calculate the position of the droplets you thus recognize that these atoms with high coordination numbers should have a high weight in the weighted average you are using to calculate the position of the droplet. You can thus calculate the position of the droplet using an input like the one shown below: @@ -60,7 +60,7 @@ c1: COORDINATIONNUMBER SPECIES=1-512 SWITCH={EXP D_0=4.0 R_0=0.5} cc: CENTER_OF_MULTICOLVAR DATA=c1 \endplumedfile -The first line here calclates the coordination numbers of all the atoms in the system. The virtual atom then uses the values +The first line here calculates the coordination numbers of all the atoms in the system. The virtual atom then uses the values of the coordination numbers calculated by the action labelled c1 when it calculates the Berry Phase average described above. (N.B. the \f$w_i\f$ in the above expression are all set equal to 1 in this case) @@ -73,7 +73,7 @@ cc: CENTER_OF_MULTICOLVAR DATA=cf \endplumedfile This input once again calculates the coordination numbers of all the atoms in the system. The middle line then transforms these -coordinations numbers to numbers between 0 and 1. Essentially any atom with a coordination number larger than 2.0 is given a weight +coordination numbers to numbers between 0 and 1. Essentially any atom with a coordination number larger than 2.0 is given a weight of one and below this value the transformed value decays to zero. It is these transformed coordination numbers that are used to calculate the Berry phase average described in the previous section. diff --git a/src/multicolvar/CoordinationNumbers.cpp b/src/multicolvar/CoordinationNumbers.cpp index 0d4d3649f..e33c0109c 100644 --- a/src/multicolvar/CoordinationNumbers.cpp +++ b/src/multicolvar/CoordinationNumbers.cpp @@ -36,7 +36,7 @@ namespace multicolvar { //+PLUMEDOC MCOLVAR COORDINATIONNUMBER /* Calculate the coordination numbers of atoms so that you can then calculate functions of the distribution of -coordination numbers such as the minimum, the number less than a certain quantity and so on. 
+ coordination numbers such as the minimum, the number less than a certain quantity and so on. To make the calculation of coordination numbers differentiable the following function is used: @@ -105,7 +105,7 @@ void CoordinationNumbers::registerKeywords( Keywords& keys ) { keys.add("compulsory","R_0","The r_0 parameter of the switching function"); keys.add("optional","R_POWER","Multiply the coordination number function by a power of r, " "as done in White and Voth (see note above, default: no)"); - keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous swiching function defined above. " + keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous switching function defined above. " "The following provides information on the \\ref switchingfunction that are available. " "When this keyword is present you no longer need the NN, MM, D_0 and R_0 keywords."); // Use actionWithDistributionKeywords diff --git a/src/multicolvar/Density.cpp b/src/multicolvar/Density.cpp index 5a8a14d75..3f32052a9 100644 --- a/src/multicolvar/Density.cpp +++ b/src/multicolvar/Density.cpp @@ -34,7 +34,7 @@ namespace multicolvar { //+PLUMEDOC MCOLVAR DENSITY /* Calculate functions of the density of atoms as a function of the box. This allows one to calculate -the number of atoms in half the box. + the number of atoms in half the box. \par Examples diff --git a/src/multicolvar/DihedralCorrelation.cpp b/src/multicolvar/DihedralCorrelation.cpp index ba9094f3d..fc743023e 100644 --- a/src/multicolvar/DihedralCorrelation.cpp +++ b/src/multicolvar/DihedralCorrelation.cpp @@ -46,7 +46,7 @@ where the \f$\phi_i\f$ and \f$\psi\f$ values and the instantaneous values for th \par Examples -The following provides an example input for the dihcor action +The following provides an example input for the DIHCOR action \plumedfile DIHCOR ... @@ -57,11 +57,11 @@ DIHCOR ... PRINT ARG=dih FILE=colvar STRIDE=10 \endplumedfile -In the above input we are calculating the correation between the torsion angle involving atoms 1, 2, 3 and 4 and the torsion angle -involving atoms 5, 6, 7 and 8. This is then added to the correlation betwene the torsion angle involving atoms 5, 6, 7 and 8 and the +In the above input we are calculating the correlation between the torsion angle involving atoms 1, 2, 3 and 4 and the torsion angle +involving atoms 5, 6, 7 and 8. This is then added to the correlation between the torsion angle involving atoms 5, 6, 7 and 8 and the correlation angle involving atoms 9, 10, 11 and 12. -Writing out the atoms involved in all the torsions in this way can be rather tedious. Thankfully if you are working with protein you +Writing out the atoms involved in all the torsion angles in this way can be rather tedious. Thankfully if you are working with protein you can avoid this by using the \ref MOLINFO command. PLUMED uses the pdb file that you provide to this command to learn about the topology of the protein molecule. This means that you can specify torsion angles using the following syntax: @@ -76,7 +76,7 @@ PRINT ARG=dih FILE=colvar STRIDE=10 \endplumedfile Here, \@phi-3 tells plumed that you would like to calculate the \f$\phi\f$ angle in the third residue of the protein. -Similarly \@psi-4 tells plumed that you want to calculate the \f$\psi\f$ angle of the 4th residue of the protein. +Similarly \@psi-4 tells plumed that you want to calculate the \f$\psi\f$ angle of the fourth residue of the protein. 
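As a minimal illustration of this syntax (the file name myprotein.pdb is just a placeholder for your own reference structure), an input of this kind might look as follows:

\plumedfile
MOLINFO STRUCTURE=myprotein.pdb
DIHCOR ...
ATOMS1=@phi-3,@psi-4
LABEL=dih
... DIHCOR
PRINT ARG=dih FILE=colvar STRIDE=10
\endplumedfile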
*/ //+ENDPLUMEDOC diff --git a/src/multicolvar/DistanceFromContour.cpp b/src/multicolvar/DistanceFromContour.cpp index 020f22126..4305361f3 100644 --- a/src/multicolvar/DistanceFromContour.cpp +++ b/src/multicolvar/DistanceFromContour.cpp @@ -49,13 +49,13 @@ In other words, it is a set of points, \f$(x',y',z')\f$, in your box which have: p(x',y',z') = \rho \f] -where \f$\rho\f$ is some target density. This action caculates the distance projected on the \f$x, y\f$ or +where \f$\rho\f$ is some target density. This action calculates the distance projected on the \f$x, y\f$ or \f$z\f$ axis between the position of some test particle and this surface of constant field density. \par Examples -In this example atoms 2-100 are assumed to be concentraed along some part of the \f$z\f$ axis so that you -an interface between a liquid/solid and the vapour. The quantity dc measures the distance between the +In this example atoms 2-100 are assumed to be concentrated along some part of the \f$z\f$ axis so that you +have an interface between a liquid/solid and the vapor. The quantity dc measures the distance between the surface at which the density of 2-100 atoms is equal to 0.2 and the position of the test particle atom 1. \plumedfile @@ -107,7 +107,7 @@ void DistanceFromContour::registerKeywords( Keywords& keys ) { keys.addOutputComponent("thickness","default","the distance between the two contours on the line from the reference atom"); keys.add("compulsory","DATA","The input base multicolvar which is being used to calculate the contour"); keys.add("atoms","ATOM","The atom whose perpendicular distance we are calculating from the contour"); - keys.add("compulsory","BANDWIDTH","the bandwidths for kernel density esimtation"); + keys.add("compulsory","BANDWIDTH","the bandwidths for kernel density estimation"); keys.add("compulsory","KERNEL","gaussian","the kernel function you are using. More details on the kernels available " "in plumed plumed can be found in \\ref kernelfunctions."); keys.add("compulsory","DIR","the direction perpendicular to the contour that you are looking for"); @@ -115,7 +115,7 @@ void DistanceFromContour::registerKeywords( Keywords& keys ) { keys.add("compulsory","TOLERANCE","0.1","this parameter is used to manage periodic boundary conditions. The problem " "here is that we can be between contours even when we are not within the membrane " "because of periodic boundary conditions. When we are in the contour, however, we " - "should have it so that the sums of the absoluate values of the distances to the two " + "should have it so that the sums of the absolute values of the distances to the two " "contours is approximately the distance between the two contours. There can be numerical errors in these calculations, however, so " "we specify a small tolerance here"); } diff --git a/src/multicolvar/Distances.cpp b/src/multicolvar/Distances.cpp index 99b95061d..b2709bf7a 100644 --- a/src/multicolvar/Distances.cpp +++ b/src/multicolvar/Distances.cpp @@ -36,7 +36,7 @@ namespace multicolvar { //+PLUMEDOC MCOLVAR DISTANCES /* Calculate the distances between one or many pairs of atoms. You can then calculate functions of the distribution of -distances such as the minimum, the number less than a certain quantity and so on.
\par Examples @@ -85,7 +85,7 @@ PRINT ARG=d1.min FILE=colvar STRIDE=10 \endplumedfile (see \ref DISTANCES and \ref PRINT) -In order to ensure differentiability the minimum is calculated using the following function: +In order to ensure that the minimum value has continuous derivatives we use the following function: \f[ s = \frac{\beta}{ \log \sum_i \exp\left( \frac{\beta}{s_i} \right) } @@ -94,7 +94,7 @@ s = \frac{\beta}{ \log \sum_i \exp\left( \frac{\beta}{s_i} \right) } where \f$\beta\f$ is a user specified parameter. This input is used rather than a separate MINDIST colvar so that the same routine and the same input style can be -used to calculate minimum coordinatetion numbers (see \ref COORDINATIONNUMBER), minimum +used to calculate minimum coordination numbers (see \ref COORDINATIONNUMBER), minimum angles (see \ref ANGLES) and many other variables. This new way of calculating mindist is part of plumed 2's multicolvar functionality. These special actions @@ -149,9 +149,9 @@ void Distances::registerKeywords( Keywords& keys ) { keys.reset_style("ATOMS","atoms"); keys.add("atoms-1","GROUP","Calculate the distance between each distinct pair of atoms in the group"); keys.add("atoms-2","GROUPA","Calculate the distances between all the atoms in GROUPA and all " - "the atoms in GROUPB. This must be used in conjuction with GROUPB."); + "the atoms in GROUPB. This must be used in conjunction with GROUPB."); keys.add("atoms-2","GROUPB","Calculate the distances between all the atoms in GROUPA and all the atoms " - "in GROUPB. This must be used in conjuction with GROUPA."); + "in GROUPB. This must be used in conjunction with GROUPA."); } Distances::Distances(const ActionOptions&ao): diff --git a/src/multicolvar/FilterBetween.cpp b/src/multicolvar/FilterBetween.cpp index 20f42a1cf..738ddf83d 100644 --- a/src/multicolvar/FilterBetween.cpp +++ b/src/multicolvar/FilterBetween.cpp @@ -25,9 +25,9 @@ //+PLUMEDOC MTRANSFORMS MTRANSFORM_BETWEEN /* -This action can be useed to transform the colvar values calculated by a multicolvar using a \ref histogrambead +This action can be used to transform the colvar values calculated by a MultiColvar using a \ref histogrambead -In this action each colvar, \f$s_i\f$, calculated by multicolvar is transformed by a \ref histogrambead function that +In this action each colvar, \f$s_i\f$, calculated by MultiColvar is transformed by a \ref histogrambead function that is equal to one if the colvar is within a certain range and which is equal to zero otherwise. In other words, we compute: @@ -46,7 +46,7 @@ MEAN for \ref MFILTER_BETWEEN one is thus calculating: \f] In this action by contrast the colvar is being transformed by the \ref histogrambead. If one thus calculates a MEAN for -thia action one computes: +this action one computes: \f[ \mu = \frac{ \sum_{i=1}^N f_i }{ N } @@ -56,8 +56,8 @@ In other words, you are calculating the mean for the transformed colvar. \par Examples -The following input gives an example of how a MTRANSFORM_BETWEEN action can be used to duplicate -functionality that is elsehwere in PLUMED. +The following input gives an example of how a \ref MTRANSFORM_BETWEEN action can be used to duplicate +functionality that is elsewhere in PLUMED. \plumedfile DISTANCES ... @@ -77,7 +77,7 @@ DISTANCES ... 
\endplumedfile (see \ref DISTANCES) -The advantage of MTRANSFORM_BETWEEN comes, however, if you want to use transformed colvars as input +The advantage of \ref MTRANSFORM_BETWEEN comes, however, if you want to use transformed colvars as input for \ref MULTICOLVARDENS */ @@ -85,12 +85,12 @@ for \ref MULTICOLVARDENS //+PLUMEDOC MFILTERS MFILTER_BETWEEN /* -This action can be used to filter the colvar values calculated by a multicolvar +This action can be used to filter the colvar values calculated by a \ref mcolv so that one can compute the mean and so on for only those multicolvars within a certain range. This action can be used to create a dynamic group of atom based on the value of a multicolvar. In this action a multicolvar is within the dynamic group if its value lies in a particular range. -In practise a weight, \f$w_i\f$ is ascribed to each colvar, \f$s_i\f$ calculated by a multicolvar +In actuality a weight, \f$w_i\f$ is ascribed to each colvar, \f$s_i\f$ calculated by a multicolvar and this weight measures the degree to which a colvar is a member of the group. This weight is calculated using a \ref histogrambead so it is given by: @@ -151,8 +151,8 @@ void FilterBetween::registerKeywords( Keywords& keys ) { MultiColvarFilter::registerKeywords( keys ); keys.add("compulsory","LOWER","the lower boundary for the range of interest"); keys.add("compulsory","UPPER","the upper boundary for the range of interest"); - keys.add("compulsory","SMEAR","0.5","the ammount by which to smear the value for kernel density estimation"); - keys.add("optional","BEAD","This keywords is used if you want to employ an alternative to the function defeind above. " + keys.add("compulsory","SMEAR","0.5","the amount by which to smear the value for kernel density estimation"); + keys.add("optional","BEAD","This keyword is used if you want to employ an alternative to the function defined above. " "The following provides information on the \\ref histogrambead that are available. " "When this keyword is present you no longer need the LOWER, UPPER and SMEAR keywords."); } diff --git a/src/multicolvar/FilterLessThan.cpp b/src/multicolvar/FilterLessThan.cpp index 23a9603d5..5f39cb6fc 100644 --- a/src/multicolvar/FilterLessThan.cpp +++ b/src/multicolvar/FilterLessThan.cpp @@ -25,9 +25,9 @@ //+PLUMEDOC MTRANSFORMS MTRANSFORM_LESS /* -This action can be useed to transform the colvar values calculated by a multicolvar using a \ref switchingfunction +This action can be used to transform the colvar values calculated by a \ref mcolv using a \ref switchingfunction -In this action each colvar, \f$s_i\f$, calculated by multicolvar is transformed by a \ref switchingfunction function that +In this action each colvar, \f$s_i\f$, calculated by \ref mcolv is transformed by a \ref switchingfunction function that is equal to one if the colvar is less than a certain target value and which is equal to zero otherwise. It is important to understand the distinction between what is done here and what is done by \ref MFILTER_LESS. In \ref MFILTER_LESS a weight, \f$w_i\f$ for the colvar is calculated using the \ref switchingfunction. If one calculates the @@ -38,7 +38,7 @@ MEAN for \ref MFILTER_LESS one is thus calculating: \f] where \f$\sigma\f$ is the \ref switchingfunction. In this action by contrast the colvar is being transformed by -the \ref switchingfunction. If one thus calculates a MEAN for thia action one computes:
If one thus calculates a MEAN for this action one computes: \f[ \mu = \frac{ \sum_{i=1}^N \simga(s_i) }{ N } @@ -49,7 +49,7 @@ In other words, you are calculating the mean for the transformed colvar. \par Examples The following input gives an example of how a MTRANSFORM_LESS action can be used to duplicate -functionality that is elsehwere in PLUMED. +functionality that is elsewhere in PLUMED. \plumedfile DISTANCES ... @@ -77,12 +77,12 @@ for \ref MULTICOLVARDENS //+PLUMEDOC MFILTERS MFILTER_LESS /* -This action can be used to filter the distribution of colvar values in a multicolvar +This action can be used to filter the distribution of colvar values in a \ref mcolv so that one can compute the mean and so on for only those multicolvars less than a tolerance. This action can be used to create a dynamic group of atom based on the value of a multicolvar. In this action a multicolvar is within the dynamic group if its value is less than a target. -In practise a weight, \f$w_i\f$ is ascribed to each colvar, \f$s_i\f$ calculated by a multicolvar +In actuality a weight, \f$w_i\f$ is ascribed to each colvar, \f$s_i\f$ calculated by a multicolvar and this weight measures the degree to which a colvar is a member of the group. This weight is a number between 0 and 1 that is calculated using a \ref switchingfunction , \f$\sigma\f$. If one calculates a function of the set of multicolvars @@ -128,7 +128,7 @@ void FilterLess::registerKeywords( Keywords& keys ) { keys.add("compulsory","MM","0","The m parameter of the switching function "); keys.add("compulsory","D_0","0.0","The d_0 parameter of the switching function"); keys.add("compulsory","R_0","The r_0 parameter of the switching function"); - keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous swiching function defined above. " + keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous switching function defined above. " "The following provides information on the \\ref switchingfunction that are available. " "When this keyword is present you no longer need the NN, MM, D_0 and R_0 keywords."); } diff --git a/src/multicolvar/FilterMoreThan.cpp b/src/multicolvar/FilterMoreThan.cpp index bfca0d218..77ef47c37 100644 --- a/src/multicolvar/FilterMoreThan.cpp +++ b/src/multicolvar/FilterMoreThan.cpp @@ -25,9 +25,9 @@ //+PLUMEDOC MTRANSFORMS MTRANSFORM_MORE /* -This action can be useed to transform the colvar values calculated by a multicolvar using one minus a \ref switchingfunction +This action can be used to transform the colvar values calculated by a \ref mcolv using one minus a \ref switchingfunction -In this action each colvar, \f$s_i\f$, calculated by multicolvar is transformed by a \ref switchingfunction function that +In this action each colvar, \f$s_i\f$, calculated by \ref mcolv is transformed by a \ref switchingfunction function that is equal to one if the colvar is greater than a certain target value and which is equal to zero otherwise. It is important to understand the distinction between what is done here and what is done by \ref MFILTER_MORE. In \ref MFILTER_MORE a weight, \f$w_i\f$ for the colvar is calculated using the \ref histogrambead. If one calculates the @@ -49,7 +49,7 @@ In other words, you are calculating the mean for the transformed colvar. \par Examples The following input gives an example of how a MTRANSFORM_MORE action can be used to duplicate -functionality that is elsehwere in PLUMED. 
+functionality that is elsewhere in PLUMED. \plumedfile DISTANCES ... @@ -77,12 +77,12 @@ for \ref MULTICOLVARDENS //+PLUMEDOC MFILTERS MFILTER_MORE /* -This action can be used to filter the distribution of colvar values in a multicolvar +This action can be used to filter the distribution of colvar values in a \ref mcolv so that one can compute the mean and so on for only those multicolvars more than a tolerance. This action can be used to create a dynamic group of atom based on the value of a multicolvar. In this action a multicolvar is within the dynamic group if its value is greater than a target. -In practise a weight, \f$w_i\f$ is ascribed to each colvar, \f$s_i\f$ calculated by a multicolvar +In actuality a weight, \f$w_i\f$ is ascribed to each colvar, \f$s_i\f$ calculated by a multicolvar and this weight measures the degree to which a colvar is a member of the group. This weight is calculated using a \ref switchingfunction , \f$\sigma\f$ so it is given by: @@ -145,7 +145,7 @@ void FilterMore::registerKeywords( Keywords& keys ) { keys.add("compulsory","MM","0","The m parameter of the switching function; 0 implies 2*NN"); keys.add("compulsory","D_0","0.0","The d_0 parameter of the switching function"); keys.add("compulsory","R_0","The r_0 parameter of the switching function"); - keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous swiching function defined above. " + keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous switching function defined above. " "The following provides information on the \\ref switchingfunction that are available. " "When this keyword is present you no longer need the NN, MM, D_0 and R_0 keywords."); } diff --git a/src/multicolvar/InPlaneDistances.cpp b/src/multicolvar/InPlaneDistances.cpp index a1fc79cdb..e85b64715 100644 --- a/src/multicolvar/InPlaneDistances.cpp +++ b/src/multicolvar/InPlaneDistances.cpp @@ -79,7 +79,7 @@ void InPlaneDistances::registerKeywords( Keywords& keys ) { keys.use("MEAN"); keys.use("MIN"); keys.use("MAX"); keys.use("LESS_THAN"); keys.use("MORE_THAN"); keys.use("BETWEEN"); keys.use("HISTOGRAM"); keys.use("MOMENTS"); keys.add("atoms","VECTORSTART","The first atom position that is used to define the normal to the plane of interest"); - keys.add("atoms","VECTOREND","The second atom position that is used to defin the normal to the plane of interest"); + keys.add("atoms","VECTOREND","The second atom position that is used to define the normal to the plane of interest"); keys.add("atoms-2","GROUP","The set of atoms for which you wish to calculate the in plane distance "); } diff --git a/src/multicolvar/LocalAverage.cpp b/src/multicolvar/LocalAverage.cpp index bbcf17ba4..e72670144 100644 --- a/src/multicolvar/LocalAverage.cpp +++ b/src/multicolvar/LocalAverage.cpp @@ -31,7 +31,7 @@ Calculate averages over spherical regions centered on atoms As is explained in <a href="http://www.youtube.com/watch?v=iDvZmbWE5ps"> this video </a> certain multicolvars calculate one scalar quantity or one vector for each of the atoms in the system. For example \ref COORDINATIONNUMBER measures the coordination number of each of the atoms in the system and \ref Q4 measures -the 4th order Steinhardt parameter for each of the atoms in the system. These quantities provide tell us something about +the fourth order Steinhardt parameter for each of the atoms in the system. 
These quantities provide tell us something about the disposition of the atoms in the first coordination sphere of each of the atoms of interest. Lechner and Dellago \cite dellago-q6 have suggested that one can probe local order in a system by taking the average value of such symmetry functions over the atoms within a spherical cutoff of each of these atoms in the systems. When this is done with Steinhardt parameters @@ -49,7 +49,7 @@ multicolvars. The function \f$\sigma( r_{ij} )\f$ is a \ref switchingfunction t atoms \f$i\f$ and \f$j\f$. Lechner and Dellago suggest that the parameters of this function should be set so that it the function is equal to one when atom \f$j\f$ is in the first coordination sphere of atom \f$i\f$ and is zero otherwise. -The \f$s_i\f$ quantities calculated using the above command can be again thought of as atom-centred symmetry functions. They +The \f$s_i\f$ quantities calculated using the above command can be again thought of as atom-centered symmetry functions. They thus operate much like multicolvars. You can thus calculate properties of the distribution of \f$s_i\f$ values using MEAN, LESS_THAN, HISTOGRAM and so on. You can also probe the value of these averaged variables in regions of the box by using the command in tandem with the \ref AROUND command. @@ -66,7 +66,7 @@ PRINT ARG=la.* FILE=colvar \endplumedfile This example input calculates the \f$q_4\f$ (see \ref Q4) vectors for each of the atoms in the system. These vectors are then averaged -component by component over a spherical region. The average value for this quantity is then outputeed to a file. This calculates the +component by component over a spherical region. The average value for this quantity is then output to a file. This calculates the quantities that were used in the paper by Lechner and Dellago \cite dellago-q6 \plumedfile @@ -108,7 +108,7 @@ void LocalAverage::registerKeywords( Keywords& keys ) { keys.add("compulsory","MM","0","The m parameter of the switching function; 0 implies 2*NN"); keys.add("compulsory","D_0","0.0","The d_0 parameter of the switching function"); keys.add("compulsory","R_0","The r_0 parameter of the switching function"); - keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous swiching function defined above. " + keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous switching function defined above. " "The following provides information on the \\ref switchingfunction that are available. " "When this keyword is present you no longer need the NN, MM, D_0 and R_0 keywords."); // Use actionWithDistributionKeywords diff --git a/src/multicolvar/MultiColvarBase.cpp b/src/multicolvar/MultiColvarBase.cpp index 3e4e6925b..a8d1d048f 100644 --- a/src/multicolvar/MultiColvarBase.cpp +++ b/src/multicolvar/MultiColvarBase.cpp @@ -52,11 +52,11 @@ void MultiColvarBase::registerKeywords( Keywords& keys ) { "This Action can be used to calculate the following scalar quantities directly. These quantities are calculated by " "employing the keywords listed below. " "These quantities can then be referenced elsewhere in the input file by using this Action's label " - "followed by a dot and the name of the quantity. Some amongst them can be calculated multiple times " + "followed by a dot and the name of the quantity. Some of them can be calculated multiple times " "with different parameters. 
In this case the quantities calculated can be referenced elsewhere in the " "input by using the name of the quantity followed by a numerical identifier " "e.g. <em>label</em>.lessthan-1, <em>label</em>.lessthan-2 etc. When doing this and, for clarity we have " - "made the label of the components customizable. As such by using the LABEL keyword in the description of the keyword " + "made it so that the user can set a particular label for each of the components. As such by using the LABEL keyword in the description of the keyword " "input you can customize the component name"); keys.reserve("atoms-3","SPECIES","this keyword is used for colvars such as coordination number. In that context it specifies that plumed should calculate " "one coordination number for each of the atoms specified. Each of these coordination numbers specifies how many of the " @@ -65,7 +65,7 @@ void MultiColvarBase::registerKeywords( Keywords& keys ) { "in the previous multicolvar. This is useful if you would like to calculate the Steinhardt parameter for those atoms that have a " "coordination number more than four for example"); keys.reserve("atoms-4","SPECIESA","this keyword is used for colvars such as the coordination number. In that context it species that plumed should calculate " - "one coordination number for each of the atoms specified in SPECIESA. Each of these cooordination numbers specifies how many " + "one coordination number for each of the atoms specified in SPECIESA. Each of these coordination numbers specifies how many " "of the atoms specifies using SPECIESB is within the specified cutoff. As with the species keyword the input can also be specified " "using the label of another multicolvar"); keys.reserve("atoms-4","SPECIESB","this keyword is used for colvars such as the coordination number. It must appear with SPECIESA. For a full explanation see " diff --git a/src/multicolvar/MultiColvarCombine.cpp b/src/multicolvar/MultiColvarCombine.cpp index c947268f3..161160131 100644 --- a/src/multicolvar/MultiColvarCombine.cpp +++ b/src/multicolvar/MultiColvarCombine.cpp @@ -55,7 +55,7 @@ PLUMED_REGISTER_ACTION(MultiColvarCombine,"MCOLV_COMBINE") void MultiColvarCombine::registerKeywords( Keywords& keys ) { MultiColvarBase::registerKeywords( keys ); keys.add("compulsory","DATA","the multicolvars you are calculating linear combinations for"); - keys.add("compulsory","COEFFICIENTS","1.0","the coeficients to use for the various multicolvars"); + keys.add("compulsory","COEFFICIENTS","1.0","the coefficients to use for the various multicolvars"); keys.use("MEAN"); keys.use("MORE_THAN"); keys.use("SUM"); keys.use("LESS_THAN"); keys.use("HISTOGRAM"); keys.use("HISTOGRAM"); keys.use("MIN"); keys.use("MAX"); keys.use("LOWEST"); keys.use("HIGHEST"); keys.use("ALT_MIN"); keys.use("BETWEEN"); keys.use("MOMENTS"); } diff --git a/src/multicolvar/MultiColvarDensity.cpp b/src/multicolvar/MultiColvarDensity.cpp index b44779881..1fb396ca8 100644 --- a/src/multicolvar/MultiColvarDensity.cpp +++ b/src/multicolvar/MultiColvarDensity.cpp @@ -55,7 +55,7 @@ actions. \par Examples The following example shows perhaps the simplest way in which this action can be used. The following -input computes the density of atoms at each point on the grid and ouptuts this quantity to a file. In +input computes the density of atoms at each point on the grid and outputs this quantity to a file. 
In other words this input instructs plumed to calculate \f$\rho(\mathbf{r}) = \sum_i K(\mathbf{r} - \mathbf{r}_i )\f$ \plumedfile @@ -66,7 +66,7 @@ DUMPGRID GRID=grid STRIDE=500 FILE=density In the above example density is added to the grid on every step. The PRINT_GRID instruction thus tells PLUMED to output the average density at each point on the grid every 500 steps of simulation. Notice that the that grid output -on step 1000 is an average over all 1000 frames of the trajectory. If you would like to analyse these two blocks +on step 1000 is an average over all 1000 frames of the trajectory. If you would like to analyze these two blocks of data separately you must use the CLEAR flag. This second example computes an order parameter (in this case \ref FCCUBIC) and constructs a phase field model @@ -118,19 +118,19 @@ void MultiColvarDensity::registerKeywords( Keywords& keys ) { keys.add("optional","SPACING","the approximate grid spacing (to be used as an alternative or together with NBINS)"); keys.addFlag("FRACTIONAL",false,"use fractional coordinates for the various axes"); keys.addFlag("XREDUCED",false,"limit the calculation of the density/average to a portion of the z-axis only"); - keys.add("optional","XLOWER","this is required if you are using XREDUCED. It specifes the lower bound for the region of the x-axis that for " + keys.add("optional","XLOWER","this is required if you are using XREDUCED. It specifies the lower bound for the region of the x-axis that for " "which you are calculating the density/average"); - keys.add("optional","XUPPER","this is required if you are using XREDUCED. It specifes the upper bound for the region of the x-axis that for " + keys.add("optional","XUPPER","this is required if you are using XREDUCED. It specifies the upper bound for the region of the x-axis that for " "which you are calculating the density/average"); keys.addFlag("YREDUCED",false,"limit the calculation of the density/average to a portion of the y-axis only"); - keys.add("optional","YLOWER","this is required if you are using YREDUCED. It specifes the lower bound for the region of the y-axis that for " + keys.add("optional","YLOWER","this is required if you are using YREDUCED. It specifies the lower bound for the region of the y-axis that for " "which you are calculating the density/average"); - keys.add("optional","YUPPER","this is required if you are using YREDUCED. It specifes the upper bound for the region of the y-axis that for " + keys.add("optional","YUPPER","this is required if you are using YREDUCED. It specifies the upper bound for the region of the y-axis that for " "which you are calculating the density/average"); keys.addFlag("ZREDUCED",false,"limit the calculation of the density/average to a portion of the z-axis only"); - keys.add("optional","ZLOWER","this is required if you are using ZREDUCED. It specifes the lower bound for the region of the z-axis that for " + keys.add("optional","ZLOWER","this is required if you are using ZREDUCED. It specifies the lower bound for the region of the z-axis that for " "which you are calculating the density/average"); - keys.add("optional","ZUPPER","this is required if you are using ZREDUCED. It specifes the upper bound for the region of the z-axis that for " + keys.add("optional","ZUPPER","this is required if you are using ZREDUCED. 
It specifies the upper bound for the region of the z-axis that for " "which you are calculating the density/average"); } diff --git a/src/multicolvar/NumberOfLinks.cpp b/src/multicolvar/NumberOfLinks.cpp index 3b930b0e2..2b83e674b 100644 --- a/src/multicolvar/NumberOfLinks.cpp +++ b/src/multicolvar/NumberOfLinks.cpp @@ -49,7 +49,7 @@ NLINKS ARG=d1 SWITCH={RATIONAL D_0=1.3 R_0=0.2} LABEL=dd PRINT ARG=dd FILE=colvar \endplumedfile -The following calculates how many pairs of neighbouring atoms in a system containg 64 atoms have +The following calculates how many pairs of neighboring atoms in a system containing 64 atoms have similar dispositions for the atoms in their coordination sphere. This calculation uses the dot product of the Q6 vectors on adjacent atoms to measure whether or not two atoms have the same ``orientation" @@ -94,7 +94,7 @@ void NumberOfLinks::registerKeywords( Keywords& keys ) { keys.add("compulsory","MM","0","The m parameter of the switching function; 0 implies 2*NN"); keys.add("compulsory","D_0","0.0","The d_0 parameter of the switching function"); keys.add("compulsory","R_0","The r_0 parameter of the switching function"); - keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous swiching function defined above. " + keys.add("optional","SWITCH","This keyword is used if you want to employ an alternative to the continuous switching function defined above. " "The following provides information on the \\ref switchingfunction that are available. " "When this keyword is present you no longer need the NN, MM, D_0 and R_0 keywords."); // Use actionWithDistributionKeywords diff --git a/src/multicolvar/Torsions.cpp b/src/multicolvar/Torsions.cpp index 91b79dc75..45d5730bc 100644 --- a/src/multicolvar/Torsions.cpp +++ b/src/multicolvar/Torsions.cpp @@ -38,7 +38,7 @@ Calculate whether or not a set of torsional angles are within a particular range \par Examples -The following provides an example of the input for the torsions command +The following provides an example of the input for the TORSIONS command \plumedfile TORSIONS ... @@ -50,7 +50,7 @@ LABEL=ab PRINT ARG=ab.* FILE=colvar STRIDE=10 \endplumedfile -Writing out the atoms involved in all the torsions in this way can be rather tedious. Thankfully if you are working with protein you +Writing out the atoms involved in all the torsion angles in this way can be rather tedious. Thankfully if you are working with protein you can avoid this by using the \ref MOLINFO command. PLUMED uses the pdb file that you provide to this command to learn about the topology of the protein molecule. This means that you can specify torsion angles using the following syntax: @@ -66,7 +66,7 @@ PRINT ARG=ab FILE=colvar STRIDE=10 \endplumedfile Here, \@phi-3 tells plumed that you would like to calculate the \f$\phi\f$ angle in the third residue of the protein. -Similarly \@psi-4 tells plumed that you want to calculate the \f$\psi\f$ angle of the 4th residue of the protein. +Similarly \@psi-4 tells plumed that you want to calculate the \f$\psi\f$ angle of the fourth residue of the protein. 
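A short standalone sketch may help to make the rational switching function that the NN, MM, R_0 and D_0 keywords above parameterize, and the distinction between the MFILTER-style weighted average and the MTRANSFORM-style average of transformed values discussed earlier in this patch, more concrete. This is not PLUMED source code; it is an illustration written against the formulas quoted in the documentation, and the function and variable names are invented for the example.

\verbatim
// Illustrative C++ sketch, not part of PLUMED.  It evaluates the rational
// switching function sigma(r) = [1-((r-d0)/r0)^n] / [1-((r-d0)/r0)^m] that the
// NN, MM, R_0 and D_0 keywords parameterize, and contrasts the filter-style
// weighted mean with the transform-style mean of the transformed values.
#include <cmath>
#include <cstdio>
#include <vector>

double rationalSwitch(double r, double d0, double r0, int n, int m) {
  const double rd = (r - d0) / r0;
  return (1.0 - std::pow(rd, n)) / (1.0 - std::pow(rd, m));
}

int main() {
  const std::vector<double> colvars = {0.08, 0.11, 0.25, 0.40};   // example colvar values s_i
  double wsum = 0.0, wssum = 0.0, fsum = 0.0;
  for (double s : colvars) {
    const double w = rationalSwitch(s, 0.0, 0.2, 6, 12);   // weight / transformed value
    wsum  += w;       // denominator of the filter-style weighted mean
    wssum += w * s;   // numerator of the filter-style weighted mean
    fsum  += w;       // transform-style mean just averages the transformed values
  }
  std::printf("filter-style weighted mean : %f\n", wssum / wsum);
  std::printf("transform-style mean       : %f\n", fsum / static_cast<double>(colvars.size()));
  return 0;
}
\endverbatim

With these numbers the filter-style mean is pulled toward the small distances that carry most of the weight, while the transform-style mean is essentially the fraction of distances that lie below the cutoff.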
*/ diff --git a/src/multicolvar/VolumeAround.cpp b/src/multicolvar/VolumeAround.cpp index 4d9b54f89..fc61a39a3 100644 --- a/src/multicolvar/VolumeAround.cpp +++ b/src/multicolvar/VolumeAround.cpp @@ -25,7 +25,7 @@ //+PLUMEDOC VOLUMES AROUND /* -This quantity can be used to calculate functions of the distribution of collective +This quantity can be used to calculate functions of the distribution of collective variables for the atoms that lie in a particular, user-specified part of of the cell. Each of the base quantities calculated by a multicolvar can can be assigned to a particular point in three diff --git a/src/multicolvar/VolumeBetweenContours.cpp b/src/multicolvar/VolumeBetweenContours.cpp index 1d29a2f49..ffa8b63f6 100644 --- a/src/multicolvar/VolumeBetweenContours.cpp +++ b/src/multicolvar/VolumeBetweenContours.cpp @@ -28,7 +28,7 @@ //+PLUMEDOC VOLUMES INENVELOPE /* -This quantity can be used to calculate functions of the distribution of collective +This quantity can be used to calculate functions of the distribution of collective variables for the atoms that lie in a region where the density of a certain type of atom is high. This collective variable can be used to determine whether colvars are within region where the density @@ -81,8 +81,8 @@ PLUMED_REGISTER_ACTION(VolumeInEnvelope,"INENVELOPE") void VolumeInEnvelope::registerKeywords( Keywords& keys ) { ActionVolume::registerKeywords( keys ); keys.remove("SIGMA"); keys.add("atoms","ATOMS","the atom whose positions we are constructing a field from"); - keys.add("compulsory","BANDWIDTH","the bandwidths for kernel density esimtation"); - keys.add("compulsory","CONTOUR","a switching funciton that tells PLUMED how large the density should be"); + keys.add("compulsory","BANDWIDTH","the bandwidths for kernel density estimation"); + keys.add("compulsory","CONTOUR","a switching function that tells PLUMED how large the density should be"); } VolumeInEnvelope::VolumeInEnvelope(const ActionOptions& ao): diff --git a/src/multicolvar/VolumeCavity.cpp b/src/multicolvar/VolumeCavity.cpp index 44215c605..0d300cc1f 100644 --- a/src/multicolvar/VolumeCavity.cpp +++ b/src/multicolvar/VolumeCavity.cpp @@ -28,7 +28,7 @@ //+PLUMEDOC VOLUMES CAVITY /* -This quantity can be used to calculate functions of the distribution of collective +This quantity can be used to calculate functions of the distribution of collective variables for the atoms that lie in a box defined by the positions of four atoms. Each of the base quantities calculated by a multicolvar can can be assigned to a particular point in three @@ -64,7 +64,7 @@ position to a position in \f$ (u_i,v_i,z_i)\f$. This is done using a rotation m \f] where \f$\mathbf{R}\f$ is a rotation matrix that is calculated by constructing a set of three orthonormal vectors from the -refererence positions specified by the user. The first of these unit vectors points from the first reference atom to the second. +reference positions specified by the user. The first of these unit vectors points from the first reference atom to the second. The second is then the normal to the plane containing atoms 1,2 and 3 and the the third is the unit vector orthogonal to these first two vectors. \f$(x_o,y_o,z_o)\f$, meanwhile, specifies the position of the first reference atom. @@ -83,8 +83,8 @@ described above and the resulting projections determine the \f$u'\f$, \f$v'\f$ a \par Examples -The following commands tell plumed to calculate the number of atoms in an ion chanel in a protein. 
-The extent of the chanel is calculated from the positions of atoms 1, 4, 5 and 11. The final value will be labeled cav. +The following commands tell plumed to calculate the number of atoms in an ion channel in a protein. +The extent of the channel is calculated from the positions of atoms 1, 4, 5 and 11. The final value will be labeled cav. \plumedfile d1: DENSITY SPECIES=20-500 diff --git a/src/multicolvar/VolumeInCylinder.cpp b/src/multicolvar/VolumeInCylinder.cpp index 0db391169..0b03cb0e3 100644 --- a/src/multicolvar/VolumeInCylinder.cpp +++ b/src/multicolvar/VolumeInCylinder.cpp @@ -26,7 +26,7 @@ //+PLUMEDOC VOLUMES INCYLINDER /* -This quantity can be used to calculate functions of the distribution of collective +This quantity can be used to calculate functions of the distribution of collective variables for the atoms that lie in a particular, user-specified part of of the cell. Each of the base quantities calculated by a multicolvar can can be assigned to a particular point in three @@ -91,7 +91,7 @@ void VolumeInCylinder::registerKeywords( Keywords& keys ) { ActionVolume::registerKeywords( keys ); keys.add("atoms","ATOM","the atom whose vicinity we are interested in examining"); keys.add("compulsory","DIRECTION","the direction of the long axis of the cylinder. Must be x, y or z"); - keys.add("compulsory","RADIUS","a switching function that gives the extent of the cyclinder in the plane perpendicular to the direction"); + keys.add("compulsory","RADIUS","a switching function that gives the extent of the cylinder in the plane perpendicular to the direction"); keys.add("compulsory","LOWER","0.0","the lower boundary on the direction parallel to the long axis of the cylinder"); keys.add("compulsory","UPPER","0.0","the upper boundary on the direction parallel to the long axis of the cylinder"); keys.reset_style("SIGMA","optional"); diff --git a/src/multicolvar/VolumeInSphere.cpp b/src/multicolvar/VolumeInSphere.cpp index ff7d23d23..ee4b0ee27 100644 --- a/src/multicolvar/VolumeInSphere.cpp +++ b/src/multicolvar/VolumeInSphere.cpp @@ -26,7 +26,7 @@ //+PLUMEDOC VOLUMES INSPHERE /* -This quantity can be used to calculate functions of the distribution of collective +This quantity can be used to calculate functions of the distribution of collective variables for the atoms that lie in a particular, user-specified part of of the cell. Each of the base quantities calculated by a multicolvar can can be assigned to a particular point in three @@ -86,7 +86,7 @@ PLUMED_REGISTER_ACTION(VolumeInSphere,"INSPHERE") void VolumeInSphere::registerKeywords( Keywords& keys ) { ActionVolume::registerKeywords( keys ); keys.add("atoms","ATOM","the atom whose vicinity we are interested in examining"); - keys.add("compulsory","RADIUS","the switching function that tells us the extent of the sphereical region of interest"); + keys.add("compulsory","RADIUS","the switching function that tells us the extent of the spherical region of interest"); keys.remove("SIGMA"); } diff --git a/src/multicolvar/VolumeTetrapore.cpp b/src/multicolvar/VolumeTetrapore.cpp index 5e92018e9..beb77c996 100644 --- a/src/multicolvar/VolumeTetrapore.cpp +++ b/src/multicolvar/VolumeTetrapore.cpp @@ -28,7 +28,7 @@ //+PLUMEDOC VOLUMES TETRAHEDRALPORE /* -This quantity can be used to calculate functions of the distribution of collective variables +This quantity can be used to calculate functions of the distribution of collective variables for the atoms lie that lie in a box defined by the positions of four atoms at the corners of a tetrahedron. 
Each of the base quantities calculated by a multicolvar can can be assigned to a particular point in three @@ -64,7 +64,7 @@ position to a position in \f$ (u_i,v_i,z_i)\f$. This is done using a rotation m \f] where \f$\mathbf{R}\f$ is a rotation matrix that is calculated by constructing a set of three orthonormal vectors from the -refererence positions specified by the user. Initially unit vectors are found by calculating the bisector, \f$\mathbf{b}\f$, and +reference positions specified by the user. Initially unit vectors are found by calculating the bisector, \f$\mathbf{b}\f$, and cross product, \f$\mathbf{c}\f$, of the vectors connecting atoms 1 and 2. A third unit vector, \f$\mathbf{p}\f$ is then found by taking the cross product between the cross product calculated during the first step, \f$\mathbf{c}\f$ and the bisector, \f$\mathbf{b}\f$. From this second cross product \f$\mathbf{p}\f$ and the bisector \f$\mathbf{b}\f$ two new vectors are calculated using: diff --git a/src/multicolvar/XAngle.cpp b/src/multicolvar/XAngle.cpp index 35fbf838c..d2e2ef408 100644 --- a/src/multicolvar/XAngle.cpp +++ b/src/multicolvar/XAngle.cpp @@ -116,9 +116,9 @@ void XAngles::registerKeywords( Keywords& keys ) { keys.reset_style("ATOMS","atoms"); keys.add("atoms-1","GROUP","Calculate the distance between each distinct pair of atoms in the group"); keys.add("atoms-2","GROUPA","Calculate the distances between all the atoms in GROUPA and all " - "the atoms in GROUPB. This must be used in conjuction with GROUPB."); + "the atoms in GROUPB. This must be used in conjunction with GROUPB."); keys.add("atoms-2","GROUPB","Calculate the distances between all the atoms in GROUPA and all the atoms " - "in GROUPB. This must be used in conjuction with GROUPA."); + "in GROUPB. This must be used in conjunction with GROUPA."); keys.add("optional","SWITCH","A switching function that ensures that only angles are only computed when atoms are within " "are within a certain fixed cutoff. The following provides information on the \\ref switchingfunction that are available."); } diff --git a/src/multicolvar/XDistances.cpp b/src/multicolvar/XDistances.cpp index 9f3ef5049..a8854a695 100644 --- a/src/multicolvar/XDistances.cpp +++ b/src/multicolvar/XDistances.cpp @@ -34,7 +34,7 @@ namespace multicolvar { //+PLUMEDOC MCOLVAR XDISTANCES /* Calculate the x components of the vectors connecting one or many pairs of atoms. -You can then calculate functions of the distribution of +You can then calculate functions of the distribution of values such as the minimum, the number less than a certain quantity and so on. \par Examples @@ -81,7 +81,7 @@ PRINT ARG=d1.gt0.1 //+PLUMEDOC MCOLVAR YDISTANCES /* Calculate the y components of the vectors connecting one or many pairs of atoms. -You can then calculate functions of the distribution of +You can then calculate functions of the distribution of values such as the minimum, the number less than a certain quantity and so on. \par Examples @@ -129,7 +129,7 @@ PRINT ARG=d1.gt0.1 //+PLUMEDOC MCOLVAR ZDISTANCES /* Calculate the z components of the vectors connecting one or many pairs of atoms. -You can then calculate functions of the distribution of +You can then calculate functions of the distribution of values such as the minimum, the number less than a certain quantity and so on. 
\par Examples @@ -205,9 +205,9 @@ void XDistances::registerKeywords( Keywords& keys ) { keys.reset_style("ATOMS","atoms"); keys.add("atoms-1","GROUP","Calculate the distance between each distinct pair of atoms in the group"); keys.add("atoms-2","GROUPA","Calculate the distances between all the atoms in GROUPA and all " - "the atoms in GROUPB. This must be used in conjuction with GROUPB."); + "the atoms in GROUPB. This must be used in conjunction with GROUPB."); keys.add("atoms-2","GROUPB","Calculate the distances between all the atoms in GROUPA and all the atoms " - "in GROUPB. This must be used in conjuction with GROUPA."); + "in GROUPB. This must be used in conjunction with GROUPA."); } XDistances::XDistances(const ActionOptions&ao): diff --git a/src/multicolvar/XYDistances.cpp b/src/multicolvar/XYDistances.cpp index 9da8fd943..51f1a40b9 100644 --- a/src/multicolvar/XYDistances.cpp +++ b/src/multicolvar/XYDistances.cpp @@ -34,8 +34,8 @@ namespace multicolvar { //+PLUMEDOC MCOLVAR XYDISTANCES /* Calculate distance between a pair of atoms neglecting the z-component. -You can then calculate functions of the distribution of -values such as the minimum, the number less than a certain quantity and so on. +You can then calculate functions of the distribution of + values such as the minimum, the number less than a certain quantity and so on. \par Examples @@ -55,7 +55,7 @@ PRINT ARG=d1.min //+PLUMEDOC MCOLVAR XZDISTANCES /* Calculate distance between a pair of atoms neglecting the y-component. -You can then calculate functions of the distribution of +You can then calculate functions of the distribution of values such as the minimum, the number less than a certain quantity and so on. \par Examples @@ -76,7 +76,7 @@ PRINT ARG=d1.min //+PLUMEDOC MCOLVAR YZDISTANCES /* Calculate distance between a pair of atoms neglecting the x-component. -You can then calculate functions of the distribution of +You can then calculate functions of the distribution of values such as the minimum, the number less than a certain quantity and so on. \par Examples @@ -119,14 +119,14 @@ void XYDistances::registerKeywords( Keywords& keys ) { keys.add("numbered","ATOMS","the atoms involved in each of the distances you wish to calculate. " "Keywords like ATOMS1, ATOMS2, ATOMS3,... should be listed and one distance will be " "calculated for each ATOM keyword you specify (all ATOM keywords should " - "specify the incides of two atoms). The eventual number of quantities calculated by this " + "specify the indices of two atoms). The eventual number of quantities calculated by this " "action will depend on what functions of the distribution you choose to calculate."); keys.reset_style("ATOMS","atoms"); keys.add("atoms-1","GROUP","Calculate the distance between each distinct pair of atoms in the group"); keys.add("atoms-2","GROUPA","Calculate the distances between all the atoms in GROUPA and all " - "the atoms in GROUPB. This must be used in conjuction with GROUPB."); + "the atoms in GROUPB. This must be used in conjunction with GROUPB."); keys.add("atoms-2","GROUPB","Calculate the distances between all the atoms in GROUPA and all the atoms " - "in GROUPB. This must be used in conjuction with GROUPA."); + "in GROUPB. 
This must be used in conjunction with GROUPA."); } XYDistances::XYDistances(const ActionOptions&ao): diff --git a/src/multicolvar/XYTorsion.cpp b/src/multicolvar/XYTorsion.cpp index 48c956644..d79e34556 100644 --- a/src/multicolvar/XYTorsion.cpp +++ b/src/multicolvar/XYTorsion.cpp @@ -161,17 +161,17 @@ void XYTorsion::registerKeywords( Keywords& keys ) { keys.use("MEAN"); keys.use("MIN"); keys.use("LOWEST"); keys.use("HIGHEST"); keys.use("BETWEEN"); keys.use("HISTOGRAM"); keys.use("MOMENTS"); - keys.add("numbered","ATOMS","the atoms involved in each of the torsions you wish to calculate. " + keys.add("numbered","ATOMS","the atoms involved in each of the torsion angles you wish to calculate. " "Keywords like ATOMS1, ATOMS2, ATOMS3,... should be listed and one torsion will be " "calculated for each ATOM keyword you specify (all ATOM keywords should " - "specify the incides of two atoms). The eventual number of quantities calculated by this " + "specify the indices of two atoms). The eventual number of quantities calculated by this " "action will depend on what functions of the distribution you choose to calculate."); keys.reset_style("ATOMS","atoms"); keys.add("atoms-1","GROUP","Calculate the distance between each distinct pair of atoms in the group"); keys.add("atoms-2","GROUPA","Calculate the distances between all the atoms in GROUPA and all " - "the atoms in GROUPB. This must be used in conjuction with GROUPB."); + "the atoms in GROUPB. This must be used in conjunction with GROUPB."); keys.add("atoms-2","GROUPB","Calculate the distances between all the atoms in GROUPA and all the atoms " - "in GROUPB. This must be used in conjuction with GROUPA."); + "in GROUPB. This must be used in conjunction with GROUPA."); keys.add("optional","SWITCH","A switching function that ensures that only angles are only computed when atoms are within " "are within a certain fixed cutoff. The following provides information on the \\ref switchingfunction that are available."); } diff --git a/src/pamm/HBPammHydrogens.cpp b/src/pamm/HBPammHydrogens.cpp index 4d4571940..15f8ce6e3 100644 --- a/src/pamm/HBPammHydrogens.cpp +++ b/src/pamm/HBPammHydrogens.cpp @@ -63,7 +63,7 @@ void HBPammHydrogens::registerKeywords( Keywords& keys ) { multicolvar::MultiColvarBase::registerKeywords( keys ); keys.add("atoms-1","HYDROGENS","The list of hydrogen atoms that can form part of a hydrogen bond. The atoms must be specified using a comma separated list."); keys.add("atoms-1","SITES","The list of atoms which can be part of a hydrogen bond. When this command is used the set of atoms that can donate a " - "hydrogen bond is assumed to be the same as the set of atoms that can form hydrogen bonds. The atoms involved must be specified" + "hydrogen bond is assumed to be the same as the set of atoms that can form hydrogen bonds. The atoms involved must be specified " "as a list of labels of \\ref mcolv or labels of a \\ref multicolvarfunction actions. If you would just like to use " "the atomic positions you can use a \\ref DENSITY command to specify a group of atoms. 
Specifying your atomic positions using labels of " "other \\ref mcolv or \\ref multicolvarfunction commands is useful, however, as you can then exploit a much wider " diff --git a/src/pamm/HBPammMatrix.cpp b/src/pamm/HBPammMatrix.cpp index c8425b979..c30c5104d 100644 --- a/src/pamm/HBPammMatrix.cpp +++ b/src/pamm/HBPammMatrix.cpp @@ -63,7 +63,7 @@ PLUMED_REGISTER_ACTION(HBPammMatrix,"HBPAMM_MATRIX") void HBPammMatrix::registerKeywords( Keywords& keys ) { adjmat::AdjacencyMatrixBase::registerKeywords( keys ); keys.add("atoms-1","SITES","The list of atoms which can be part of a hydrogen bond. When this command is used the set of atoms that can donate a " - "hydrogen bond is assumed to be the same as the set of atoms that can form hydrogen bonds. The atoms involved must be specified" + "hydrogen bond is assumed to be the same as the set of atoms that can form hydrogen bonds. The atoms involved must be specified " "as a list of labels of \\ref mcolv or labels of a \\ref multicolvarfunction actions. If you would just like to use " "the atomic positions you can use a \\ref DENSITY command to specify a group of atoms. Specifying your atomic positions using labels of " "other \\ref mcolv or \\ref multicolvarfunction commands is useful, however, as you can then exploit a much wider " diff --git a/src/pamm/PAMM.cpp b/src/pamm/PAMM.cpp index 4574f48ed..7f2c313d9 100644 --- a/src/pamm/PAMM.cpp +++ b/src/pamm/PAMM.cpp @@ -28,9 +28,9 @@ //+PLUMEDOC MCOLVARF PAMM /* -Probabilistic analysis of molecular mofifs. +Probabilistic analysis of molecular motifs. -Probabilistic analysis of molecular motifs (PAMM) was introduced in this paper \cite{pamm}. +Probabilistic analysis of molecular motifs (PAMM) was introduced in this paper \cite pamm. The essence of this approach involves calculating some large set of collective variables for a set of atoms in a short trajectory and fitting this data using a Gaussian Mixture Model. The idea is that modes in these distributions can be used to identify features such as hydrogen bonds or @@ -38,7 +38,7 @@ secondary structure types. The assumption within this implementation is that the fitting of the Gaussian mixture model has been done elsewhere by a separate code. You thus provide an input file to this action which contains the -means, covariances and weights for a set of Gaussian kernels, \f$\{ \phi \}\f$. The values and +means, covariance matrices and weights for a set of Gaussian kernels, \f$\{ \phi \}\f$. The values and derivatives for the following set of quantities is then computed: \f[ @@ -46,14 +46,14 @@ s_k = \frac{ \phi_k}{ \sum_i \phi_i } \f] Each of the \f$\phi_k\f$ is a Gaussian function that acts on a set of quantities calculated within -a \ref mcolv. These might be \ref TORSIONS, \ref DISTANCES, \ref ANGLES or any one of the many +a \ref mcolv . These might be \ref TORSIONS, \ref DISTANCES, \ref ANGLES or any one of the many symmetry functions that are available within \ref mcolv actions. These quantities are then inserted into the set of \f$n\f$ kernels that are in the the input file. This will be done for multiple sets of values for the input quantities and a final quantity will be calculated by summing the above \f$s_k\f$ values or some transformation of the above. This sounds less complicated than it is and is best understood by looking through the example given below. 
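The construction above is easy to reproduce in a few lines. The following standalone C++ sketch is not the code in src/pamm; it evaluates two two-dimensional Gaussian kernels of the kind read from the cluster file in the example below and prints the resulting \f$s_k\f$ values. The normalization factor and all names are assumptions made for the illustration (the text only states that \f$N_k\f$ is computed from \f$\Sigma_k\f$).

\verbatim
// Standalone illustration of the PAMM quantities described above:
//   phi_k = (w_k / N_k) * exp( -(s - c_k)^T Sigma_k^{-1} (s - c_k) )
//   s_k   = phi_k / sum_i phi_i
// Not the implementation in src/pamm.  N_k is taken here to be the integral of
// a 2D Gaussian written in this form (an assumption), and all names are invented.
#include <cmath>
#include <cstdio>

struct Kernel2D {
  double w;            // weight w_k
  double c[2];         // centre c_k
  double sigma[2][2];  // covariance matrix Sigma_k
};

double evaluateKernel(const Kernel2D& k, const double s[2]) {
  const double det = k.sigma[0][0]*k.sigma[1][1] - k.sigma[0][1]*k.sigma[1][0];
  const double inv[2][2] = { {  k.sigma[1][1]/det, -k.sigma[0][1]/det },
                             { -k.sigma[1][0]/det,  k.sigma[0][0]/det } };
  const double d[2] = { s[0]-k.c[0], s[1]-k.c[1] };
  const double arg  = d[0]*(inv[0][0]*d[0]+inv[0][1]*d[1])
                    + d[1]*(inv[1][0]*d[0]+inv[1][1]*d[1]);
  const double pi   = 3.14159265358979323846;
  const double norm = pi*std::sqrt(det);   // integral of exp(-d^T Sigma^{-1} d) in 2D
  return (k.w/norm)*std::exp(-arg);
}

int main() {
  // Two kernels with weight, centre and covariance.  The first set of numbers is
  // made up; the second follows the cluster file shown in the example below.
  const Kernel2D kernels[2] = {
    { 0.4, { -1.0, -1.0 }, { { 0.2, -0.05 }, { -0.05, 0.2 } } },
    { 0.6, {  1.0,  1.0 }, { { 0.1, -0.03 }, { -0.03, 0.1 } } }
  };
  const double s[2] = { -0.8, -1.1 };   // e.g. a (phi,psi) pair from a TORSIONS action
  double phi[2], total = 0.0;
  for (int k=0; k<2; ++k) { phi[k] = evaluateKernel(kernels[k], s); total += phi[k]; }
  for (int k=0; k<2; ++k) std::printf("s_%d = %f\n", k+1, phi[k]/total);
  return 0;
}
\endverbatim

For the point chosen here the first kernel dominates, so \f$s_1\f$ comes out essentially equal to one.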
-\warning Mixing periodic and aperiodic \ref mcolv actions has not been tested +\warning Mixing \ref mcolv actions that are periodic with variables that are not periodic has not been tested \par Examples @@ -77,21 +77,21 @@ The best place to start our explanation is to look at the contents of the cluste 0.6 1.0 +1.0 0.1 -0.03 -0.03 0.1 \endverbatim -This files contains the parameters of two two-dimensional Gaussian functions. Each of these Gaussians has a weight, \f$w_k\f$, -a vector that specifies the position of its centre, \f$\mathbf{c}_k\f$, and a covariance matrix, \f$\Sigma_k\f$. The \f$\phi_k\f$ functions that +This files contains the parameters of two two-dimensional Gaussian functions. Each of these Gaussian kernels has a weight, \f$w_k\f$, +a vector that specifies the position of its center, \f$\mathbf{c}_k\f$, and a covariance matrix, \f$\Sigma_k\f$. The \f$\phi_k\f$ functions that we use to calculate our PAMM components are thus: \f[ \phi_k = \frac{w_k}{N_k} \exp\left( -(\mathbf{s} - \mathbf{c}_k)^T \Sigma^{-1}_k (\mathbf{s} - \mathbf{c}_k) \right) \f] -In the above \f$N_k\f$ is a normalisation factor that is calculated based on \f$\Sigma\f$. The vector \f$\mathbf{s}\f$ is a vector of quantities +In the above \f$N_k\f$ is a normalization factor that is calculated based on \f$\Sigma\f$. The vector \f$\mathbf{s}\f$ is a vector of quantities that are calculated by the \ref TORSIONS actions. This vector must be two dimensional and in this case each component is the value of a torsion angle. If we look at the two \ref TORSIONS actions in the above we are calculating the \f$\phi\f$ and \f$\psi\f$ backbone torsional angles in a protein (Note the use of \ref MOLINFO to make specification of atoms straightforward). We thus calculate the values of our -2 \f$ \{ \phi \} \f$ kernels 3 times. The first time we use the \f$\phi\f$ and \f$\psi\f$ angles in the 2nd resiude of the protein, -the second time it is the \f$\phi\f$ and \f$\psi\f$ angles of the 3rd residue of the protein and the third time it is the \f$\phi\f$ and \f$\psi\f$ angles -of the 4th residue in the protein. The final two quantities that are output by the print command, p.mean-1 and p.mean-2, are the averages +2 \f$ \{ \phi \} \f$ kernels 3 times. The first time we use the \f$\phi\f$ and \f$\psi\f$ angles in the second residue of the protein, +the second time it is the \f$\phi\f$ and \f$\psi\f$ angles of the third residue of the protein and the third time it is the \f$\phi\f$ and \f$\psi\f$ angles +of the fourth residue in the protein. The final two quantities that are output by the print command, p.mean-1 and p.mean-2, are the averages over these three residues for the quantities: \f[ s_1 = \frac{ \phi_1}{ \phi_1 + \phi_2 } @@ -142,12 +142,12 @@ void PAMM::registerKeywords( Keywords& keys ) { "This Action can be used to calculate the following scalar quantities directly from the underlying set of PAMM variables. " "These quantities are calculated by employing the keywords listed below and they can be referenced elsewhere in the input " "file by using this Action's label followed by a dot and the name of the quantity. The particular PAMM variable that should " - "be averaged in a MEAN command or transformed by a swiching function in a LESS_THAN command is specified using the COMPONENT " + "be averaged in a MEAN command or transformed by a switching function in a LESS_THAN command is specified using the COMPONENT " "keyword. 
COMPONENT=1 refers to the PAMM variable in which the first kernel in your input file is on the numerator, COMPONENT=2 refers to " "PAMM variable in which the second kernel in the input file is on the numerator and so on. The same quantity can be calculated " "multiple times for different PAMM components by a single PAMM action. In this case the relevant keyword must appear multiple " "times on the input line followed by a numerical identifier i.e. MEAN1, MEAN2, ... The quantities calculated when multiple " - "MEAN commands appear on the input line can be referenece elsewhere in the input file by using the name of the quantity followed " + "MEAN commands appear on the input line can be referenced elsewhere in the input file by using the name of the quantity " "followed by a numerical identifier e.g. <em>label</em>.lessthan-1, <em>label</em>.lessthan-2 etc. Alternatively, you can " "customize the labels of the quantities by using the LABEL keyword in the description of the keyword."); keys.remove("ALL_INPUT_SAME_TYPE"); diff --git a/src/piv/PIV.cpp b/src/piv/PIV.cpp index e09845634..8aaa1a96f 100644 --- a/src/piv/PIV.cpp +++ b/src/piv/PIV.cpp @@ -52,9 +52,9 @@ and prints the results in a file named colvar. Three atoms (PIVATOMS=3) with names (pdb file) A B and C are used to construct the PIV and all PIV blocks (AA, BB, CC, AB, AC, BC) are considered. SFACTOR is a scaling factor that multiplies the contribution to the PIV-distance given by the single PIV block. NLIST sets the use of neighbor lists for calculating atom-atom distances. -The SWITCH keyword specifies the perameters of the switching function that transforms atom-atom distances. -SORT=1 meand that the PIV block elements are sorted (SORT=0 no sorting.) -Values for SORT, SFACTOR and Neighborlist parameters have to be specified for each block. +The SWITCH keyword specifies the parameters of the switching function that transforms atom-atom distances. +SORT=1 means that the PIV block elements are sorted (SORT=0 means no sorting). +Values for SORT, SFACTOR and the neighbor list parameters have to be specified for each block. The order is the following: AA,BB,CC,AB,AC,BC. If ONLYDIRECT (ONLYCROSS) is used the order is AA,BB,CC (AB,AC,BC). The sorting operation within each PIV block is performed using the counting sort algorithm, PRECISION specifies the size of the counting array. @@ -229,23 +229,23 @@ void PIV::registerKeywords( Keywords& keys ) "Details of the various switching " "functions you can use are provided on \\ref switchingfunction."); keys.add("compulsory","PRECISION","the precision for approximating reals with integers in sorting."); - keys.add("compulsory","REF_FILE","PDB file name that contains the i-th reference structure."); + keys.add("compulsory","REF_FILE","PDB file name that contains the \\f$i\\f$th reference structure."); keys.add("compulsory","PIVATOMS","Number of atoms to use for PIV."); keys.add("compulsory","SORT","Whether to sort or not the PIV block."); - keys.add("compulsory","ATOMTYPES","The atomtypes to use for PIV."); + keys.add("compulsory","ATOMTYPES","The atom types to use for PIV."); keys.add("optional","SFACTOR","Scale the PIV-distance by such block-specific factor"); keys.add("optional","VOLUME","Scale atom-atom distances by the cubic root of the cell volume. The input volume is used to scale the R_0 value of the switching function.
"); - keys.add("optional","UPDATEPIV","Frequency (timesteps) at which the PIV is updated."); + keys.add("optional","UPDATEPIV","Frequency (in steps) at which the PIV is updated."); keys.addFlag("TEST",false,"Print the actual and reference PIV and exit"); - keys.addFlag("COM",false,"Use centers of mass of groups of atoms instead of atoms as secified in the Pdb file"); + keys.addFlag("COM",false,"Use centers of mass of groups of atoms instead of atoms as specified in the Pdb file"); keys.addFlag("ONLYCROSS",false,"Use only cross-terms (A-B, A-C, B-C, ...) in PIV"); keys.addFlag("ONLYDIRECT",false,"Use only direct-terms (A-A, B-B, C-C, ...) in PIV"); keys.addFlag("DERIVATIVES",false,"Activate the calculation of the PIV for every class (needed for numerical derivatives)."); - keys.addFlag("NLIST",false,"Use a neighbour list for distance calculations."); + keys.addFlag("NLIST",false,"Use a neighbor list for distance calculations."); keys.addFlag("SERIAL",false,"Perform the calculation in serial - for debug purpose"); - keys.addFlag("TIMER",false,"Permorm timing analysis on heavy loops."); - keys.add("optional","NL_CUTOFF","Neighbour lists cutoff."); - keys.add("optional","NL_STRIDE","Update neighbour lists every NL_STRIDE steps."); + keys.addFlag("TIMER",false,"Perform timing analysis on heavy loops."); + keys.add("optional","NL_CUTOFF","Neighbor lists cutoff."); + keys.add("optional","NL_STRIDE","Update neighbor lists every NL_STRIDE steps."); keys.add("optional","NL_SKIN","The maximum atom displacement tolerated for the neighbor lists update."); keys.reset_style("SWITCH","compulsory"); } diff --git a/src/secondarystructure/AlphaRMSD.cpp b/src/secondarystructure/AlphaRMSD.cpp index 54add88e2..4cc73f61b 100644 --- a/src/secondarystructure/AlphaRMSD.cpp +++ b/src/secondarystructure/AlphaRMSD.cpp @@ -35,7 +35,7 @@ colvar thus generates the set of all possible six residue sections and calculate the RMSD distance between the configuration in which the residues find themselves and an idealized alpha helical structure. These distances can be calculated by either aligning the instantaneous structure with the reference structure and measuring each -atomic displacement or by calculating differences between the set of interatomic +atomic displacement or by calculating differences between the set of inter-atomic distances in the reference and instantaneous structures. This colvar is based on the following reference \cite pietrucci09jctc. The authors of @@ -60,7 +60,7 @@ options you no longer need to specify NN, R_0, MM and D_0. Please be aware that for codes like gromacs you must ensure that plumed reconstructs the chains involved in your CV when you calculate this CV using -anthing other than TYPE=DRMSD. For more details as to how to do this see \ref WHOLEMOLECULES. +anything other than TYPE=DRMSD. For more details as to how to do this see \ref WHOLEMOLECULES. \par Examples @@ -69,7 +69,7 @@ protein that are in an alpha helical configuration. 
\plumedfile MOLINFO STRUCTURE=helix.pdb -hh: ALPHARMSD RESIDUES=all +alpha: ALPHARMSD RESIDUES=all \endplumedfile Here the same is done use RMSD instead of DRMSD @@ -77,7 +77,7 @@ Here the same is done use RMSD instead of DRMSD \plumedfile MOLINFO STRUCTURE=helix.pdb WHOLEMOLECULES ENTITY0=1-100 -hh: ALPHARMSD RESIDUES=all TYPE=OPTIMAL R_0=0.1 +alpha: ALPHARMSD RESIDUES=all TYPE=OPTIMAL R_0=0.1 \endplumedfile */ diff --git a/src/secondarystructure/AntibetaRMSD.cpp b/src/secondarystructure/AntibetaRMSD.cpp index 37c22cb87..ca6270abc 100644 --- a/src/secondarystructure/AntibetaRMSD.cpp +++ b/src/secondarystructure/AntibetaRMSD.cpp @@ -30,19 +30,19 @@ namespace secondarystructure { /* Probe the antiparallel beta sheet content of your protein structure. -Two protein segments containing three continguous residues can form an antiparallel beta sheet. +Two protein segments containing three contiguous residues can form an antiparallel beta sheet. Although if the two segments are part of the same protein chain they must be separated by a minimum of 2 residues to make room for the turn. This colvar thus generates the set of all possible six residue sections that could conceivably form an antiparallel beta sheet and calculates the RMSD distance between the configuration in which the residues find themselves and an idealized antiparallel beta sheet structure. These distances can be calculated by either aligning the instantaneous structure with the reference structure and measuring each -atomic displacement or by calculating differences between the set of interatomic +atomic displacement or by calculating differences between the set of inter-atomic distances in the reference and instantaneous structures. This colvar is based on the following reference \cite pietrucci09jctc. The authors of this paper use the set of distances from the anti parallel beta sheet configurations to measure -the number of segments that have an configuration that resemebles an anti paralel beta sheet. This is done by calculating +the number of segments that have a configuration that resembles an antiparallel beta sheet. This is done by calculating the following sum of functions of the rmsd distances: \f[ @@ -62,7 +62,7 @@ options you no longer need to specify NN, R_0, MM and D_0. Please be aware that for codes like gromacs you must ensure that plumed reconstructs the chains involved in your CV when you calculate this CV using -anthing other than TYPE=DRMSD. For more details as to how to do this see \ref WHOLEMOLECULES. +anything other than TYPE=DRMSD. For more details as to how to do this see \ref WHOLEMOLECULES. \par Examples diff --git a/src/secondarystructure/ParabetaRMSD.cpp b/src/secondarystructure/ParabetaRMSD.cpp index 2a4f7996a..360a4bd11 100644 --- a/src/secondarystructure/ParabetaRMSD.cpp +++ b/src/secondarystructure/ParabetaRMSD.cpp @@ -30,14 +30,14 @@ namespace secondarystructure { /* Probe the parallel beta sheet content of your protein structure. -Two protein segments containing three continguous residues can form a parallel beta sheet. +Two protein segments containing three contiguous residues can form a parallel beta sheet. Although if the two segments are part of the same protein chain they must be separated by a minimum of 3 residues to make room for the turn.
This colvar thus generates the set of all possible six residue sections that could conceivably form a parallel beta sheet and calculates the RMSD distance between the configuration in which the residues find themselves and an idealized parallel beta sheet structure. These distances can be calculated by either aligning the instantaneous structure with the reference structure and measuring each -atomic displacement or by calculating differences between the set of interatomic +atomic displacement or by calculating differences between the set of inter-atomic distances in the reference and instantaneous structures. This colvar is based on the following reference \cite pietrucci09jctc. The authors of @@ -62,7 +62,7 @@ options you no longer need to specify NN, R_0, MM and D_0. Please be aware that for codes like gromacs you must ensure that plumed reconstructs the chains involved in your CV when you calculate this CV using -anthing other than TYPE=DRMSD. For more details as to how to do this see \ref WHOLEMOLECULES. +anything other than TYPE=DRMSD. For more details as to how to do this see \ref WHOLEMOLECULES. \par Examples diff --git a/src/secondarystructure/SecondaryStructureRMSD.cpp b/src/secondarystructure/SecondaryStructureRMSD.cpp index ebc60ff84..8f4d3765f 100644 --- a/src/secondarystructure/SecondaryStructureRMSD.cpp +++ b/src/secondarystructure/SecondaryStructureRMSD.cpp @@ -60,14 +60,14 @@ void SecondaryStructureRMSD::registerKeywords( Keywords& keys ) { ActionWithVessel::registerKeywords( keys ); keys.use("LESS_THAN"); keys.use("MIN"); keys.use("ALT_MIN"); keys.use("LOWEST"); keys.use("HIGHEST"); keys.setComponentsIntroduction("By default this Action calculates the number of structural units that are within a certain " - "distance of a idealised secondary structure element. This quantity can then be referenced " + "distance of an idealized secondary structure element. This quantity can then be referenced " "elsewhere in the input by using the label of the action. However, this Action can also be used to " "calculate the following quantities by using the keywords as described below. The quantities then " - "calculated can be referened using the label of the action followed by a dot and then the name " + "calculated can be referenced using the label of the action followed by a dot and then the name " "from the table below. Please note that you can use the LESS_THAN keyword more than once. The resulting " "components will be labelled <em>label</em>.lessthan-1, <em>label</em>.lessthan-2 and so on unless you " - "exploit the fact that these labels are customizable. In particular, by using the LABEL keyword in the " - "description of you LESS_THAN function you can set name of the component that you are calculating"); + "exploit the fact that these labels can be customized. In particular, by using the LABEL keyword in the " + "description of your LESS_THAN function you can set the name of the component that you are computing"); } SecondaryStructureRMSD::SecondaryStructureRMSD(const ActionOptions&ao): diff --git a/src/setup/Load.cpp b/src/setup/Load.cpp index ce70a519c..d6f5b9385 100644 --- a/src/setup/Load.cpp +++ b/src/setup/Load.cpp @@ -52,7 +52,7 @@ small change to one collective variable that is already implemented in PLUMED, say \ref DISTANCE . Copy the file `src/colvar/Distance.cpp` into your work directory, rename it as `Distance2.cpp` and edit it as you wish.
It might be better -to also replace any occurence of the string DISTANCE within the file +to also replace any occurrence of the string DISTANCE within the file with DISTANCE2, so that both old and new implementation will be available with different names. Then you can compile it into a shared object using \verbatim diff --git a/src/setup/MolInfo.cpp b/src/setup/MolInfo.cpp index 000dd46b4..634f55323 100644 --- a/src/setup/MolInfo.cpp +++ b/src/setup/MolInfo.cpp @@ -38,7 +38,7 @@ using this command you can find the backbone atoms in your structure automatical \warning Please be aware that the PDB parser in plumed is far from perfect. You should thus check the log file -and examine what plumed is actually doing whenenver you use the MOLINFO action. +and examine what plumed is actually doing whenever you use the MOLINFO action. Also make sure that the atoms are listed in the pdb with the correct order. If you are using gromacs, the safest way is to use reference pdb file generated with `gmx editconf -f topol.tpr -o reference.pdb`. @@ -48,7 +48,7 @@ More information of the PDB parser implemented in PLUMED can be found \ref pdbre Providing `MOLTYPE=protein`, `MOLTYPE=rna`, or `MOLTYPE=dna` will instruct plumed to look for known residues from these three types of molecule. In other words, this is available for historical reasons and to allow future extensions where alternative lists will be provided. -As of now, you can just ignore this keyoword. +As of now, you can just ignore this keyword. Using \ref MOLINFO extends the possibility of atoms selection using the @ special symbol. The following shortcuts are available that do not refer to one specific residue: @@ -171,7 +171,7 @@ hb3: DISTANCE ATOMS=@O6-1,@N4-14 PRINT ARG=hb1,hb2,hb3 \endplumedfile -This example use MOLINFO to calculate torsions angles +This example use MOLINFO to calculate torsion angles \plumedfile MOLINFO MOLTYPE=protein STRUCTURE=myprotein.pdb diff --git a/src/setup/Units.cpp b/src/setup/Units.cpp index 804002c24..a56376c66 100644 --- a/src/setup/Units.cpp +++ b/src/setup/Units.cpp @@ -32,11 +32,11 @@ namespace setup { //+PLUMEDOC GENERIC UNITS /* -This command sets the internal units for the code. A new unit can be set by either -specifying a conversion factor from the plumed default unit or by using a string -corresponding to one of the defined units given below. This directive MUST -appear at the BEGINNING of the plumed.dat file. The same units must be used -througout the plumed.dat file. +This command sets the internal units for the code. A new unit can be set by either +specifying a conversion factor from the plumed default unit or by using a string +corresponding to one of the defined units given below. This directive MUST +appear at the BEGINNING of the plumed.dat file. The same units must be used +throughout the plumed.dat file. Notice that all input/output will then be made using the specified units. That is: all the input parameters, all the output files, etc. The only @@ -45,7 +45,7 @@ the units. For example, trajectories written in .gro format (with \ref DUMPATOMS are going to be always in nm. The following strings can be used to specify units. Note that the strings are -case senstive. +case sensitive. 
- LENGTH: nm (default), A (for Angstrom), um (for micrometer), Bohr (0.052917721067 nm) - ENERGY: kj/mol (default), j/mol, kcal/mol (4.184 kj/mol), eV (96.48530749925792 kj/mol), Ha (for Hartree, 2625.499638 kj/mol) - TIME: ps (default), fs, ns, atomic (2.418884326509e-5 ps) @@ -72,8 +72,8 @@ DUMPATOMS FILE=out.gro STRIDE=10 ATOMS=1-100 DUMPATOMS FILE=out.xyz STRIDE=10 ATOMS=1-100 \endplumedfile -In the `COLVAR` file, time and distance will appear in fs and A respectively, *irrespectively* of which units -you are using the the host MD code. The coordinates in the `out.gro` file will be expressed in nm, +In the `COLVAR` file, time and distance will appear in fs and A respectively, *irrespective* of which units +you are using in the host MD code. The coordinates in the `out.gro` file will be expressed in nm, since `gro` files are by convention written in nm. The coordinates in the `out.xyz` file will be written in Angstrom *since we used the UNITS command setting Angstrom units*. Indeed, within PLUMED xyz files are using internal PLUMED units and not necessarily Angstrom! diff --git a/src/tools/HistogramBead.cpp b/src/tools/HistogramBead.cpp index 3c17ce144..87ede128d 100644 --- a/src/tools/HistogramBead.cpp +++ b/src/tools/HistogramBead.cpp @@ -31,15 +31,15 @@ namespace PLMD { /* A function that can be used to calculate whether quantities are between fixed upper and lower bounds. -If we have multiple instances of a variable we can estimate the probability distribution (pdf) +If we have multiple instances of a variable we can estimate the probability density function for that variable using a process called kernel density estimation: \f[ P(s) = \sum_i K\left( \frac{s - s_i}{w} \right) \f] -In this equation \f$K\f$ is a symmetric funciton that must integrate to one that is often -called a kernel function and \f$w\f$ is a smearing parameter. From a pdf calculated using +In this equation \f$K\f$ is a symmetric function that must integrate to one that is often +called a kernel function and \f$w\f$ is a smearing parameter. From a probability density function calculated using kernel density estimation we can calculate the number/fraction of values between an upper and lower bound using: @@ -68,7 +68,7 @@ in the table below: </tr> </table> -Some keywords can also be used to calculate a descretized version of the histogram. That +Some keywords can also be used to calculate a discrete version of the histogram. That is to say the number of values between \f$a\f$ and \f$b\f$, the number of values between \f$b\f$ and \f$c\f$ and so on. A keyword that specifies this sort of calculation would look something like diff --git a/src/tools/KernelFunctions.cpp b/src/tools/KernelFunctions.cpp index 9c422a70e..56a665ae5 100644 --- a/src/tools/KernelFunctions.cpp +++ b/src/tools/KernelFunctions.cpp @@ -30,7 +30,7 @@ namespace PLMD { /* Functions that are used to construct histograms -Constructing histograms is something you learnt to do relatively early in life. You perform an experiment a number of times, +Constructing histograms is something you learned to do relatively early in life. You perform an experiment a number of times, count the number of times each result comes up and then draw a bar graph that describes how often each of the results came up. This only works when there are a finite number of possible results. If the result a number between 0 and 1 the bar chart is less easy to draw as there are as many possible results as there are numbers between zero and one - an infinite number. 
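For instance, a kernel density estimate of this kind could be accumulated with an input along the following lines (a minimal sketch: the atom indices, grid range, bandwidth and stride are placeholder values, and the default Gaussian kernel is assumed):
\plumedfile
# a distance whose probability density we want to estimate
d1: DISTANCE ATOMS=1,2
# accumulate a kernel density estimate of P(d1) on a grid
hh: HISTOGRAM ARG=d1 STRIDE=10 GRID_MIN=0.0 GRID_MAX=3.0 GRID_BIN=100 BANDWIDTH=0.1
# write the estimated density to a file
DUMPGRID GRID=hh FILE=histo
\endplumedfile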
@@ -74,7 +74,7 @@ The following variants are available. </table> In the above \f$H(y)\f$ is a function that is equal to one when \f$y>0\f$ and zero when \f$y \le 0\f$. \f$n\f$ is -the dimensionality of the vector \f$\mathbf{x}\f$ and \f$V\f$ is the volume of an elipse in an \f$n\f$ dimensional +the dimensionality of the vector \f$\mathbf{x}\f$ and \f$V\f$ is the volume of an ellipse in an \f$n\f$ dimensional space which is given by: \f{eqnarray*}{ @@ -86,8 +86,8 @@ In \ref METAD the normalization constants are ignored so that the value of the f to one. In addition in \ref METAD we must be able to differentiate the bias in order to get forces. This limits the kernels we can use in this method. Notice also that Gaussian kernels should have infinite support. When used with grids, however, they are assumed to only be non-zero over a finite range. The difference between the -truncated-gaussian and regular gaussian is that the trucated gaussian is scaled so that its integral over the grid -is equal to one when it is normalised. The integral of a regular gaussian when it is evaluated on a grid will be +truncated-gaussian and regular gaussian is that the truncated gaussian is scaled so that its integral over the grid +is equal to one when it is normalized. The integral of a regular gaussian when it is evaluated on a grid will be slightly less that one because of the truncation of a function that should have infinite support. */ //+ENDPLUMEDOC diff --git a/src/tools/Keywords.cpp b/src/tools/Keywords.cpp index 43e451a16..267863809 100644 --- a/src/tools/Keywords.cpp +++ b/src/tools/Keywords.cpp @@ -389,7 +389,7 @@ void Keywords::print_html() const { } if( nkeys>0 ) { if(isaction && isatoms) std::cout<<"\\par The atoms involved can be specified using\n\n"; - else if(isaction) std::cout<<"\\par The data to analyse can be the output from another analysis algorithm\n\n"; + else if(isaction) std::cout<<"\\par The data to analyze can be the output from another analysis algorithm\n\n"; else std::cout<<"\\par The input trajectory is specified using one of the following\n\n"; std::cout<<" <table align=center frame=void width=95%% cellpadding=5%%> \n"; std::string prevtag="start"; unsigned counter=0; @@ -400,7 +400,7 @@ void Keywords::print_html() const { std::cout<<"</table>\n\n"; if( isatoms ) std::cout<<"\\par Or alternatively by using\n\n"; else if( counter==0 ) { std::cout<<"\\par Alternatively data can be collected from the trajectory using \n\n"; counter++; } - else std::cout<<"\\par Lastly data collected in a previous analysis action can be reanalysed by using the keyword \n\n"; + else std::cout<<"\\par Lastly data collected in a previous analysis action can be reanalyzed by using the keyword \n\n"; std::cout<<" <table align=center frame=void width=95%% cellpadding=5%%> \n"; } print_html_item( keys[i] ); diff --git a/src/tools/PDB.cpp b/src/tools/PDB.cpp index a66887cb0..cb4165aaf 100644 --- a/src/tools/PDB.cpp +++ b/src/tools/PDB.cpp @@ -56,7 +56,7 @@ columns | content \endverbatim PLUMED parser is slightly more permissive than the official PDB format in the fact that the format of real numbers is not fixed. In other words, -any parsable real number is ok and the dot can be placed anywhere. However, +any real number that can be parsed is OK and the dot can be placed anywhere. However, __columns are interpret strictly__. 
A sample PDB should look like the following \verbatim ATOM 2 CH3 ACE 1 12.932 -14.718 -6.016 1.00 1.00 @@ -78,7 +78,7 @@ for instance in \ref RMSD and in \ref FIT_TO_TEMPLATE), the occupancy column is to provide the weight of each atom in the alignment. In cases where, perhaps after alignment, the displacement between running coordinates and the provided PDB is computed, the beta factors are used as weight for the displacement. -Since setting the weights to zero is the same as __not__ including an atom in the alignement or +Since setting the weights to zero is the same as __not__ including an atom in the alignment or displacement calculation, the two following reference files would be equivalent when used in an \ref RMSD calculation. First file: \verbatim diff --git a/src/tools/SwitchingFunction.cpp b/src/tools/SwitchingFunction.cpp index 42252c625..b76e3c2d4 100644 --- a/src/tools/SwitchingFunction.cpp +++ b/src/tools/SwitchingFunction.cpp @@ -131,7 +131,7 @@ s(r) = FUNC </table> Notice that for backward compatibility we allow using `MATHEVAL` instead of `CUSTOM`. -Also notice that if the a `CUSTOM` switching function only depents on even powers of `x` it can be +Also notice that if the a `CUSTOM` switching function only depends on even powers of `x` it can be made faster by using `x2` as a variable. For instance \verbatim {CUSTOM FUNC=1/(1+x2^3) R_0=0.3} @@ -337,7 +337,7 @@ std::string SwitchingFunction::description() const { } else { plumed_merror("Unknown switching function type"); } - ostr<<" swiching function with parameters d0="<<d0; + ostr<<" switching function with parameters d0="<<d0; if(type==rational) { ostr<<" nn="<<nn<<" mm="<<mm; } else if(type==nativeq) { diff --git a/src/vatom/Center.cpp b/src/vatom/Center.cpp index 854986cbc..7227bebe1 100644 --- a/src/vatom/Center.cpp +++ b/src/vatom/Center.cpp @@ -43,7 +43,7 @@ then it provides a result identical to \ref COM. When running with periodic boundary conditions, the atoms should be in the proper periodic image. This is done automatically since PLUMED 2.2, -by considering the ordered list of atoms and rebuilding PBCs with a procedure +by considering the ordered list of atoms and rebuilding the molecule using a procedure that is equivalent to that done in \ref WHOLEMOLECULES . Notice that rebuilding is local to this action. This is different from \ref WHOLEMOLECULES which actually modifies the coordinates stored in PLUMED. @@ -53,7 +53,7 @@ In that case you need to take care that atoms are in the correct periodic image. \note As an experimental feature, CENTER also supports a keyword PHASES. -This keyword solves PBCs by computing scaled coordinates and average +This keyword finds the center of mass for sets of atoms that have been split by the period boundaries by computing scaled coordinates and average trigonometric functions, similarly to \ref CENTER_OF_MULTICOLVAR. Notice that by construction this center position is not invariant with respect to rotations of the atoms at fixed cell lattice. @@ -94,7 +94,7 @@ For arbitrary weights (e.g. geometric center) see \ref CENTER. When running with periodic boundary conditions, the atoms should be in the proper periodic image. This is done automatically since PLUMED 2.2, -by considering the ordered list of atoms and rebuilding PBCs with a procedure +by considering the ordered list of atoms and rebuilding the molecule using a procedure that is equivalent to that done in \ref WHOLEMOLECULES . Notice that rebuilding is local to this action. 
This is different from \ref WHOLEMOLECULES which actually modifies the coordinates stored in PLUMED. diff --git a/src/vatom/FixedAtom.cpp b/src/vatom/FixedAtom.cpp index 8a8546104..6cd6ad66a 100644 --- a/src/vatom/FixedAtom.cpp +++ b/src/vatom/FixedAtom.cpp @@ -34,8 +34,8 @@ namespace vatom { Add a virtual atom in a fixed position. This action creates a virtual atom at a fixed position. -The coordinates can be specified in cartesian components (by default) -or in scaled coordinats (SCALED_COMPONENTS). +The coordinates can be specified in Cartesian components (by default) +or in scaled coordinates (SCALED_COMPONENTS). It is also possible to assign a predefined charge or mass to the atom. \attention diff --git a/src/vatom/Ghost.cpp b/src/vatom/Ghost.cpp index 7d57c8969..8745a71c5 100644 --- a/src/vatom/Ghost.cpp +++ b/src/vatom/Ghost.cpp @@ -31,10 +31,11 @@ namespace vatom { //+PLUMEDOC VATOM GHOST /* -Calculate the absolute position of a ghost atom with fixed coordinates +Calculate the absolute position of a ghost atom with fixed coordinates in the local reference frame formed by three atoms. + The computed ghost atom is stored as a virtual atom that can be accessed in -an atom list through the the label for the GHOST action that creates it. + an atom list through the the label for the GHOST action that creates it. \par Examples diff --git a/src/ves/CoeffsVector.cpp b/src/ves/CoeffsVector.cpp index 2a4a659a3..b73e6b6bf 100644 --- a/src/ves/CoeffsVector.cpp +++ b/src/ves/CoeffsVector.cpp @@ -854,7 +854,7 @@ size_t CoeffsVector::readDataFromFile(IFile& ifile, const bool ignore_missing_co ncoeffs_read++; if(ncoeffs_read==numberOfCoeffs()) { if((static_cast<unsigned int>(idx_tmp)+1)!=numberOfCoeffs()) { - plumed_merror("something strange about the coefficent file that is being read in, perhaps multiple entries or missing values"); + plumed_merror("something strange about the coefficient file that is being read in, perhaps multiple entries or missing values"); } break; } diff --git a/src/ves/MD_LinearExpansionPES.cpp b/src/ves/MD_LinearExpansionPES.cpp index 59a6a594f..153bc0d5b 100644 --- a/src/ves/MD_LinearExpansionPES.cpp +++ b/src/ves/MD_LinearExpansionPES.cpp @@ -118,7 +118,7 @@ The corresponding pot_coeffs_input.data file is #!------------------- \endverbatim -One then uses the (x,y) postion of the particle as CVs by using the \ref POSITION +One then uses the (x,y) position of the particle as CVs by using the \ref POSITION action as shown in the following PLUMED input \plumedfile p: POSITION ATOM=1 @@ -151,21 +151,21 @@ void MD_LinearExpansionPES::registerKeywords( Keywords& keys ) { CLTool::registerKeywords( keys ); keys.add("compulsory","nstep","10","The number of steps of dynamics you want to run."); keys.add("compulsory","tstep","0.005","The integration timestep."); - keys.add("compulsory","temperature","1.0","The temperature to perform the simulation at. For multiple replica you can give a seperate value for each replica."); - keys.add("compulsory","friction","10.","The friction of the Langevin thermostat. For multiple replica you can give a seperate value for each replica."); + keys.add("compulsory","temperature","1.0","The temperature to perform the simulation at. For multiple replica you can give a separate value for each replica."); + keys.add("compulsory","friction","10.","The friction of the Langevin thermostat. 
For multiple replica you can give a separate value for each replica."); keys.add("compulsory","random_seed","5293818","Value of random number seed."); - keys.add("compulsory","plumed_input","plumed.dat","The name of the plumed input file(s). For multiple replica you can give a seperate value for each replica."); + keys.add("compulsory","plumed_input","plumed.dat","The name of the plumed input file(s). For multiple replica you can give a separate value for each replica."); keys.add("compulsory","dimension","1","Number of dimensions, supports 1 to 3."); - keys.add("compulsory","initial_position","Initial position of the particle. For multiple replica you can give a seperate value for each replica."); + keys.add("compulsory","initial_position","Initial position of the particle. For multiple replica you can give a separate value for each replica."); keys.add("compulsory","replicas","1","Number of replicas."); keys.add("compulsory","basis_functions_1","Basis functions for dimension 1."); keys.add("optional","basis_functions_2","Basis functions for dimension 2 if needed."); keys.add("optional","basis_functions_3","Basis functions for dimension 3 if needed."); - keys.add("compulsory","input_coeffs","potential-coeffs.in.data","Filename of the input coefficent file for the potential. For multiple replica you can give a seperate value for each replica."); - keys.add("compulsory","output_coeffs","potential-coeffs.out.data","Filename of the output coefficent file for the potential."); - keys.add("compulsory","output_coeffs_fmt","%30.16e","Format of the output coefficent file for the potential. Useful for regtests."); - keys.add("optional","coeffs_prefactor","prefactor for multiplying the coefficents with. For multiple replica you can give a seperate value for each replica."); - keys.add("optional","template_coeffs_file","only generate a template coefficent file with the filename given and exit."); + keys.add("compulsory","input_coeffs","potential-coeffs.in.data","Filename of the input coefficient file for the potential. For multiple replica you can give a separate value for each replica."); + keys.add("compulsory","output_coeffs","potential-coeffs.out.data","Filename of the output coefficient file for the potential."); + keys.add("compulsory","output_coeffs_fmt","%30.16e","Format of the output coefficient file for the potential. Useful for regtests."); + keys.add("optional","coeffs_prefactor","prefactor for multiplying the coefficients with. 
For multiple replica you can give a separate value for each replica."); + keys.add("optional","template_coeffs_file","only generate a template coefficient file with the filename given and exit."); keys.add("compulsory","output_potential_grid","100","The number of grid points used for the potential and histogram output files."); keys.add("compulsory","output_potential","potential.data","Filename of the potential output file."); keys.add("compulsory","output_histogram","histogram.data","Filename of the histogram output file."); @@ -276,7 +276,7 @@ int MD_LinearExpansionPES::main( FILE* in, FILE* out, PLMD::Communicator& pc) { } else { if(seeds_vec.size()!=1 && seeds_vec.size()!=replicas) { - error("problem with random_seed keyword, for multiple replicas you should give either one value or a seperate value for each replica"); + error("problem with random_seed keyword, for multiple replicas you should give either one value or a separate value for each replica"); } if(seeds_vec.size()==1) { seeds_vec.resize(replicas); @@ -361,7 +361,7 @@ int MD_LinearExpansionPES::main( FILE* in, FILE* out, PLMD::Communicator& pc) { ofile_coeffstmpl.open(template_coeffs_fname); coeffs_pntr->writeToFile(ofile_coeffstmpl,true); ofile_coeffstmpl.close(); - error("Only generating a template coefficent file - Should stop now."); + error("Only generating a template coefficient file - Should stop now."); } std::vector<std::string> input_coeffs_fnames(0); diff --git a/src/ves/Opt_BachAveragedSGD.cpp b/src/ves/Opt_BachAveragedSGD.cpp index 197f9aca4..57757f545 100644 --- a/src/ves/Opt_BachAveragedSGD.cpp +++ b/src/ves/Opt_BachAveragedSGD.cpp @@ -36,7 +36,7 @@ namespace ves { /* Averaged stochastic gradient decent with fixed step size. -\par Algorithim +\par Algorithm This optimizer updates the coefficients according to the averaged stochastic gradient decent algorithm described in ref \cite Bach-NIPS-2013. This algorithm considers two sets of coefficients, the so-called instantaneous coefficients that are updated according to the recursion formula given by \f[ @@ -66,17 +66,17 @@ The coefficients will be outputted to the file given by the COEFFS_FILE keyword. How often the coefficients are written to this file is controlled by the COEFFS_OUTPUT keyword. -If the VES bias employes a dynamic target distribution that needes to be +If the VES bias employs a dynamic target distribution that needs to be iteratively updated (e.g. \ref TD_WELLTEMPERED) \cite Valsson-JCTC-2015, you will need to specify the stride for updating the target distribution by using the TARGETDIST_STRIDE keyword where the stride -is given in terms coefficent iterations. For example if the +is given in terms coefficient iterations. For example if the MD time step is 0.02 ps and STRIDE=1000, such that the coefficients are updated every 2 ps, will TARGETDIST_STRIDE=500 mean that the target distribution will be updated every 1000 ps. -The output of FESs and biases is controlled by the FES_OUTPUT and the BIAS_OUTPUT -keywords. It is also possible to output one-dimensional projections of the FESs +The output of the free energy surfaces and biases is controlled by the FES_OUTPUT and the BIAS_OUTPUT +keywords. It is also possible to output one-dimensional projections of the free energy surfaces by using the FES_PROJ_OUTPUT keyword but for that to work you will need to select for which argument to do the projections by using the numbered PROJ_ARG keyword in the VES bias that is optimized. 
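As a sketch of how these keywords fit together (the labels, torsion atoms, basis-function order, bias factor and strides below are illustrative placeholders), a well-tempered target distribution that is updated every 500 coefficient iterations could be requested with:
\plumedfile
phi: TORSION ATOMS=5,7,9,15
# Fourier basis functions for a periodic CV
bf1: BF_FOURIER ORDER=5 MINIMUM=-pi MAXIMUM=+pi
# dynamic target distribution that is iteratively updated
td: TD_WELLTEMPERED BIASFACTOR=10
VES_LINEAR_EXPANSION ...
  ARG=phi
  BASIS_FUNCTIONS=bf1
  TARGET_DISTRIBUTION=td
  TEMP=300.0
  LABEL=b1
... VES_LINEAR_EXPANSION
OPT_AVERAGED_SGD ...
  BIAS=b1
  STRIDE=1000
  STEPSIZE=1.0
  LABEL=o1
  TARGETDIST_STRIDE=500
  COEFFS_OUTPUT=50
  FES_OUTPUT=500
... OPT_AVERAGED_SGD
\endplumedfile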
@@ -87,7 +87,7 @@ It is possible to start the optimization from some initial set of coefficients that have been previously obtained by using the INITIAL_COEFFS keyword. -When restarting simulations it should be sufficent to put the \ref RESTART action +When restarting simulations it should be sufficient to put the \ref RESTART action in the beginning of the input files (or some MD codes the PLUMED should automatically detect if it is a restart run) and keep the same input as before The restarting of the optimization should be automatic as the optimizer will then read in the @@ -98,14 +98,14 @@ is not used). This optimizer supports the usage of multiple walkers where different copies of the system share the same bias potential (i.e. coefficients) and cooperatively sample the averages needed for the gradient and Hessian. This can significantly help with convergence in difficult cases. It is of course best to start the different copies from different positions in CV space. To activate this option you just need to add the MULTIPLE_WALKERS flag. Note that this is only supported if the MD code support running multiple replicas connected via MPI. -The optimizer supports the usage of a so-called mask file that can be used to employ different step sizes for different coefficents and/or deactive the optimization of certain coefficients (by putting values of 0.0). The mask file is read in by using the MASK_FILE keyword and should be in the same format as the coefficent file. It is possible to generate a template mask file by using the OUTPUT_MASK_FILE keyword. +The optimizer supports the usage of a so-called mask file that can be used to employ different step sizes for different coefficients and/or deactivate the optimization of certain coefficients (by putting values of 0.0). The mask file is read in by using the MASK_FILE keyword and should be in the same format as the coefficient file. It is possible to generate a template mask file by using the OUTPUT_MASK_FILE keyword. \par Examples -In the following input we emloy an averaged stochastic gradient decent with a -fixed step size of 1.0 and update the coefficent every 1000 MD steps -(e.g. every 2 ps if the MD time step is 0.02 ps). The coefficent are outputted -to the coeffs.data every 50 iterations while the FES and bias is outputted +In the following input we employ an averaged stochastic gradient descent with a +fixed step size of 1.0 and update the coefficients every 1000 MD steps +(e.g. every 2 ps if the MD time step is 0.02 ps). The coefficients are outputted +to the coefficients.data every 50 iterations while the FES and bias is outputted to files every 500 iterations (e.g. every 1000 ps). \plumedfile phi: TORSION ATOMS=5,7,9,15 @@ -125,7 +125,7 @@ OPT_AVERAGED_SGD ... STRIDE=1000 LABEL=o1 STEPSIZE=1.0 - COEFFS_FILE=coeffs.data + COEFFS_FILE=coefficients.data COEFFS_OUTPUT=50 FES_OUTPUT=500 BIAS_OUTPUT=500 @@ -164,7 +164,7 @@ OPT_AVERAGED_SGD ...
LABEL=o1 STEPSIZE=1.0 MULTIPLE_WALKERS - COEFFS_FILE=coeffs.data + COEFFS_FILE=coefficients.data COEFFS_OUTPUT=50 FES_OUTPUT=500 FES_PROJ_OUTPUT=500 diff --git a/src/ves/Optimizer.cpp b/src/ves/Optimizer.cpp index f895c033a..bf052646a 100644 --- a/src/ves/Optimizer.cpp +++ b/src/ves/Optimizer.cpp @@ -169,7 +169,7 @@ Optimizer::Optimizer(const ActionOptions&ao): } } else { - log.printf(" optimizing %u coefficent sets from following %u VES biases:\n",ncoeffssets_,nbiases_); + log.printf(" optimizing %u coefficient sets from following %u VES biases:\n",ncoeffssets_,nbiases_); for(unsigned int i=0; i<nbiases_; i++) { log.printf(" %s of type %s (KbT: %f) \n",bias_pntrs_[i]->getLabel().c_str(),bias_pntrs_[i]->getName().c_str(),bias_pntrs_[i]->getKbT()); } @@ -244,10 +244,10 @@ Optimizer::Optimizer(const ActionOptions&ao): } if(ustride_targetdist_>0) { if(nbiases_==1) { - log.printf(" the target distribution will be updated very %u coefficent iterations\n",ustride_targetdist_); + log.printf(" the target distribution will be updated very %u coefficient iterations\n",ustride_targetdist_); } else { - log.printf(" the target distribution will be updated very %u coefficent iterations for the following biases\n ",ustride_targetdist_); + log.printf(" the target distribution will be updated very %u coefficient iterations for the following biases\n ",ustride_targetdist_); for(unsigned int i=0; i<nbiases_; i++) { log.printf("%s ",bias_pntrs_[i]->getLabel().c_str()); } @@ -272,7 +272,7 @@ Optimizer::Optimizer(const ActionOptions&ao): if(!reweightfactor_calculated) { plumed_merror("In order to use the REWEIGHT_FACTOR_STRIDE keyword you need to enable the calculation of the reweight factor in the VES bias by using the CALC_REWEIGHT_FACTOR flag."); } - log.printf(" the reweight factor c(t) will be updated very %u coefficent iterations\n",ustride_reweightfactor_); + log.printf(" the reweight factor c(t) will be updated very %u coefficient iterations\n",ustride_reweightfactor_); } } @@ -319,7 +319,7 @@ Optimizer::Optimizer(const ActionOptions&ao): parse("COEFFS_SET_ID_PREFIX",coeffssetid_prefix_); } if(coeffssetid_prefix_.size()>0) { - plumed_merror("COEFFS_SET_ID_PREFIX should only be given if optimizing multiple coefficent sets"); + plumed_merror("COEFFS_SET_ID_PREFIX should only be given if optimizing multiple coefficient sets"); } } @@ -510,7 +510,7 @@ Optimizer::Optimizer(const ActionOptions&ao): else { for(unsigned int i=0; i<ncoeffssets_; i++) { size_t nread = coeffs_mask_pntrs_[i]->readFromFile(mask_fnames_in[i],true,true); - log.printf(" mask for coefficent set %u:\n",i); + log.printf(" mask for coefficient set %u:\n",i); log.printf(" read %zu values from file %s\n",nread,mask_fnames_in[i].c_str()); size_t ndeactived = coeffs_mask_pntrs_[0]->countValues(0.0); log.printf(" deactived optimization of %zu coefficients\n",ndeactived); @@ -733,7 +733,7 @@ Optimizer::Optimizer(const ActionOptions&ao): } else { for(unsigned int i=0; i<ncoeffssets_; i++) { - log.printf(" Output Components for coefficent set %u:\n",i); + log.printf(" Output Components for coefficient set %u:\n",i); std::string is=""; Tools::convert(i,is); is = "_" + coeffssetid_prefix_ + is; log.printf(" "); if(monitor_instantaneous_gradient_) { @@ -823,9 +823,9 @@ void Optimizer::registerKeywords( Keywords& keys ) { keys.add("compulsory","COEFFS_FILE","coeffs.data","the name of output file for the coefficients"); keys.add("compulsory","COEFFS_OUTPUT","100","how often the coefficients should be written to file. 
This parameter is given as the number of iterations."); keys.add("optional","COEFFS_FMT","specify format for coefficient file(s) (useful for decrease the number of digits in regtests)"); - keys.add("optional","COEFFS_SET_ID_PREFIX","suffix to add to the filename given in FILE to identfy the bias, should only be given if a single filename is given in FILE when optimizing multiple biases."); + keys.add("optional","COEFFS_SET_ID_PREFIX","suffix to add to the filename given in FILE to identify the bias, should only be given if a single filename is given in FILE when optimizing multiple biases."); // - keys.add("optional","INITIAL_COEFFS","the name(s) of file(s) with the initial coefficents"); + keys.add("optional","INITIAL_COEFFS","the name(s) of file(s) with the initial coefficients"); // Hidden keywords to output the gradient to a file. keys.add("hidden","GRADIENT_FILE","the name of output file for the gradient"); keys.add("hidden","GRADIENT_OUTPUT","how often the gradient should be written to file. This parameter is given as the number of bias iterations. It is by default 100 if GRADIENT_FILE is specficed"); @@ -841,7 +841,7 @@ void Optimizer::registerKeywords( Keywords& keys ) { // Keywords related to the multiple walkers, actived with the useMultipleWalkersKeywords function keys.reserveFlag("MULTIPLE_WALKERS",false,"if optimization is to be performed using multiple walkers connected via MPI"); // Keywords related to the mask file, actived with the useMaskKeywords function - keys.reserve("optional","MASK_FILE","read in a mask file which allows one to employ different step sizes for different coefficents and/or deactive the optimization of certain coefficients (by putting values of 0.0). One can write out the resulting mask by using the OUTPUT_MASK_FILE keyword."); + keys.reserve("optional","MASK_FILE","read in a mask file which allows one to employ different step sizes for different coefficients and/or deactivate the optimization of certain coefficients (by putting values of 0.0). One can write out the resulting mask by using the OUTPUT_MASK_FILE keyword."); keys.reserve("optional","OUTPUT_MASK_FILE","Name of the file to write out the mask resulting from using the MASK_FILE keyword. Can also be used to generate a template mask file."); // keys.reserveFlag("START_OPTIMIZATION_AFRESH",false,"if the iterations should be started afresh when a restart has been triggered by the RESTART keyword or the MD code."); @@ -851,27 +851,27 @@ void Optimizer::registerKeywords( Keywords& keys ) { keys.reserveFlag("MONITOR_AVERAGE_GRADIENT",false,"if the averaged gradient should be monitored and quantities related to it should be outputted."); keys.reserve("optional","MONITOR_AVERAGES_GRADIENT_EXP_DECAY","use an exponentially decaying averaging with a given time constant when monitoring the averaged gradient"); // - keys.reserve("optional","TARGETDIST_STRIDE","stride for updating a target distribution that is iteratively updated during the optimization. Note that the value is given in terms of coefficent iterations."); - keys.reserve("optional","TARGETDIST_OUTPUT","how often the dynamic target distribution(s) should be written out to file. Note that the value is given in terms of coefficent iterations."); - keys.reserve("optional","TARGETDIST_PROJ_OUTPUT","how often the projections of the dynamic target distribution(s) should be written out to file. 
Note that the value is given in terms of coefficent iterations."); + keys.reserve("optional","TARGETDIST_STRIDE","stride for updating a target distribution that is iteratively updated during the optimization. Note that the value is given in terms of coefficient iterations."); + keys.reserve("optional","TARGETDIST_OUTPUT","how often the dynamic target distribution(s) should be written out to file. Note that the value is given in terms of coefficient iterations."); + keys.reserve("optional","TARGETDIST_PROJ_OUTPUT","how often the projections of the dynamic target distribution(s) should be written out to file. Note that the value is given in terms of coefficient iterations."); // keys.add("optional","TARGETDIST_AVERAGES_FILE","the name of output file for the target distribution averages. By default it is targetdist-averages.data."); - keys.add("optional","TARGETDIST_AVERAGES_OUTPUT","how often the target distribution averages should be written out to file. Note that the value is given in terms of coefficent iterations. If no value is given are the averages only written at the begining of the optimization"); + keys.add("optional","TARGETDIST_AVERAGES_OUTPUT","how often the target distribution averages should be written out to file. Note that the value is given in terms of coefficient iterations. If no value is given are the averages only written at the beginning of the optimization"); keys.add("hidden","TARGETDIST_AVERAGES_FMT","specify format for target distribution averages file(s) (useful for decrease the number of digits in regtests)"); // - keys.add("optional","BIAS_OUTPUT","how often the bias(es) should be written out to file. Note that the value is given in terms of coefficent iterations."); - keys.add("optional","FES_OUTPUT","how often the FES(s) should be written out to file. Note that the value is given in terms of coefficent iterations."); - keys.add("optional","FES_PROJ_OUTPUT","how often the projections of the FES(s) should be written out to file. Note that the value is given in terms of coefficent iterations."); + keys.add("optional","BIAS_OUTPUT","how often the bias(es) should be written out to file. Note that the value is given in terms of coefficient iterations."); + keys.add("optional","FES_OUTPUT","how often the FES(s) should be written out to file. Note that the value is given in terms of coefficient iterations."); + keys.add("optional","FES_PROJ_OUTPUT","how often the projections of the FES(s) should be written out to file. Note that the value is given in terms of coefficient iterations."); // - keys.reserve("optional","REWEIGHT_FACTOR_STRIDE","stride for updating the reweighting factor c(t). Note that the value is given in terms of coefficent iterations."); + keys.reserve("optional","REWEIGHT_FACTOR_STRIDE","stride for updating the reweighting factor c(t). Note that the value is given in terms of coefficient iterations."); // keys.use("RESTART"); // keys.use("UPDATE_FROM"); keys.use("UPDATE_UNTIL"); // Components that are always active - keys.addOutputComponent("gradrms","MONITOR_INSTANTANEOUS_GRADIENT","the root mean square value of the coefficent gradient. For multiple biases this component is labeled using the number of the bias as gradrms-#."); - keys.addOutputComponent("gradmax","MONITOR_INSTANTANEOUS_GRADIENT","the largest absolute value of the coefficent gradient. 
For multiple biases this component is labeled using the number of the bias as gradmax-#."); + keys.addOutputComponent("gradrms","MONITOR_INSTANTANEOUS_GRADIENT","the root mean square value of the coefficient gradient. For multiple biases this component is labeled using the number of the bias as gradrms-#."); + keys.addOutputComponent("gradmax","MONITOR_INSTANTANEOUS_GRADIENT","the largest absolute value of the coefficient gradient. For multiple biases this component is labeled using the number of the bias as gradmax-#."); ActionWithValue::useCustomisableComponents(keys); // keys.addOutputComponent("gradmaxidx","default","the index of the maximum absolute value of the gradient"); @@ -916,8 +916,8 @@ void Optimizer::useRestartKeywords(Keywords& keys) { void Optimizer::useMonitorAverageGradientKeywords(Keywords& keys) { keys.use("MONITOR_AVERAGE_GRADIENT"); keys.use("MONITOR_AVERAGES_GRADIENT_EXP_DECAY"); - keys.addOutputComponent("avergradrms","MONITOR_AVERAGE_GRADIENT","the root mean square value of the averaged coefficent gradient. For multiple biases this component is labeled using the number of the bias as gradrms-#."); - keys.addOutputComponent("avergradmax","MONITOR_AVERAGE_GRADIENT","the largest absolute value of the averaged coefficent gradient. For multiple biases this component is labeled using the number of the bias as gradmax-#."); + keys.addOutputComponent("avergradrms","MONITOR_AVERAGE_GRADIENT","the root mean square value of the averaged coefficient gradient. For multiple biases this component is labeled using the number of the bias as gradrms-#."); + keys.addOutputComponent("avergradmax","MONITOR_AVERAGE_GRADIENT","the largest absolute value of the averaged coefficient gradient. For multiple biases this component is labeled using the number of the bias as gradmax-#."); } @@ -1172,10 +1172,10 @@ void Optimizer::readCoeffsFromFiles(const std::vector<std::string>& fnames, cons plumed_assert(ncoeffssets_>0); plumed_assert(fnames.size()==ncoeffssets_); if(ncoeffssets_==1) { - log.printf(" Read in coefficents from file "); + log.printf(" Read in coefficients from file "); } else { - log.printf(" Read in coefficents from files:\n"); + log.printf(" Read in coefficients from files:\n"); } for(unsigned int i=0; i<ncoeffssets_; i++) { IFile ifile; @@ -1185,7 +1185,7 @@ void Optimizer::readCoeffsFromFiles(const std::vector<std::string>& fnames, cons } ifile.open(fnames[i]); if(!ifile.FieldExist(coeffs_pntrs_[i]->getDataLabel())) { - std::string error_msg = "Problem with reading coefficents from file " + ifile.getPath() + ": no field with name " + coeffs_pntrs_[i]->getDataLabel() + "\n"; + std::string error_msg = "Problem with reading coefficients from file " + ifile.getPath() + ": no field with name " + coeffs_pntrs_[i]->getDataLabel() + "\n"; plumed_merror(error_msg); } size_t ncoeffs_read = coeffs_pntrs_[i]->readFromFile(ifile,false,false); @@ -1193,13 +1193,13 @@ void Optimizer::readCoeffsFromFiles(const std::vector<std::string>& fnames, cons log.printf("%s (read %zu of %zu values)\n", ifile.getPath().c_str(),ncoeffs_read,coeffs_pntrs_[i]->numberOfCoeffs()); } else { - log.printf(" coefficent set %u: %s (read %zu of %zu values)\n",i,ifile.getPath().c_str(),ncoeffs_read,coeffs_pntrs_[i]->numberOfCoeffs()); + log.printf(" coefficient set %u: %s (read %zu of %zu values)\n",i,ifile.getPath().c_str(),ncoeffs_read,coeffs_pntrs_[i]->numberOfCoeffs()); } ifile.close(); if(read_aux_coeffs) { ifile.open(fnames[i]); if(!ifile.FieldExist(aux_coeffs_pntrs_[i]->getDataLabel())) { - std::string 
error_msg = "Problem with reading coefficents from file " + ifile.getPath() + ": no field with name " + aux_coeffs_pntrs_[i]->getDataLabel() + "\n"; + std::string error_msg = "Problem with reading coefficients from file " + ifile.getPath() + ": no field with name " + aux_coeffs_pntrs_[i]->getDataLabel() + "\n"; plumed_merror(error_msg); } aux_coeffs_pntrs_[i]->readFromFile(ifile,false,false); diff --git a/src/ves/OutputBasisFunctions.cpp b/src/ves/OutputBasisFunctions.cpp index 03eb91c47..73af22e8f 100644 --- a/src/ves/OutputBasisFunctions.cpp +++ b/src/ves/OutputBasisFunctions.cpp @@ -42,7 +42,7 @@ namespace ves { Output basis functions to file. This action can be used to write out to a grid file the values and derivatives of -given basis functions. This is normally used for debugging when programing new +given basis functions. This is normally used for debugging when programming new types of basis functions. For example, it is possible to calculate the derivatives numerically and compare to the analytically calculated derivatives. @@ -70,10 +70,10 @@ VES_OUTPUT_BASISFUNCTIONS ... \endplumedfile This input should be run through the driver by using a command similar to the -following one where the trajectory/configuration file conf.gro is needed to +following one where the trajectory/configuration file configuration.gro is needed to trick the code to exit correctly. \verbatim -plumed driver --plumed plumed.dat --igro conf.gro +plumed driver --plumed plumed.dat --igro configuration.gro \endverbatim */ diff --git a/src/ves/OutputFesBias.cpp b/src/ves/OutputFesBias.cpp index 04a70ade5..7f24b1a37 100644 --- a/src/ves/OutputFesBias.cpp +++ b/src/ves/OutputFesBias.cpp @@ -35,11 +35,11 @@ namespace ves { //+PLUMEDOC VES_UTILS VES_OUTPUT_FES /* -Tool to output biases and FESs for VES biases from previously obtained coefficients. +Tool to output biases and free energy surfaces for VES biases from previously obtained coefficients. -This action can be used to output to file biases and FESs for VES biases from +This action can be used to output to file biases and free energy surfaces for VES biases from previously obtained coefficients. It should be used through the \ref driver and -can only be used in postprocessing. The VES bias needs to be defined in the +can only be used in post processing. The VES bias needs to be defined in the exact same way as during the simulation. At the current moment this action does not support dynamic target distributions (e.g. well-tempered). @@ -74,10 +74,10 @@ VES_OUTPUT_FES ... \endplumedfile This input should be run through the driver by using a command similar to the -following one where the trajectory/configuration file conf.gro is needed to +following one where the trajectory/configuration file configuration.gro is needed to correctly define the CVs \verbatim -plumed driver --plumed plumed.dat --igro conf.gro +plumed driver --plumed plumed.dat --igro configuration.gro \endverbatim */ @@ -98,11 +98,11 @@ PLUMED_REGISTER_ACTION(OutputFesBias,"VES_OUTPUT_FES") void OutputFesBias::registerKeywords(Keywords& keys) { - keys.add("compulsory","BIAS","the label of the VES bias for to output the FESs and the bias files"); + keys.add("compulsory","BIAS","the label of the VES bias for to output the free energy surfaces and the bias files"); keys.add("compulsory","COEFFS_INPUT","the name of input coefficient file"); - keys.add("optional","BIAS_OUTPUT","how often the bias(es) should be written out to file. 
Note that the value is given in terms of coefficent iterations."); - keys.add("optional","FES_OUTPUT","how often the FES(s) should be written out to file. Note that the value is given in terms of coefficent iterations."); - keys.add("optional","FES_PROJ_OUTPUT","how often the projections of the FES(s) should be written out to file. Note that the value is given in terms of coefficent iterations."); + keys.add("optional","BIAS_OUTPUT","how often the bias(es) should be written out to file. Note that the value is given in terms of coefficient iterations."); + keys.add("optional","FES_OUTPUT","how often the FES(s) should be written out to file. Note that the value is given in terms of coefficient iterations."); + keys.add("optional","FES_PROJ_OUTPUT","how often the projections of the FES(s) should be written out to file. Note that the value is given in terms of coefficient iterations."); // } diff --git a/src/ves/OutputTargetDistribution.cpp b/src/ves/OutputTargetDistribution.cpp index f6d4321d6..ef7b6766d 100644 --- a/src/ves/OutputTargetDistribution.cpp +++ b/src/ves/OutputTargetDistribution.cpp @@ -68,10 +68,10 @@ VES_OUTPUT_TARGET_DISTRIBUTION ... \endplumedfile This input should be run through the driver by using a command similar to the -following one where the trajectory/configuration file conf.gro is needed to +following one where the trajectory/configuration file configuration.gro is needed to trick the code to exit correctly. \verbatim -plumed driver --plumed plumed.dat --igro conf.gro +plumed driver --plumed plumed.dat --igro configuration.gro \endverbatim */ @@ -96,7 +96,7 @@ void OutputTargetDistribution::registerKeywords(Keywords& keys) { keys.add("compulsory","GRID_MIN","the lower bounds for the grid"); keys.add("compulsory","GRID_MAX","the upper bounds for the grid"); keys.add("compulsory","GRID_BINS","the number of bins used for the grid."); - keys.add("optional","GRID_PERIODICITY","specfiy if the individual arguments should be made periodic (YES) or not (NO). By default all arguments are taken as not periodic."); + keys.add("optional","GRID_PERIODICITY","specify if the individual arguments should be made periodic (YES) or not (NO). 
By default all arguments are taken as not periodic."); keys.add("compulsory","TARGETDIST_FILE","filename of the file for writing the target distribution"); keys.add("optional","LOG_TARGETDIST_FILE","filename of the file for writing the log of the target distribution"); keys.add("compulsory","TARGET_DISTRIBUTION","the target distribution to be used."); @@ -158,7 +158,7 @@ OutputTargetDistribution::OutputTargetDistribution(const ActionOptions&ao): arguments[i]->setNotPeriodic(); } else { - plumed_merror("wrong value given in GRID_PERIODICITY, either specfiy YES or NO"); + plumed_merror("wrong value given in GRID_PERIODICITY, either specify YES or NO"); } } diff --git a/src/ves/TD_Chi.cpp b/src/ves/TD_Chi.cpp index 90d455095..748953b2f 100644 --- a/src/ves/TD_Chi.cpp +++ b/src/ves/TD_Chi.cpp @@ -43,7 +43,7 @@ p(s) = \, \left(\frac{s-a}{\sigma}\right)^{k-1} \, \exp\left(- \frac{1}{2} \left(\frac{s-a}{\sigma}\right)^2\right), \f] where \f$a\f$ is the minimum of the distribution that is defined on the interval \f$[a,\infty)\f$, -the parameter \f$k\f$ (given as a postive integer larger than 1) determines how far +the parameter \f$k\f$ (given as a positive integer larger than 1) determines how far the peak of the distribution is from the minimum (known as the "degrees of freedom"), and the parameter \f$\sigma>0\f$ determines the broadness of the distribution. @@ -94,8 +94,8 @@ PLUMED_REGISTER_ACTION(TD_Chi,"TD_CHI") void TD_Chi::registerKeywords(Keywords& keys) { TargetDistribution::registerKeywords(keys); keys.add("compulsory","MINIMUM","The minimum of the chi distribution."); - keys.add("compulsory","SIGMA","The \\f$\\sigma\\f$ parameter of the chi distribution given as a postive number."); - keys.add("compulsory","KAPPA","The \\f$k\\f$ parameter of the chi distribution given as postive integer larger than 1."); + keys.add("compulsory","SIGMA","The \\f$\\sigma\\f$ parameter of the chi distribution given as a positive number."); + keys.add("compulsory","KAPPA","The \\f$k\\f$ parameter of the chi distribution given as positive integer larger than 1."); keys.use("WELLTEMPERED_FACTOR"); keys.use("SHIFT_TO_ZERO"); keys.use("NORMALIZE"); @@ -112,13 +112,13 @@ TD_Chi::TD_Chi(const ActionOptions& ao): parseVector("MINIMUM",minima_); parseVector("SIGMA",sigma_); for(unsigned int k=0; k<sigma_.size(); k++) { - if(sigma_[k] < 0.0) {plumed_merror(getName()+": the value given in SIGMA should be postive.");} + if(sigma_[k] < 0.0) {plumed_merror(getName()+": the value given in SIGMA should be positive.");} } std::vector<unsigned int> kappa_int(0); parseVector("KAPPA",kappa_int); - if(kappa_int.size()==0) {plumed_merror(getName()+": some problem with KAPPA keyword, should given as postive integer larger than 1");} + if(kappa_int.size()==0) {plumed_merror(getName()+": some problem with KAPPA keyword, should given as positive integer larger than 1");} kappa_.resize(kappa_int.size()); for(unsigned int k=0; k<kappa_int.size(); k++) { if(kappa_int[k] < 1) {plumed_merror(getName()+": KAPPA should be an integer 1 or higher");} diff --git a/src/ves/TD_ChiSquared.cpp b/src/ves/TD_ChiSquared.cpp index e5df159f0..1e8b760e5 100644 --- a/src/ves/TD_ChiSquared.cpp +++ b/src/ves/TD_ChiSquared.cpp @@ -44,7 +44,7 @@ p(s) = \left(\frac{s-a}{\sigma}\right) \right), \f] where \f$a\f$ is the minimum of the distribution that is defined on the interval \f$[a,\infty)\f$, -the parameter \f$k\f$ (given as a postive integer larger than 2) determines how far +the parameter \f$k\f$ (given as a positive integer larger than 2) determines how 
far the peak of the distribution is from the minimum (known as the "degrees of freedom"), and the parameter \f$\sigma>0\f$ determines the broadness of the distribution. @@ -94,8 +94,8 @@ PLUMED_REGISTER_ACTION(TD_ChiSquared,"TD_CHISQUARED") void TD_ChiSquared::registerKeywords(Keywords& keys) { TargetDistribution::registerKeywords(keys); keys.add("compulsory","MINIMUM","The minimum of the chi-squared distribution."); - keys.add("compulsory","SIGMA","The \\f$\\sigma\\f$ parameter of the chi-squared distribution given as a postive number."); - keys.add("compulsory","KAPPA","The \\f$k\\f$ parameter of the chi-squared distribution given as postive integer larger than 2."); + keys.add("compulsory","SIGMA","The \\f$\\sigma\\f$ parameter of the chi-squared distribution given as a positive number."); + keys.add("compulsory","KAPPA","The \\f$k\\f$ parameter of the chi-squared distribution given as positive integer larger than 2."); keys.use("WELLTEMPERED_FACTOR"); keys.use("SHIFT_TO_ZERO"); keys.use("NORMALIZE"); @@ -112,12 +112,12 @@ TD_ChiSquared::TD_ChiSquared(const ActionOptions& ao): parseVector("MINIMUM",minima_); parseVector("SIGMA",sigma_); for(unsigned int k=0; k<sigma_.size(); k++) { - if(sigma_[k] < 0.0) {plumed_merror(getName()+": the value given in SIGMA should be postive.");} + if(sigma_[k] < 0.0) {plumed_merror(getName()+": the value given in SIGMA should be positive.");} } std::vector<unsigned int> kappa_int(0); parseVector("KAPPA",kappa_int); - if(kappa_int.size()==0) {plumed_merror(getName()+": some problem with KAPPA keyword, should given as postive integer larger than 2");} + if(kappa_int.size()==0) {plumed_merror(getName()+": some problem with KAPPA keyword, should given as positive integer larger than 2");} kappa_.resize(kappa_int.size()); for(unsigned int k=0; k<kappa_int.size(); k++) { if(kappa_int[k] < 2) {plumed_merror(getName()+": KAPPA should be an integer 2 or higher");} diff --git a/src/ves/TD_Custom.cpp b/src/ves/TD_Custom.cpp index 9e77b99de..e40c6e02a 100644 --- a/src/ves/TD_Custom.cpp +++ b/src/ves/TD_Custom.cpp @@ -57,11 +57,11 @@ best estimate of \f$F(\mathbf{s})\f$, similarly as for the \ref TD_WELLTEMPERED "well-tempered target distribution". Furthermore, the inverse temperature \f$\beta = (k_{\mathrm{B}}T)^{-1}\f$ and the thermal energy \f$k_{\mathrm{B}}T\f$ can be included -by using the _beta_ and _kBT_ variables. +by using the _beta_ and \f$k_B T\f$ variables. The target distribution will be automatically normalized over the region on which it is defined on. Therefore, the function given in -FUNCTION needs to be non-negative and normalizable. The +FUNCTION needs to be non-negative and it must be possible to normalize the function. The code will perform checks to make sure that this is indeed the case. @@ -92,7 +92,7 @@ TD_CUSTOM ... By using the _FE_ variable the target distribution can depend on the free energy surface \f$F(\mathbf{s})\f$. For example, the following input is identical to using \ref TD_WELLTEMPERED with -BIASFACTOR=10. +a bias factor of 10. \plumedfile TD_CUSTOM ... FUNCTION=exp(-(beta/10.0)*FE) @@ -100,7 +100,7 @@ TD_CUSTOM ... ... TD_CUSTOM \endplumedfile Here the inverse temperature is automatically obtained by using the _beta_ -variable. It is also possible to use the _kBT_ variable. The following +variable. It is also possible to use the \f$k_B T\f$ variable. The following syntax will give the exact same results as the syntax above \plumedfile TD_CUSTOM ... 
@@ -143,7 +143,7 @@ PLUMED_REGISTER_ACTION(TD_Custom,"TD_CUSTOM") void TD_Custom::registerKeywords(Keywords& keys) { TargetDistribution::registerKeywords(keys); - keys.add("compulsory","FUNCTION","The function you wish to use for the target distribution where you should use the variables _s1_,_s2_,... for the arguments. You can also use the current estimate of the FES by using the variable _FE_ and the temperature by using the _kBT_ and _beta_ variables."); + keys.add("compulsory","FUNCTION","The function you wish to use for the target distribution where you should use the variables _s1_,_s2_,... for the arguments. You can also use the current estimate of the FES by using the variable _FE_ and the temperature by using the \\f$k_B T\\f$ and _beta_ variables."); keys.use("WELLTEMPERED_FACTOR"); keys.use("SHIFT_TO_ZERO"); } diff --git a/src/ves/TD_Exponential.cpp b/src/ves/TD_Exponential.cpp index 7d9b2c129..55e2cf577 100644 --- a/src/ves/TD_Exponential.cpp +++ b/src/ves/TD_Exponential.cpp @@ -87,7 +87,7 @@ PLUMED_REGISTER_ACTION(TD_Exponential,"TD_EXPONENTIAL") void TD_Exponential::registerKeywords(Keywords& keys) { TargetDistribution::registerKeywords(keys); keys.add("compulsory","MINIMUM","The minimum of the exponential distribution."); - keys.add("compulsory","LAMBDA","The \\f$\\lambda\\f$ parameter of the exponential distribution given as postive number."); + keys.add("compulsory","LAMBDA","The \\f$\\lambda\\f$ parameter of the exponential distribution given as positive number."); keys.use("WELLTEMPERED_FACTOR"); keys.use("SHIFT_TO_ZERO"); keys.use("NORMALIZE"); @@ -102,7 +102,7 @@ TD_Exponential::TD_Exponential(const ActionOptions& ao): parseVector("MINIMUM",minima_); parseVector("LAMBDA",lambda_); for(unsigned int k=0; k<lambda_.size(); k++) { - if(lambda_[k] < 0.0) {plumed_merror(getName()+": the value given in LAMBDA should be postive.");} + if(lambda_[k] < 0.0) {plumed_merror(getName()+": the value given in LAMBDA should be positive.");} } diff --git a/src/ves/TD_ExponentiallyModifiedGaussian.cpp b/src/ves/TD_ExponentiallyModifiedGaussian.cpp index 96299b69c..5e5a8ee8c 100644 --- a/src/ves/TD_ExponentiallyModifiedGaussian.cpp +++ b/src/ves/TD_ExponentiallyModifiedGaussian.cpp @@ -148,7 +148,7 @@ TD_ExponentiallyModifiedGaussian::TD_ExponentiallyModifiedGaussian(const ActionO std::vector<double> tmp_sigma; if(!parseNumberedVector("SIGMA",i,tmp_sigma) ) {break;} for(unsigned int k=0; k<tmp_sigma.size(); k++) { - if(tmp_sigma[k]<=0.0) {plumed_merror(getName()+": the values given in SIGMA should be postive");} + if(tmp_sigma[k]<=0.0) {plumed_merror(getName()+": the values given in SIGMA should be positive");} } sigmas_.push_back(tmp_sigma); } @@ -156,7 +156,7 @@ TD_ExponentiallyModifiedGaussian::TD_ExponentiallyModifiedGaussian(const ActionO std::vector<double> tmp_lambda; if(!parseNumberedVector("LAMBDA",i,tmp_lambda) ) {break;} for(unsigned int k=0; k<tmp_lambda.size(); k++) { - if(tmp_lambda[k]<=0.0) {plumed_merror(getName()+": the values given in LAMBDA should be postive");} + if(tmp_lambda[k]<=0.0) {plumed_merror(getName()+": the values given in LAMBDA should be positive");} } lambdas_.push_back(tmp_lambda); } diff --git a/src/ves/TD_Gaussian.cpp b/src/ves/TD_Gaussian.cpp index 5509eac39..292f55ca4 100644 --- a/src/ves/TD_Gaussian.cpp +++ b/src/ves/TD_Gaussian.cpp @@ -30,7 +30,7 @@ namespace ves { //+PLUMEDOC VES_TARGETDIST TD_GAUSSIAN /* -Target distribution given by a sum of Gaussians (static). +Target distribution given by a sum of Gaussian kernels (static). 
Employ a target distribution that is given by a sum of multivariate Gaussian (or normal) distributions, defined as @@ -62,7 +62,7 @@ numbered CENTER keywords and the standard deviations \f$\mathbf{\sigma}_{i}\f$ using the numbered SIGMA keywords. For two arguments it is possible to employ -[bivariate Gaussians](https://en.wikipedia.org/wiki/Multivariate_normal_distribution) +[bivariate Gaussian kernels](https://en.wikipedia.org/wiki/Multivariate_normal_distribution) with correlation between arguments, defined as \f[ N(\mathbf{s};\mathbf{\mu}_{i},\mathbf{\sigma}_{i},\rho_i) = @@ -104,18 +104,18 @@ normalized to 1 over the bounded region. The code will issue a warning if that is needed. For periodic CVs it is generally better to use \ref TD_VONMISES "Von Mises" -distributions instead of Gaussians as these distributions properly +distributions instead of Gaussian kernels as these distributions properly account for the periodicity of the CVs. \par Examples -One single Gaussians in one-dimension. +One single Gaussian kernel in one-dimension. \plumedfile td: TD_GAUSSIAN CENTER1=-1.5 SIGMA1=0.8 \endplumedfile -Sum of three Gaussians in two-dimensions with equal weights as +Sum of three Gaussian kernels in two-dimensions with equal weights as no weights are given. \plumedfile TD_GAUSSIAN ... @@ -126,7 +126,7 @@ TD_GAUSSIAN ... ... TD_GAUSSIAN \endplumedfile -Sum of three Gaussians in two-dimensions which +Sum of three Gaussian kernels in two-dimensions which are weighted unequally. Note that weights are automatically normalized to 1 so that WEIGHTS=1.0,2.0,1.0 is equal to specifying WEIGHTS=0.25,0.50,0.25. @@ -140,7 +140,7 @@ TD_GAUSSIAN ... ... TD_GAUSSIAN \endplumedfile -Sum of two bivariate Gaussians where there is correlation of +Sum of two bivariate Gaussian kernels where there is correlation of \f$\rho_{2}=0.75\f$ between the two arguments for the second Gaussian. \plumedfile TD_GAUSSIAN ... @@ -180,7 +180,7 @@ void TD_Gaussian::registerKeywords(Keywords& keys) { TargetDistribution::registerKeywords(keys); keys.add("numbered","CENTER","The centers of the Gaussian distributions."); keys.add("numbered","SIGMA","The standard deviations of the Gaussian distributions."); - keys.add("numbered","CORRELATION","The correlation for two-dimensional bivariate Gaussian distributions. Only works for two arguments. The value should be between -1 and 1. If no value is given the Gaussians is considered as un-correlated (i.e. value of 0.0)."); + keys.add("numbered","CORRELATION","The correlation for two-dimensional bivariate Gaussian distributions. Only works for two arguments. The value should be between -1 and 1. If no value is given the Gaussian kernels is considered as un-correlated (i.e. value of 0.0)."); keys.add("optional","WEIGHTS","The weights of the Gaussian distributions. Have to be as many as the number of centers given with the numbered CENTER keywords. If no weights are given the distributions are weighted equally. 
The weights are automatically normalized to 1."); keys.use("WELLTEMPERED_FACTOR"); keys.use("SHIFT_TO_ZERO"); diff --git a/src/ves/TD_GeneralizedExtremeValue.cpp b/src/ves/TD_GeneralizedExtremeValue.cpp index fcae691c1..6e83b8c39 100644 --- a/src/ves/TD_GeneralizedExtremeValue.cpp +++ b/src/ves/TD_GeneralizedExtremeValue.cpp @@ -115,7 +115,7 @@ PLUMED_REGISTER_ACTION(TD_GeneralizedExtremeValue,"TD_GENERALIZED_EXTREME_VALUE" void TD_GeneralizedExtremeValue::registerKeywords(Keywords& keys) { TargetDistribution::registerKeywords(keys); keys.add("compulsory","LOCATION","The \\f$\\mu\\f$ parameter of the generalized extreme value distribution."); - keys.add("compulsory","SCALE","The \\f$\\sigma\\f$ parameter for the generalized extreme value distribution given as a postive number."); + keys.add("compulsory","SCALE","The \\f$\\sigma\\f$ parameter for the generalized extreme value distribution given as a positive number."); keys.add("compulsory","SHAPE","The \\f$\\xi\\f$ parameter for the generalized extreme value distribution."); keys.use("WELLTEMPERED_FACTOR"); keys.use("SHIFT_TO_ZERO"); diff --git a/src/ves/TD_GeneralizedNormal.cpp b/src/ves/TD_GeneralizedNormal.cpp index 1a950a08d..b733ef224 100644 --- a/src/ves/TD_GeneralizedNormal.cpp +++ b/src/ves/TD_GeneralizedNormal.cpp @@ -48,7 +48,7 @@ are the centers of the distributions, parameters of the distributions, \f$(\beta_{1,i},\beta_{2,i},\ldots,\beta_{d,i})\f$ are the shape parameters of the distributions, and \f$\Gamma(x)\f$ is the -gaamma function. +gamma function. The weights \f$w_{i}\f$ are normalized to 1, \f$\sum_{i}w_{i}=1\f$. Employing \f$\beta=2\f$ results in a @@ -144,7 +144,7 @@ TD_GeneralizedNormal::TD_GeneralizedNormal(const ActionOptions& ao): std::vector<double> tmp_alpha; if(!parseNumberedVector("ALPHA",i,tmp_alpha) ) {break;} for(unsigned int k=0; k<tmp_alpha.size(); k++) { - if(tmp_alpha[k]<=0.0) {plumed_merror(getName()+": the values given in ALPHA should be postive");} + if(tmp_alpha[k]<=0.0) {plumed_merror(getName()+": the values given in ALPHA should be positive");} } alphas_.push_back(tmp_alpha); } @@ -152,7 +152,7 @@ TD_GeneralizedNormal::TD_GeneralizedNormal(const ActionOptions& ao): std::vector<double> tmp_beta; if(!parseNumberedVector("BETA",i,tmp_beta) ) {break;} for(unsigned int k=0; k<tmp_beta.size(); k++) { - if(tmp_beta[k]<=0.0) {plumed_merror(getName()+": the values given in BETA should be postive");} + if(tmp_beta[k]<=0.0) {plumed_merror(getName()+": the values given in BETA should be positive");} } betas_.push_back(tmp_beta); } diff --git a/src/ves/TD_LinearCombination.cpp b/src/ves/TD_LinearCombination.cpp index 700bec866..40f454163 100644 --- a/src/ves/TD_LinearCombination.cpp +++ b/src/ves/TD_LinearCombination.cpp @@ -95,10 +95,10 @@ TD_LINEAR_COMBINATION ... ... TD_LINEAR_COMBINATION \endplumedfile -In the above example the two Gaussians are given using two separate +In the above example the two Gaussian kernels are given using two separate DISTRIBUTION keywords. As the \ref TD_GAUSSIAN target distribution allows multiple centers is it also possible to use just one DISTRIBUTION keyword for the two -Gaussians. This is shown in the following example which will give the +Gaussian kernels. 
This is shown in the following example which will give the exact same result as the one above as the weights have been appropriately adjusted \plumedfile diff --git a/src/ves/TD_ProductCombination.cpp b/src/ves/TD_ProductCombination.cpp index d9edf784b..f3230cfee 100644 --- a/src/ves/TD_ProductCombination.cpp +++ b/src/ves/TD_ProductCombination.cpp @@ -63,7 +63,7 @@ product combination are given in the DISTRIBUTIONS keyword. The target distribution resulting from the product combination will be automatically normalized. Therefore, the product combination needs to -be a proper distribution that is non-negative and normalizable. The +be a proper distribution that is non-negative and that can be normalized. The code will perform checks to make sure that this is indeed the case. The product combination will be a dynamic target distribution if one or more diff --git a/src/ves/TD_ProductDistribution.cpp b/src/ves/TD_ProductDistribution.cpp index d56d6fdd2..594f21887 100644 --- a/src/ves/TD_ProductDistribution.cpp +++ b/src/ves/TD_ProductDistribution.cpp @@ -33,7 +33,7 @@ namespace ves { //+PLUMEDOC VES_TARGETDIST TD_PRODUCT_DISTRIBUTION /* -Target distribution given by a separable product +Target distribution given by a separable product of one-dimensional distributions (static or dynamic). Employ a target distribution that is a separable product @@ -68,13 +68,13 @@ static distribution. In the following example we employ a uniform distribution for argument 1 and a Gaussian distribution for argument 2. \plumedfile -td_uni: TD_UNIFORM +target_uniform: TD_UNIFORM -td_gauss: TD_GAUSSIAN CENTER=-2.0 SIGMA=0.5 +target_Gaussian: TD_GAUSSIAN CENTER=-2.0 SIGMA=0.5 -td_pd: TD_PRODUCT_DISTRIBUTION DISTRIBUTIONS=td_uni,td_gauss +td_pd: TD_PRODUCT_DISTRIBUTION DISTRIBUTIONS=target_uniform,target_Gaussian \endplumedfile -Note that order of the labels is important, using DISTRIBUTIONS=td_gauss,td_uni +Note that order of the labels is important, using DISTRIBUTIONS=target_Gaussian,target_uniform would mean that we would employ a Gaussian distribution for argument 1 and a uniform distribution for argument 2, which would lead to completely different results. diff --git a/src/ves/TD_WellTempered.cpp b/src/ves/TD_WellTempered.cpp index 2acfe85cb..242fd02b3 100644 --- a/src/ves/TD_WellTempered.cpp +++ b/src/ves/TD_WellTempered.cpp @@ -47,10 +47,10 @@ p(\mathbf{s}) = \f] where \f$\gamma\f$ is a so-called bias factor and \f$P_{0}(\mathbf{s})\f$ is the unbiased canonical distribution of the CVs. This target distribution thus -correponds to a biased ensemble where, as compared to the unbiased one, -the probability peaks have been broaden and the fluctations of the CVs are +corresponds to a biased ensemble where, as compared to the unbiased one, +the probability peaks have been broaden and the fluctuations of the CVs are enhanced. -The value of the bias factor \f$\gamma\f$ determines by how much the fluctations +The value of the bias factor \f$\gamma\f$ determines by how much the fluctuations are enhanced. 
The well-tempered distribution can be view as sampling on diff --git a/src/ves/VesBias.cpp b/src/ves/VesBias.cpp index 861ef6037..e9a3f8c20 100644 --- a/src/ves/VesBias.cpp +++ b/src/ves/VesBias.cpp @@ -252,7 +252,7 @@ void VesBias::registerKeywords( Keywords& keys ) { Bias::registerKeywords(keys); keys.add("optional","TEMP","the system temperature - this is needed if the MD code does not pass the temperature to PLUMED."); // - keys.reserve("optional","COEFFS","read in the coefficents from files."); + keys.reserve("optional","COEFFS","read in the coefficients from files."); // keys.reserve("optional","TARGET_DISTRIBUTION","the label of the target distribution to be used."); keys.reserve("optional","TARGET_DISTRIBUTIONS","the label of the target distribution to be used. Here you are allows to use multiple labels."); @@ -391,17 +391,17 @@ bool VesBias::readCoeffsFromFiles() { if(coeffs_fnames.size()>0) { plumed_massert(coeffs_fnames.size()==ncoeffssets_,"COEFFS keyword is of the wrong size"); if(ncoeffssets_==1) { - log.printf(" Read in coefficents from file "); + log.printf(" Read in coefficients from file "); } else { - log.printf(" Read in coefficents from files:\n"); + log.printf(" Read in coefficients from files:\n"); } for(unsigned int i=0; i<ncoeffssets_; i++) { IFile ifile; ifile.link(*this); ifile.open(coeffs_fnames[i]); if(!ifile.FieldExist(coeffs_pntrs_[i]->getDataLabel())) { - std::string error_msg = "Problem with reading coefficents from file " + ifile.getPath() + ": no field with name " + coeffs_pntrs_[i]->getDataLabel() + "\n"; + std::string error_msg = "Problem with reading coefficients from file " + ifile.getPath() + ": no field with name " + coeffs_pntrs_[i]->getDataLabel() + "\n"; plumed_merror(error_msg); } size_t ncoeffs_read = coeffs_pntrs_[i]->readFromFile(ifile,false,false); @@ -410,7 +410,7 @@ bool VesBias::readCoeffsFromFiles() { log.printf("%s (read %zu of %zu values)\n", ifile.getPath().c_str(),ncoeffs_read,coeffs_pntrs_[i]->numberOfCoeffs()); } else { - log.printf(" coefficent %u: %s (read %zu of %zu values)\n",i,ifile.getPath().c_str(),ncoeffs_read,coeffs_pntrs_[i]->numberOfCoeffs()); + log.printf(" coefficient %u: %s (read %zu of %zu values)\n",i,ifile.getPath().c_str(),ncoeffs_read,coeffs_pntrs_[i]->numberOfCoeffs()); } ifile.close(); } diff --git a/src/ves/VesLinearExpansion.cpp b/src/ves/VesLinearExpansion.cpp index 80c28dc49..0805856b5 100644 --- a/src/ves/VesLinearExpansion.cpp +++ b/src/ves/VesLinearExpansion.cpp @@ -103,7 +103,7 @@ Internally the code uses grids to calculate the basis set averages over the target distribution that is needed for the gradient. The same grid is also used for the output files (see next section). The size of the grid is determined by the GRID_BINS keyword. By default it has -100 grid points in each dimension, and generally this value should be sufficent. +100 grid points in each dimension, and generally this value should be sufficient. \par Outputting Free Energy Surfaces and Other Files @@ -118,7 +118,7 @@ For multi-dimensional case is it possible to also output projections of the free energy surfaces. The arguments for which to do these projections is specified using the numbered PROJ_ARG keywords. For these files a suffix indicating the projection (proj-#) will be added to the filenames. -You will also need to specfiy the frequency of the output by using the +You will also need to specify the frequency of the output by using the FES_PROJ_OUTPUT keyword within the optimizer. 
It is also possible to output the bias potential itself, for this the relevant @@ -136,15 +136,15 @@ simulation while for dynamic ones you will need to specify the frequency of the output by using the TARGETDIST_OUTPUT and TARGETDIST_PROJ_OUTPUT keywords within the optimizer. -It is also possible to output free energy surfaces and bias in postprocessing +It is also possible to output free energy surfaces and bias in post processing by using the \ref VES_OUTPUT_FES action. However, be aware that this action does does not support dynamic target distribution (e.g. well-tempered). \par Static Bias It is also possible to use VES_LINEAR_EXPANSION as a static bias that uses -previously obtained coefficents. In this case the coefficents should be -read in from the coefficent file given in the COEFFS keyword. +previously obtained coefficients. In this case the coefficients should be +read in from the coefficient file given in the COEFFS keyword. \par Bias Cutoff @@ -162,14 +162,14 @@ BIAS_CUTOFF_FERMI_LAMBDA keyword. In the following example we run a VES_LINEAR_EXPANSION for one CV using a Legendre basis functions (\ref BF_LEGENDRE) and a uniform target -distribution as no target distribution is specified. The coefficents +distribution as no target distribution is specified. The coefficients are optimized using averaged stochastic gradient descent optimizer (\ref OPT_AVERAGED_SGD). Within the optimizer we specify that the -FES should be outputted to file every 500 coefficents iterations (the +FES should be outputted to file every 500 coefficients iterations (the FES_OUTPUT keyword). -Parameters that are very specfic to the problem at hand, like the +Parameters that are very specific to the problem at hand, like the order of the basis functions, the interval on which the -basis functions are defined, and the stepsize used +basis functions are defined, and the step size used in the optimizer, are left unfilled. \plumedfile bf1: BF_LEGENDRE ORDER=__ MINIMUM=__ MAXIMUM=__ @@ -256,7 +256,7 @@ OPT_AVERAGED_SGD ... \endplumedfile The optimized bias potential can then be used as a static bias for obtaining -kinetics. For this you need read in the final coefficents from file +kinetics. For this you need read in the final coefficients from file (e.g. coeffs_final.data in this case) by using the COEFFS keyword (also, no optimizer should be defined in the input) \plumedfile diff --git a/src/vesselbase/ActionWithVessel.cpp b/src/vesselbase/ActionWithVessel.cpp index b0753343e..19da1cd3f 100644 --- a/src/vesselbase/ActionWithVessel.cpp +++ b/src/vesselbase/ActionWithVessel.cpp @@ -37,12 +37,12 @@ namespace vesselbase { void ActionWithVessel::registerKeywords(Keywords& keys) { keys.add("hidden","TOL","this keyword can be used to speed up your calculation. When accumulating sums in which the individual " - "terms are numbers inbetween zero and one it is assumed that terms less than a certain tolerance " + "terms are numbers in between zero and one it is assumed that terms less than a certain tolerance " "make only a small contribution to the sum. They can thus be safely ignored as can the the derivatives " "wrt these small quantities."); keys.add("hidden","MAXDERIVATIVES","The maximum number of derivatives that can be used when storing data. This controls when " "we have to start using lowmem"); - keys.addFlag("SERIAL",false,"do the calculation in serial. Do not parallelize"); + keys.addFlag("SERIAL",false,"do the calculation in serial. 
Do not use MPI"); keys.addFlag("LOWMEM",false,"lower the memory requirements"); keys.addFlag("TIMINGS",false,"output information on the timings of the various parts of the calculation"); keys.reserveFlag("HIGHMEM",false,"use a more memory intensive version of this collective variable"); diff --git a/src/vesselbase/Highest.cpp b/src/vesselbase/Highest.cpp index 2e6c897df..c14ad3720 100644 --- a/src/vesselbase/Highest.cpp +++ b/src/vesselbase/Highest.cpp @@ -42,7 +42,7 @@ void Highest::registerKeywords( Keywords& keys ) { void Highest::reserveKeyword( Keywords& keys ) { keys.reserve("vessel","HIGHEST","this flag allows you to recover the highest of these variables."); - keys.addOutputComponent("highest","HIGHEST","the lowest of the quantitities calculated by this action"); + keys.addOutputComponent("highest","HIGHEST","the lowest of the quantities calculated by this action"); } Highest::Highest( const VesselOptions& da ) : diff --git a/src/vesselbase/Histogram.cpp b/src/vesselbase/Histogram.cpp index dcaa69beb..5bc8cd8b7 100644 --- a/src/vesselbase/Histogram.cpp +++ b/src/vesselbase/Histogram.cpp @@ -44,8 +44,8 @@ void Histogram::registerKeywords( Keywords& keys ) { } void Histogram::reserveKeyword( Keywords& keys ) { - keys.reserve("vessel","HISTOGRAM","calculate a discretized histogram of the distribution of values. " - "This shortcut allows you to calculates NBIN quantites like BETWEEN."); + keys.reserve("vessel","HISTOGRAM","calculate how many of the values fall in each of the bins of a histogram. " + "This shortcut allows you to calculates NBIN quantities like BETWEEN."); } Histogram::Histogram( const VesselOptions& da ): diff --git a/src/vesselbase/Lowest.cpp b/src/vesselbase/Lowest.cpp index c6ee2e518..57c3d2967 100644 --- a/src/vesselbase/Lowest.cpp +++ b/src/vesselbase/Lowest.cpp @@ -42,7 +42,7 @@ void Lowest::registerKeywords( Keywords& keys ) { void Lowest::reserveKeyword( Keywords& keys ) { keys.reserve("vessel","LOWEST","this flag allows you to recover the lowest of these variables."); - keys.addOutputComponent("lowest","LOWEST","the lowest of the quantitities calculated by this action"); + keys.addOutputComponent("lowest","LOWEST","the lowest of the quantities calculated by this action"); } Lowest::Lowest( const VesselOptions& da ) : diff --git a/src/vesselbase/Mean.cpp b/src/vesselbase/Mean.cpp index cb3ab202a..10733e6fe 100644 --- a/src/vesselbase/Mean.cpp +++ b/src/vesselbase/Mean.cpp @@ -43,7 +43,7 @@ void Mean::registerKeywords( Keywords& keys ) { void Mean::reserveKeyword( Keywords& keys ) { keys.reserve("vessel","MEAN","take the mean of these variables."); - keys.addOutputComponent("mean","MEAN","the mean value. The output component can be refererred to elsewhere in the input " + keys.addOutputComponent("mean","MEAN","the mean value. The output component can be referred to elsewhere in the input " "file by using the label.mean"); } diff --git a/src/vesselbase/Moments.cpp b/src/vesselbase/Moments.cpp index 192085777..52c6317ef 100644 --- a/src/vesselbase/Moments.cpp +++ b/src/vesselbase/Moments.cpp @@ -59,10 +59,10 @@ void Moments::registerKeywords( Keywords& keys ) { void Moments::reserveKeyword( Keywords& keys ) { keys.reserve("optional","MOMENTS","calculate the moments of the distribution of collective variables. 
" - "The \\f$m\\f$th moment of a distribution is calculated using \\f$\\frac{1}{N} \\sum_{i=1}^N ( s_i - \\overline{s} )^m \\f$, where \\f$\\overline{s}\\f$ is " + "The mth moment of a distribution is calculated using \\f$\\frac{1}{N} \\sum_{i=1}^N ( s_i - \\overline{s} )^m \\f$, where \\f$\\overline{s}\\f$ is " "the average for the distribution. The moments keyword takes a lists of integers as input or a range. Each integer is a value of \\f$m\\f$. The final " "calculated values can be referenced using moment-\\f$m\\f$. You can use the COMPONENT keyword in this action but the syntax is slightly different. " - "If you would like the 2nd and third moments of the 3rd component you would use MOMENTS={COMPONENT=3 MOMENTS=2-3}. The moments would then be refered to " + "If you would like the second and third moments of the third component you would use MOMENTS={COMPONENT=3 MOMENTS=2-3}. The moments would then be referred to " "using the labels moment-3-2 and moment-3-3. This syntax is also required if you are using numbered MOMENT keywords i.e. MOMENTS1, MOMENTS2..."); keys.reset_style("MOMENTS","vessel"); keys.addOutputComponent("moment","MOMENTS","the central moments of the distribution of values. The second moment " diff --git a/user-doc/Analysis.md b/user-doc/Analysis.md index ae07956d7..623d12f64 100644 --- a/user-doc/Analysis.md +++ b/user-doc/Analysis.md @@ -1,7 +1,7 @@ \page Analysis Analysis -PLUMED can be used to analyse trajectories either on the fly during an MD run or via -postprocessing a trajectory using \ref driver. A molecular dynamics trajectory is in essence an ordered +PLUMED can be used to analyze trajectories either on the fly during an MD run or via +post processing a trajectory using \ref driver. A molecular dynamics trajectory is in essence an ordered set of configurations of atoms. Trajectory analysis algorithms are methods that allow us to extract meaningful information from this extremely high-dimensionality information. In extracting this information much of the information in the trajectory will be discarded and assumed to be irrelevant to the problem at hand. For example, @@ -38,10 +38,10 @@ frequently to collect data from the trajectory. In all these methods the output is a form of ensemble average. If you are running with a bias it is thus likely that you may want to reweight the trajectory frames in order to remove the effect the bias has on the static behavior of the system. The following methods can thus be used to calculate weights for the various trajectory -frames so that the final ensemble average is an average for the cannonical ensemble at the appropriate +frames so that the final ensemble average is an average for the canonical ensemble at the appropriate temperature. -\section analysisbias Unbiasing and Averaging +\section analysisbias Reweighting and Averaging @REWEIGHTING@ @@ -50,12 +50,12 @@ You can then calculate ensemble averages using the following actions. @GRIDCALC@ For many of the above commands data is accumulated on the grids. These grids can be further -analysed using one of the actions detailed below at some time. +analyzed using one of the actions detailed below at some time. @GRIDANALYSIS@ As an example the following set of commands instructs PLUMED to calculate the distance between -atoms 1 and 2 for every 5th frame in the trajectory and to accumulate a histogram from this data +atoms 1 and 2 for every fifth frame in the trajectory and to accumulate a histogram from this data which will be output every 100 steps (i.e. 
when 20 distances have been added to the histogram). \plumedfile @@ -66,7 +66,7 @@ DUMPGRID GRID=h FILE=histo STRIDE=100 It is important to note when using commands such as the above the first frame in the trajectory is assumed to be the initial configuration that was input to the MD code. It is thus ignored. Furthermore, if you are -running with driver and you would like to analyse the whole trajectory (without specifying its length) +running with driver and you would like to analyze the whole trajectory (without specifying its length) and then print the result you simply call \ref DUMPGRID (or any of the commands above) without a STRIDE keyword as shown in the example below. @@ -113,14 +113,14 @@ d1n: DISTANCE ATOMS=1,2 NUMERICAL_DERIVATIVES DUMPDERIVATIVES ARG=d1,d1n FILE=derivatives \endverbatim -The first of these two distance commands calculates the analytical derivtives of the distance +The first of these two distance commands calculates the analytical derivatives of the distance while the second calculates these derivatives numerically. Obviously, if your CV is implemented correctly these two sets of quantities should be nearly identical. \section storing Storing data for analysis All the analysis methods described in previous sections accumulate averages or output diagnostic information on the fly. -That is to say these methods calculate something given the instantaneous positions of the atoms or the instantaenous +That is to say these methods calculate something given the instantaneous positions of the atoms or the instantaneous values of a set of collective variables. Many methods (e.g. dimensionality reduction and clustering) will not work like this, however, as information from multiple trajectory frames is required at the point when the analysis is performed. In other words the output from these types of analysis cannot be accumulated one frame at time. When using these methods you must therefore @@ -162,7 +162,7 @@ N.B. You can only use the two commands above when you are doing post-processing. \section landmarks Landmark Selection Many of the techniques described in the following sections are very computationally expensive to run on large trajectories. -A common strategy is thus to use a landmark selection algorithm to pick a particularly-reprentative subset of trajectory +A common strategy is thus to use a landmark selection algorithm to pick a particularly-representative subset of trajectory frames and to only apply the expensive analysis algorithm on these configurations. The various landmark selection algorithms that are available in PLUMED are as follows @@ -178,7 +178,7 @@ OUTPUT_COLVAR_FILE USE_OUTPUT_DATA_FROM=ll2 FILE=mylandmarks \endverbatim When landmark selection is performed in this way a weight is ascribed to each of the landmark configurations. This weight is -calculated by summing the weights of all the trajectory frames in each of the landmarks Voronoi polyhedra +calculated by summing the weights of all the trajectory frames in each of the landmarks Voronoi polyhedron (https://en.wikipedia.org/wiki/Voronoi_diagram). The weight of each trajectory frame is one unless you are reweighting using the formula described in the \ref analysisbias to counteract the fact of a simulation bias or an elevated temperature. If you are reweighting using these formula the weight of each of the points is equal to the exponential term in the numerator of these expressions. 
@@ -201,8 +201,8 @@ Euclidean distances between pairs of them, \f$d_{ij}\f$, resemble the dissimilar where \f$F(D_{ij})\f$ is some transformation of the distance between point \f$X^{i}\f$ and point \f$X^{j}\f$ and \f$f(d_{ij})\f$ is some transformation of the distance between the projection of \f$X^{i}\f$, \f$x^i\f$, and the projection of \f$X^{j}\f$, \f$x^j\f$. \f$w_i\f$ and \f$w_j\f$ are the weights -of configurations \f$X^i\f$ and \f$^j\f$ respectively. These weights are caclulated using the reweighting and voronoi polyhedra approaches described in -previous sections. A tutorial on dimensionality reduction and how it can be used to analyse simulations can be found in the tutorial \ref belfast-3 and in +of configurations \f$X^i\f$ and \f$^j\f$ respectively. These weights are calculated using the reweighting and Voronoi polyhedron approaches described in +previous sections. A tutorial on dimensionality reduction and how it can be used to analyze simulations can be found in the tutorial \ref belfast-3 and in the following <a href="https://www.youtube.com/watch?v=ofC2qz0_9_A&feature=youtu.be" > short video.</a> Within PLUMED running an input to run a dimensionality reduction algorithm can be as simple as: @@ -218,17 +218,17 @@ We can even throw some landmark selection into this procedure and perform \verbatim data: COLLECT_FRAMES STRIDE=1 ARG=d1 -ss1: EUCLIDEAN_DISSIMILARITIES USE_OUTPUT_DATA_FROM=data -ll2: LANDMARK_SELECT_FPS USE_OUTPUT_DATA_FROM=ss1 NLANDMARKS=300 +matrix: EUCLIDEAN_DISSIMILARITIES USE_OUTPUT_DATA_FROM=data +ll2: LANDMARK_SELECT_FPS USE_OUTPUT_DATA_FROM=matrix NLANDMARKS=300 mds: CLASSICAL_MDS USE_OUTPUT_DATA_FROM=ll2 NLOW_DIM=2 -osample: PROJECT_ALL_ANALYSIS_DATA USE_OUTPUT_DATA_FROM=ss1 PROJECTION=smap +osample: PROJECT_ALL_ANALYSIS_DATA USE_OUTPUT_DATA_FROM=matrix PROJECTION=smap \endverbatim -Notice here that the final command allows us to caluclate the projections of all the non-landmark points that were collected by the action with -label ss1. +Notice here that the final command allows us to calculate the projections of all the non-landmark points that were collected by the action with +label matrix. -Dimensionality can be more complicated, however, because the stress function that calculates \f$\chi^2\f$ has to optimised rather carefully using -a number of different algorithms. The various algorithms that can be used to optimise this function are described below +Dimensionality can be more complicated, however, because the stress function that calculates \f$\chi^2\f$ has to optimized rather carefully using +a number of different algorithms. The various algorithms that can be used to optimize this function are described below @DIMRED@ diff --git a/user-doc/CollectiveVariables.md b/user-doc/CollectiveVariables.md index f530f5278..378ba2d75 100644 --- a/user-doc/CollectiveVariables.md +++ b/user-doc/CollectiveVariables.md @@ -1,7 +1,7 @@ \page colvarintro Collective Variables Chemical systems contain an enormous number atoms, which, in most cases makes it simply impossible for -us to understand anything by monitoring the atom postions directly. Consquentially, +us to understand anything by monitoring the atom positions directly. Consequently, we introduce Collective variables (CVs) that describe the chemical processes we are interested in and monitor these simpler quantities instead. 
These CVs are used in many of the methods implemented in PLUMED - there values can be monitored using \ref PRINT, \ref Function of them can be calculated @@ -12,14 +12,14 @@ The simplest collective variables that are implemented in PLUMED take in a set of atomic positions and output one or multiple scalar CV values. Information on these variables is given on the page entitled \ref Colvar while information as to how sets of atoms can be selected can be found in the pages on \ref Group. Please be aware that PLUMED contains implementations of many other collective variables -but that the input for these variables may be less transparent when it is first encourntered. +but that the input for these variables may be less transparent when it is first encountered. In particular, the page on \ref dists describes the various ways that you can calculate the distance from a particular reference configuration. So you will find instructions on how to calculate the RMSD distance from the folded state of a protein here. Meanwhile, the page on \ref Function describes the various functions of collective variables that can be used in the code. This is a very powerful feature of PLUMED as you can use the \ref Function commands to calculate any function or combination of the simple collective variables listed on the page \ref Colvar. Lastly the page on \ref mcolv describes MultiColvars. MultiColvars allow you to use many different colvars and allow us to -implement all these collective variables without implementing having an unmanigiably large ammount of code. For some things (e.g. +implement all these collective variables without a large amount of code. For some things (e.g. \ref DISTANCES GROUPA=1 GROUPB=2-100 LESS_THAN={RATIONAL R_0=3}) there are more computationally efficient options available in plumed (e.g. \ref COORDINATION). However, MultiColvars are worth investigating as they provide a flexible syntax for many quite-complex CVs. @@ -38,19 +38,19 @@ The following list contains descriptions of a number of the colvars that are cur \page dists Distances from reference configurations -One colvar that has been shown to be very sucessful in studying protein folding is the distance between the instantaneous configuration +One colvar that has been shown to be very successful in studying protein folding is the distance between the instantaneous configuration and a reference configuration - often the structure of the folded state. When the free energy of a protein is shown as a function of this collective variable there is a minima for low values of the CV, which is due to the folded state of the protein. There is then a second minima at higher values of the CV, which is the minima corresponding to the unfolded state. A slight problem with this sort of collective variable is that there are many different ways of calculating the distance from a particular reference structure. The simplest - adding together the distances by which each of the atoms has been translated in -going from the reference configuration to the instantanous configuration - is not particularly sensible. A distance calculated -in this way does not neglect translation of the center of mass of the molecule and rotation of the frame of reference. A common practise +going from the reference configuration to the instantaneous configuration - is not particularly sensible. A distance calculated +in this way does not neglect translation of the center of mass of the molecule and rotation of the frame of reference. 
A common practice is thus to remove these components by calculating the \ref RMSD distance between the reference and instantaneous configurations. -This is not the only way to calculate the distance, however. One could also calculate the total ammount by which a large number -of collective variables change in moving from the reference to the instaneous configurations. One could even combine RMSD distances -with the ammount the collective variables change. A full list of the ways distances can be measured in PLUMED is given below: +This is not the only way to calculate the distance, however. One could also calculate the total amount by which a large number +of collective variables change in moving from the reference to the instantaneous configurations. One could even combine RMSD distances +with the amount the collective variables change. A full list of the ways distances can be measured in PLUMED is given below: @DCOLVAR@ @@ -67,12 +67,12 @@ function something like this: \f[ s = \sum_i g[f(\{X\}_i)] \f] -In this expression \f$g\f$ is a funciton that takes in one argument and \f$f\f$ is a function that takes a set of atomic positions +In this expression \f$g\f$ is a function that takes in one argument and \f$f\f$ is a function that takes a set of atomic positions as argument. The symbol \f$\{X\}_i\f$ is used to indicate the fact that the function \f$f\f$ is evaluated for a number of different sets of atoms. If you would just like to output the values of all the various \f$f\f$ functions you should use the command \ref DUMPMULTICOLVAR This functionality is useful if you need to calculate a minimum distance or the number of coordination numbers greater than a 3.0. -To avoid dupilcating the code to calculate an angle or distance many times and to make it easier to implement very complex collective +To avoid duplicating the code to calculate an angle or distance many times and to make it easier to implement very complex collective variables PLUMED provides these sort of collective variables using so-called MultiColvars. MultiColvars are named in this way because a single PLUMED action can be used to calculate a number of different collective variables. For instance the \ref DISTANCES action can be used to calculate the minimum distance, the number of distances less than a certain value, the number of @@ -112,8 +112,8 @@ with respect to these terms are essentially zero. By increasing the TOL paramet of the calculation. Be aware, however, that this increase in speed is only possible because you are lowering the accuracy with which you are computing the quantity of interest. -Once you have specified the base quanties that are to be calculated from the atoms involved and any parameters -you need to specify what function of these base quanties is to be calculated. For most multicolvars you can calculate +Once you have specified the base quantities that are to be calculated from the atoms involved and any parameters +you need to specify what function of these base quantities is to be calculated. For most multicolvars you can calculate the minimum, the number less than a target value, the number within a certain range, the number more than a target value and the average value directly. @@ -136,7 +136,7 @@ The idea with these methods is that function of the form: \f[ s = \sum_i w(\{X\}_i) g[f(\{X\}_i)] \f] -can be evaluated where once again \f$g\f$ is a function with one argumet and \f$g\f$ is a function of a set of atomic positions. 
+can be evaluated where once again \f$g\f$ is a function with one argument and \f$g\f$ is a function of a set of atomic positions. The difference from the more general function described earlier is that we now have a weight \f$w\f$ which is again a function of the atomic positions. This weight varies between zero and one and it is this weight that is calculated in the list of filtering methods and volume methods described in the lists above. @@ -175,8 +175,8 @@ The list of biases of this type are as follows: @MCOLVARB@ -Notice that (in theory) you could also use this functionality to add additional terms to your forcefield or to implement your -forcefield. +Notice that (in theory) you could also use this functionality to add additional terms to your force field or to implement your +force field. \section usingbase Extracting all the base quantities @@ -187,10 +187,10 @@ action. You can thus use the following command to extract this sort of informat \page contactmatrix Exploiting contact matrices A contact matrix is an \f$N \times N\f$ matrix in which the \f$i\f$th, \f$j\f$th element tells you whether or not the \f$i\f$th -and \f$j\f$th atoms/molecules from a set of \f$N\f$ atoms/molecules are adjacent or not. There are various ways of definining +and \f$j\f$th atoms/molecules from a set of \f$N\f$ atoms/molecules are adjacent or not. There are various ways of defining whether a pair of atoms/molecules are adjacent or not. For example we can say two atoms are adjacent if the distance between them is less than some cutoff. Alternatively, if we have a have a pair of molecules, we might state they are adjacent if their -centre's of mass are within a certain cutoff and if the two molecules have the same orientation. Two electronegative atoms +centers of mass are within a certain cutoff and if the two molecules have the same orientation. Two electronegative atoms might be said to be adjacent if there is a hydrogen bond between them. For these reasons then PLUMED contains all of the following methods for calculating an adjacency matrix diff --git a/user-doc/Files.md b/user-doc/Files.md index f7411c338..78e8254a8 100644 --- a/user-doc/Files.md +++ b/user-doc/Files.md @@ -1,7 +1,7 @@ \page Files Files We tried to design PLUMED in such a manner that input/output is done consistently -irrespectively of the file type. Most of the files written or read by PLUMED thus follow +irrespective of the file type. Most of the files written or read by PLUMED thus follow the very same conventions discussed below. \section Restart @@ -50,7 +50,7 @@ Notice that when PLUMED adds the replica suffix, it recognizes the file extensio extension. Before PLUMED 2.2, the only recognized suffix was ".gz". Since 2.2, any suffix with length less or equal to five letters is recognized. -This means that using in a multireplica context an input such as +This means that using in a multi-replica context an input such as \plumedfile d: DISTANCE ATOMS=1,2 PRINT ARG=d FILE=COLVAR.gz diff --git a/user-doc/Functions.md b/user-doc/Functions.md index 444ff57f2..37b5aec47 100644 --- a/user-doc/Functions.md +++ b/user-doc/Functions.md @@ -1,6 +1,6 @@ \page Function Functions -When performing biased dynamics or analysing a trajectory you may wish to analyse/bias the value of +When performing biased dynamics or analyzing a trajectory you may wish to analyze/bias the value of some function of a set of collective variables rather than the values of the collective variables directly. 
You can do this with PLUMED by using any one of the following list of functions. diff --git a/user-doc/GettingStarted.md b/user-doc/GettingStarted.md index 496c7a146..ed1048894 100644 --- a/user-doc/GettingStarted.md +++ b/user-doc/GettingStarted.md @@ -8,16 +8,16 @@ some free energy method. A very brief introduction to the syntax used in the PLU is provided in this <a href="http://www.youtube.com/watch?v=PxJP16qNCYs"> 10-minute video </a>. Within this input file every line is an instruction for PLUMED to perform some particular action. This could be - the calculation of a colvar, an occasional analysis of the trajectory or a biassing of the dynamics. The first + the calculation of a colvar, an occasional analysis of the trajectory or a biasing of the dynamics. The first word in these lines specify what particular action is to be performed. This is then followed by a number of keywords which provide PLUMED with more details as to how the action is to be performed. These keywords are either single words (in which they tell PLUMED to do the calculation in a particular way - for example NOPBC tells PLUMED to not use the periodic -bounadry conditions when calculating a particular colvar) or they can be words followed by an equals sign and a comma separated +boundary conditions when calculating a particular colvar) or they can be words followed by an equals sign and a comma separated list _with no spaces_ of numbers or characters (so for example ATOMS=1,2,3,4 tells PLUMED to use atom numbers 1,2,3 and 4 in the calculation of a particular colvar). The reason why spaces are not admitted is that PLUMED should be able to understand when the list of atoms ended and a new keyword should be expected. -Space separated lists can be used instead of commma separated list if the entire list +Space separated lists can be used instead of comma separated list if the entire list is enclosed in curly braces (e.g. ATOMS={1 2 3 4}). Please note that you can split commands over multiple lines by using \ref ContinuationLines. @@ -57,7 +57,7 @@ You can also add \ref comments to the input or set up your input over multiple f \ref includes. More information on the input syntax as well as details on the the various trajectory -analisys tools that come with PLUMED are given in: +analysis tools that come with PLUMED are given in: - \ref colvarintro tells you about the ways that you can calculate functions of the positions of the atoms. - \ref Analysis tells you about the various forms of analysis you can run on trajectories using PLUMED. diff --git a/user-doc/Group.md b/user-doc/Group.md index 46c470cb2..a7ecc2a9a 100644 --- a/user-doc/Group.md +++ b/user-doc/Group.md @@ -18,7 +18,7 @@ In addition, there are a few shortcuts that can be used: - `@mdatoms` indicate all the physical atoms present in the MD engine (e.g. `DUMPATOMS ATOMS=@mdatoms`). - `@allatoms` indicates all atoms, including \ref vatoms "those defined only in PLUMED" (e.g. `DUMPATOMS ATOMS=@allatoms`). -The list of the virtual atoms defined in PLUMED can be obtained dy difference with `GROUP ATOMS=@allatoms REMOVE=@mdatoms`. +The list of the virtual atoms defined in PLUMED can be obtained by using the command `GROUP ATOMS=@allatoms REMOVE=@mdatoms`. Other shortcuts are available if you loaded the structure of the molecule using the \ref MOLINFO command. 
@@ -28,7 +28,7 @@ DUMPATOMS ATOMS=1,2,10-20,40-60:5,100-70:-2 LABEL=g5 FILE=test.xyz \endplumedfile Some collective variable must accept a fixed number of atoms, for example a \ref DISTANCE is calculated -using two atoms only, an \ref ANGLE is calcuated using either 3 or 4 atoms and \ref TORSION is calculated using 4 atoms. +using two atoms only, an \ref ANGLE is calculated using either 3 or 4 atoms and \ref TORSION is calculated using 4 atoms. Additional material and examples can be also found in the tutorial \ref belfast-1. @@ -45,7 +45,7 @@ in the same manner as they would be treated in the host code. In some codes thi you are using involve some property of a molecule. These codes allow the atoms in the molecules to become separated by periodic boundaries, a fact which PLUMED could only deal with were the topology passed from the MD code to PLUMED. Making this work would involve a lot laborious programming and goes against our original aim of having a general patch that can be implemented -in a wide variety of MD codes. Consequentially, we have implemented a more pragmatic solution to this probem - the user specifies +in a wide variety of MD codes. Consequentially, we have implemented a more pragmatic solution to this problem - the user specifies in input any molecules (or parts of molecules) that must be kept in tact throughout the simulation run. In PLUMED 1 this was done using the ALIGN_ATOMS keyword. In PLUMED 2 the same effect can be achieved using the \subpage WHOLEMOLECULES command. diff --git a/user-doc/Installation.md b/user-doc/Installation.md index 9abb89223..8497f60bc 100644 --- a/user-doc/Installation.md +++ b/user-doc/Installation.md @@ -52,14 +52,14 @@ Some compilers do not declare full support, but implement anyway a number of C++ sufficient to compile PLUMED (this is the case for instance of intel 15 compiler). In case you see a warning about C++11 support during `./configure` please make sure that PLUMED compiles correctly and, if possible, execute the regtests -(using `make regtest`). Notice that we regularily test a number of compilers on travis-ci, +(using `make regtest`). Notice that we regularly test a number of compilers on travis-ci, and at least those compilers are guaranteed to be able to compile PLUMED correctly. \section ConfiguringPlumed Configuring PLUMED The `./configure` command just generates a Makefile.conf file and a sourceme.sh file. -In PLUMED 2.0 these files were pre-prepared and stored in the +In PLUMED 2.0 these files were prepared and stored in the directory configurations/. The new ones generated by ./configure are similar to the old ones but are not completely compatible. In particular, some of the -D options have been changed in version 2.2, @@ -85,9 +85,9 @@ avoid looking for a library using the "disable" syntax, e.g. > ./configure --disable-mpi --disable-xdrfile \endverbatim -Notice that when mpi search is enabled (by default) compilers +Notice that when MPI search is enabled (by default) compilers such as "mpic++" and "mpicxx" are searched for first. On the other hand, -if mpi search is disabled ("./configure --disable-mpi") non-mpi +if MPI search is disabled ("./configure --disable-mpi") non-mpi compilers are searched for. Notice that only a few of the possible compiler name are searched. Thus, compilers such as "g++-mp-4.8" should be explicitly requested with the CXX option. @@ -97,8 +97,8 @@ variables CXX and CC. 
E.g., to use Intel compilers use the following command: \verbatim > ./configure CXX=icpc CC=icc \endverbatim -Notice that we are using icpc in this example, which is not an mpi compiler as a -result mpi will not be enabled. Also consider that this is different with respect +Notice that we are using icpc in this example, which is not an MPI compiler as a +result MPI will not be enabled. Also consider that this is different with respect to what some other configure script does in that variables such as MPICXX are completely ignored here. In case you work on a machine where CXX is set to a serial compiler and MPICXX to a MPI compiler, to @@ -169,16 +169,16 @@ Notice that in this manner only libraries that are explicitly passed using the ` \verbatim > ./configure --disable-libsearch LIBS=-lxdrfile \endverbatim -will make sure that only xdrfile is linked and, for instance, blas and lapack libraries are not. +will make sure that only xdrfile is linked and, for instance, BLAS and LAPACK libraries are not. This might be useful when installing PLUMED within package managers such as MacPorts to make sure that only desired libraries are linked and thus to avoid to introduce spurious dependencies. The only exception to this rule is `-ldl`, which is anyway a system library on Linux. -\warning On OSX it is common practice to hardcode the full path +\warning On OSX it is common practice to hard code the full path to libraries in the libraries themselves. This means that, after having linked a shared library, that specific shared library will be searched in the same -place (we do the same for the `libplumed.dylib` library, which has an install name hardcoded). -On the other hand, on Linux it is common pratice not to hardcode the full path. +place (we do the same for the `libplumed.dylib` library, which has an install name hard coded). +On the other hand, on Linux it is common practice not to hard code the full path. This means that if you use the `LDFLAGS` option to specify the path to the libraries you want to link to PLUMED (e.g. `./configure LDFLAGS="-L/path"`) these libraries might not be found later. @@ -186,12 +186,12 @@ The visible symptom is that `src/lib/plumed-shared` will not be linked correctly Although the file 'src/lib/plumed-shared' is not necessary, being able to produce it means that it will be possible to link PLUMED dynamically with MD codes later. -The easiest solution is to hardcode the library search path in this way: +The easiest solution is to hard code the library search path in this way: \verbatim > ./configure LDFLAGS="-L/path -Wl,-rpath,/path" \endverbatim Notice that as of PLUMED v2.4 it is possible to use the configure option `--enable-rpath` -to automatically hardcode the path defined in `LIBRARY_PATH`: +to automatically hard code the path defined in `LIBRARY_PATH`: \verbatim > ./configure LIBRARY_PATH=/path --enable-rpath \endverbatim @@ -201,17 +201,17 @@ In a typical environment configured using module framework (http://modules.sourc `LIBRARY_PATH` will be a variable containing the path to all the modules loaded at compilation time. -PLUMED needs blas and lapack. These are treated slighty different from +PLUMED needs BLAS and LAPACK. These are treated slightly different from other libraries. The search is done in the usual way (i.e., first look for them without any link flag, then add "-lblas" and "-llapack", respectively). 
-As such if you want to use a specific version of blas or lapack +As such if you want to use a specific version of BLAS or LAPACK you can make them available to configure by using \verbatim > ./configure LDFLAGS=-L/path/to/blas/lib LIBS=-lnameoflib \endverbatim If the functions of these libraries are not found, the compiler looks for a version with a final underscore added. -Finally, since blas and lapack are compulsory in PLUMED, +Finally, since BLAS and LAPACK are compulsory in PLUMED, you can use a internal version of these libraries that comes as part of PLUMED. If all else fails the internal version of BLAS and LAPACK are the ones that will be used by PLUMED. @@ -220,14 +220,14 @@ If you wish to disable any search for external libraries \verbatim > ./configure --disable-external-blas \endverbatim -Notice that you can also disable external lapack only, that is use internal lapack with external blas +Notice that you can also disable external LAPACK only, that is use internal LAPACK with external BLAS using \verbatim > ./configure --disable-external-lapack \endverbatim -Since typically it is the blas library that can be heavily optimized, this configuration +Since typically it is the BLAS library that can be heavily optimized, this configuration should not provide significant slowing down and could be used on systems where -native lapack libraries have problems. +native LAPACK libraries have problems. As a final resort, you can also edit the resulting Makefile.conf file. @@ -244,7 +244,7 @@ files compatible with PLUMED 2.0. - CPPFLAGS : add here definition needed to enable specific optional functions; e.g. use -D__PLUMED_HAS_XDRFILE to enable the xdrfile library - SOEXT : this gives the extension for shared libraries in your system, typically -"so" on unix, "dylib" on mac; If your system does not support dynamic libraries or, for some other reason, you would like only static executables you can +"so" on UNIX, "dylib" on mac; If your system does not support dynamic libraries or, for some other reason, you would like a static executable you can just set this variable to a blank ("SOEXT="). \subsection BlasAndLapack BLAS and LAPACK @@ -253,7 +253,7 @@ We tried to keep PLUMED as independent as possible from external libraries and a that require external libraries (e.g. Matheval) are optional. However, to have a properly working version of plumed PLUMED you need BLAS and LAPACK libraries. We would strongly recommend you download these libraries and install them separately so as to have the most efficient possible implementations of the functions contained within -them. However, if you cannot install blas and lapack, you can use the internal ones. +them. However, if you cannot install BLAS and LAPACK, you can use the internal ones. Since version 2.1, PLUMED uses a configure script to detect libraries. In case system LAPACK or BLAS are not found on your system, PLUMED will use the internal replacement. @@ -265,24 +265,24 @@ correctly. Notice first of all that the DYNAMIC_LIB variable in the Makefile.conf should contain the flag necessary to load the BLAS and LAPACK libraries. Typically this will be -llapack -lblas, in some case followed by -lgfortran. Full path specification with -L may be necessary -and on some machines the blas and lapack libraries may not be called -llapack and -lblas. +and on some machines the BLAS and LAPACK libraries may not be called -llapack and -lblas. Everything will depend on your system configuration. 
Some simple to fix further problems include: -- If the linker complains and suggests recompiling lapack with -fPIC, it means that you have static lapack libraries. Either install dynamic lapack libraries -or switch to static compilation of PLUMED by unsetting the SOEXT variable +- If the linker complains and suggests recompiling LAPACK with -fPIC, it means that you have static LAPACK libraries. Either install dynamic LAPACK libraries +or switch to static compilation of PLUMED by stopping to set the SOEXT variable in the configuration file. - If the linker complains about other missing functions (typically starting with "for_" prefix) then you should also link some Fortran libraries. PLUMED is written in C++ and often C++ linkers do not include Fortran libraries by default. - These libraries are required for lapack and blas to work. Please check the documentation of your compiler. + These libraries are required for LAPACK and BLAS to work. Please check the documentation of your compiler. - If the linker complains that dsyevr_ cannot be found, try adding -DF77_NO_UNDERSCORE to CPPFLAGS Notice that "./configure" should automatically try this solution. \subsection installation-vmdplugins VMD trajectory plugins -If you configure PLUMED with VMD's plugins you will be able to read +If you configure PLUMED with the VMD plugins you will be able to read many more trajectory formats. To this aim, you need to download the SOURCE of VMD, which contains a plugins directory. Adapt build.sh and compile it. At @@ -383,7 +383,7 @@ Notice that the compiled executable, which now sits in 'src/lib/plumed', relies on other resource files present in the compilation directory. This directory should thus stay in the correct place. One should thus not rename or delete it. In fact the path to the PLUMED root directory is -hardcoded in the plumed executable as can be verified using +hard coded in the plumed executable as can be verified using \verbatim > plumed info --root \endverbatim @@ -431,7 +431,7 @@ only be relinked if one changes the install prefix during when typing `make inst If root user does not have access to compilers, "sudo -E make install" might solve the issue. -Upon install, executables are copied to $prefix/bin, libraries to $prefix/lib, +Upon install, the executable is copied to $prefix/bin, libraries to $prefix/lib, include files to $prefix/include, and documentation to $prefix/shared/doc/plumed. Additionally, a directory $prefix/lib/plumed is created containing several other files, including @@ -444,7 +444,7 @@ the module framework (http://modules.sourceforge.net). An ad hoc generated module file for PLUMED can be found in $prefix/lib/plumed/src/lib/modulefile Just edit it as you wish and put it in your modulefile directory. This will also allow you to install multiple PLUMED versions on your machine and to -switch amongst them. If you do not want to use modules, you can +switch among them. If you do not want to use modules, you can still have a look at the modulefile we did so as to know which environment variables should be set for PLUMED to work correctly. @@ -487,12 +487,12 @@ In the section \subpage CodeSpecificNotes you can find information specific for To patch your MD code, you should have already installed PLUMED properly. This is necessary as you need to have the command "plumed" in your execution -path. As described above this executible will be in your paths if plumed was +path. 
As described above this executable will be in your paths if plumed was installed or if you have run sourceme.sh Once you have a compiled and working version of plumed, follow these steps to add it to an MD code -- Configure and compile your MD enginge (look for the instructions in its documentation). +- Configure and compile your MD engine (look for the instructions in its documentation). - Test if the MD code is working properly. - Go to the root directory for the source code of the MD engine. - Patch with PLUMED using: @@ -586,7 +586,7 @@ due to impossibility to use the system c++ library. For this reason, only clang Variants can be also used to compile with debug flags (`+debug`), to pick a linear algebra library (e.g. `+openblas`) and to enable all optional modules (`+allmodules`). -Notice that the default variant installed with `sudo port install plumed` is shipped as a precompiled +Notice that the default variant installed with `sudo port install plumed` is shipped as a compiled binary, which is significantly faster to install. In addition, we provide a developer version (typically: a later version not yet considered as stable) @@ -615,7 +615,7 @@ true, compilation will fail. Also notice that gromacs is patched with plumed in runtime mode but that the path of libplumedKernel.dylib in the MacPorts tree -is hardcoded. As a consequence: +is hard coded. As a consequence: - If gromacs is run with `PLUMED_KERNEL` environment variable unset (or set to empty), then the MacPorts plumed is used. @@ -634,7 +634,7 @@ consider the following suggestions. First of all, we highly recommend using the module file that PLUMED provides to set up the environment. Just edit it as necessary to make it suitable for your environment. -Notice that PLUMED can take advantage of many additionaly features if specific libraries are available upon +Notice that PLUMED can take advantage of many additional features if specific libraries are available upon compiling it. If someone uses gromacs, install libxdrfile first and check if PLUMED `./configure` is detecting it. PLUMED will be able to write trr/xtc file, simplifying analysis. diff --git a/user-doc/Introduction.md b/user-doc/Introduction.md index a64705261..2d26e817e 100644 --- a/user-doc/Introduction.md +++ b/user-doc/Introduction.md @@ -1,7 +1,7 @@ \mainpage Introduction PLUMED is a plugin that works with a large number of molecular dynamics codes (\ref codes ). -It can be used to analyse features of the dynamics on-the-fly or to perform a wide variety of free energy methods. +It can be used to analyze features of the dynamics on-the-fly or to perform a wide variety of free energy methods. PLUMED can also work as a \ref tools to perform analysis on trajectories saved in most of the existing formats. If PLUMED is useful for your work please read and cite \cite plumed2, if you are interested in the PLUMED 1 original publication please read and cite \cite plumed1 . @@ -23,13 +23,13 @@ This is the user manual - if you want to modify PLUMED or to understand how it \section codes Codes interfaced with PLUMED -PLUMED can be incorporated into an MD code and used to analyse or bias a molecular dynamics run on the fly. +PLUMED can be incorporated into an MD code and used to analyze or bias a molecular dynamics run on the fly. Some MD code could already include calls to the PLUMED library and be PLUMED-ready in its original distribution. 
As far as we know, the following MD codes can be used with PLUMED out of the box: - [AmberTools](http://ambermd.org/), sander module, since version 15. - [CP2K](http://www.cp2k.org), since Feb 2015. -- [ESPResSo](http://espressomd.org), in a Plumedized version that can be found +- [ESPResSo](http://espressomd.org), in a version that has been patched with PLUMED can be found [here](http://davidebr.github.io/espresso/). - [PINY-MD](http://github.com/TuckermanGroup/PINY), in its plumed branch. - [IPHIGENIE](http://sourceforge.net/projects/iphigenie/). @@ -49,7 +49,7 @@ Additionally, we provide patching procedures for the following codes: @CODESL@ Alternatively, one -can use PLUMED as a \ref tools for postprocessing the results from molecular dynamics +can use PLUMED as a \ref tools for post processing the results from molecular dynamics or enhanced sampling calculations. Notice that PLUMED can be used as an analysis tool also from the following packages: - [PLUMED-GUI](http://github.com/tonigi/vmd_plumed) is a [VMD](http://www.ks.uiuc.edu/Research/vmd/) plugin that computes PLUMED collective variables. diff --git a/user-doc/Miscelaneous.md b/user-doc/Miscelaneous.md index 3742fca0a..80684c757 100644 --- a/user-doc/Miscelaneous.md +++ b/user-doc/Miscelaneous.md @@ -16,7 +16,7 @@ \page comments Comments -If you are an organised sort of person who likes to remember what the hell you were trying to do when you ran a +If you are an organized sort of person who likes to remember what the hell you were trying to do when you ran a particular simulation you might find it useful to put comments in your input file. In PLUMED you can do this as comments can be added using a # sign. On any given line everything after the # sign is ignored so erm... yes add lines of comments or trailing comments to your hearts content as shown below (using Shakespeare is optional): @@ -161,7 +161,7 @@ be able to benefit of these features without ever changing your bash configurati In case you have multiple versions of PLUMED installed in separate env modules there is nothing more to do. However, if you have have multiple versions of PLUMED installed with different suffixes you should -consistently add more lines to your profile file. For instance, if you installed executables named +consistently add more lines to your profile file. For instance, if you installed two executables named `plumed` and `plumed_mpi` your configuration file should look like: \verbatim _plumed() { eval "$(plumed --no-mpi completion 2>/dev/null)";} @@ -172,7 +172,7 @@ complete -F _plumed_mpi -o default plumed_mpi \page VimSyntax Using VIM syntax file -For the impatients: +For the impatient use: - Add the following to your .vimrc file: \verbatim " Enable syntax @@ -192,7 +192,7 @@ For the impatients: :set ft=plumed \endverbatim This will also enable autocompletion. Use `<CTRL-X><CTRL-O>` to autocomplete a word. -- If you want to fold multiline statements, type +- If you want to fold multi-line statements, type \verbatim :setlocal foldmethod=syntax \endverbatim @@ -201,7 +201,7 @@ This will also enable autocompletion. Use `<CTRL-X><CTRL-O>` to autocomplete a w Typing `:PHelp` again (or pushing `<F2>`) you will close that window. With `<CTRL-W><CTRL-W>` you go back and forth between the two windows. - When you open a file starting with `#! 
FIELDS`, VIM will automatically understand it - is a PLUMED outpt file (VIM filetype = plumedf) and will color fields and data columns with + is a PLUMED output file (VIM filetype = plumedf) and will color fields and data columns with alternating colors. Typing `:PPlus` and `:PMinus` (or pushing `<F3>` and `<F4>`) you can move a highlighted column. @@ -233,7 +233,7 @@ we recommend the following procedure: :let &runtimepath.=','.$PLUMED_VIMPATH \endverbatim -The modulefile provided with PLUMED should set the PLUMED_VIMPATH environemnt variable +The modulefile provided with PLUMED should set the PLUMED_VIMPATH environment variable to the proper path. Thus, when working with a given PLUMED module loaded, you should be able to enable to proper syntax by just typing @@ -250,7 +250,7 @@ the following command would give you the optimal flexibility: :let &runtimepath.=','.$PLUMED_VIMPATH.',/opt/local/lib/plumed/vim/' \endverbatim The environment variable `PLUMED_VIMPATH`, if set, will take the precedence. -Otherwise, vim will resort to the hardcoded path. +Otherwise, vim will resort to the hard coded path. In this case we assumed that there is a PLUMED installed in `/opt/local/` (e.g. using MacPorts), but you can override it sourcing a `sourceme.sh` file in the compilation directory or loading a PLUMED module with `module load plumed`. @@ -274,7 +274,7 @@ Now, every time you open this file, you will see it highlighted. The syntax file contains a definition of all possible PLUMED actions and keywords. It is designed to allow for a quick validation of the PLUMED input file before running it. As such, all the meaningful words in the input should be highlighted: -- Valid action names (such as `METAD`) and labels (such as `metad:` or `LABEL=metad`) will be +- Valid action names (such as `METAD`) and labels (such as `m:` or `LABEL=m`) will be highlighted in the brightest way (`Type` in VIM). Those are the most important words. - Keyword and flag names (such as `ATOMS=` or `COMPONENTS` when part of the action \ref DISTANCE) will be highlighted with a different color (`Statement` in VIM). @@ -432,7 +432,7 @@ It is also possible to highlight a specific field of the file. Typing :5PCol \endverbatim you will highlight the fifth field. Notice that in the `FIELDS` line (the first line of the file) -the 7th word of the line will be highlighted, which is the one containing the name of the field. +the seventh word of the line will be highlighted, which is the one containing the name of the field. This allows for easy matching of values shown in the file and tags provided in the `FIELDS` line. The highlighted column can be moved back and forth using `:PPlus` and `:PMinus`. @@ -479,7 +479,7 @@ plus a "toBeIncluded.dat" file RESTRAINT ARG=dist \endplumedfile -However, when you do this it is important to recognise that \ref INCLUDE is a real directive that is only resolved +However, when you do this it is important to recognize that \ref INCLUDE is a real directive that is only resolved after all the \ref comments have been stripped and the \ref ContinuationLines have been unrolled. This means it is not possible to do things like: @@ -493,8 +493,8 @@ RESTRAINT ARG=dist You can introduce new functionality into PLUMED by placing it directly into the src directory and recompiling the PLUMED libraries. Alternatively, if you want to keep your code independent from the rest of PLUMED (perhaps -so you can release it independely - we won't be offended), then you can create your own dynamic library. 
To use this -in conjuction with PLUMED you can then load it at runtime by using the \subpage LOAD keyword as shown below: +so you can release it independently - we won't be offended), then you can create your own dynamic library. To use this +in conjunction with PLUMED you can then load it at runtime by using the \subpage LOAD keyword as shown below: \plumedfile LOAD FILE=library.so @@ -514,7 +514,7 @@ very intensive development of the code of if you are running on a computer with \page exchange-patterns Changing exchange patterns in replica exchange -Using the \subpage RANDOM_EXCHANGES keyword it is possible to make exchanges betweem randomly +Using the \subpage RANDOM_EXCHANGES keyword it is possible to make exchanges between randomly chosen replicas. This is useful e.g. for bias exchange metadynamics \cite piana. \page special-replica-syntax Special replica syntax @@ -620,7 +620,7 @@ RESTRAINT ... ... \endplumedfile -In short, whenever there are keywords that should vary across replicas, you should set them usign the `@replicas:` keyword. +In short, whenever there are keywords that should vary across replicas, you should set them using the `@replicas:` keyword. As mentioned above, you can always use the old syntax with separate input file, and this is recommended when the number of keywords that are different is large. @@ -660,7 +660,7 @@ yet to integer numbers (e.g.: the PACE argument of \ref METAD). </TR> <TR> <TD WIDTH="5%"> -\subpage Files </TD><TD> </TD><TD> Dealing with Input/Outpt +\subpage Files </TD><TD> </TD><TD> Dealing with Input/Output </TD> </TR> </TABLE> diff --git a/user-doc/Modules.md b/user-doc/Modules.md index 83f7d4751..65565657e 100644 --- a/user-doc/Modules.md +++ b/user-doc/Modules.md @@ -2,7 +2,7 @@ The functionality in PLUMED 2 is divided into a small number of modules. Some users may only wish to use a subset of the functionality available within the -code while others may wish to use some of PLUMED's more complicated features. +code while others may wish to use some of the more complicated features that are available. For this reason the plumed source code is divided into modules, which users can activate or deactivate to their hearts content. @@ -35,8 +35,8 @@ If you repeat the `--enable-modules` keyword only the last instance will be used There are also some shortcuts available: - `./configure --enable-modules=all` to enable all optional modules. This includes the maximum number of features in PLUMED, including modules that might not be properly functional. -- `./configure --enable-modules=none` or `./configure --disable-modules` to disable all optional modules. This produces a minimalistic -PLUMED which can be used as a library but has no command line tools and no collective variables or biasing methods. +- `./configure --enable-modules=none` or `./configure --disable-modules` to disable all optional modules. This produces a minimal +PLUMED which can be used as a library but which has no command line tools and no collective variables or biasing methods. - `./configure --enable-modules=reset` or `./configure --enable-modules` to enable the default modules. The two kinds of syntax can be combined and, for example, `./configure --enable-modules=none:colvar` will result diff --git a/user-doc/Performances.md b/user-doc/Performances.md index c45a7350b..a963d9c62 100644 --- a/user-doc/Performances.md +++ b/user-doc/Performances.md @@ -7,14 +7,14 @@ variables, bias, on-the-fly analysis, etc in a way that is compatible with a num different molecular dynamics codes. 
This means that there cannot be a single strategy to speed up all the possible calculations. -PLUMED makes use of MPI and OpenMP to parallelise some of its functions, try to always -compile it with these features enabled. Furthermore, newer compilers with proper optimisation -flags can provide a drammatic boost to performances. +PLUMED makes use of MPI and OpenMP to parallelize some of its functions, so try to always +compile it with these features enabled. Furthermore, newer compilers with proper optimization +flags can provide a dramatic boost to performances. -PLUMED collects atoms from an external code and sends back forces, so it is key to minimise +PLUMED collects atoms from an external code and sends back forces, so it is key to minimize the effect of PLUMED on highly parallel calculations to keep to the minimum the number of atoms used by PLUMED at every calculation step. The less is the number of atoms you need to send -to PLUMED the less will be the overhead in the comunication between PLUMED and the code. +to PLUMED the less will be the overhead in the communication between PLUMED and the code. In the following you can find specific strategies for specific calculations, these could help in taking the most by using PLUMED for your simulations. @@ -35,13 +35,13 @@ Since version 4.6.x GROMACS can run in an hybrid mode making use of both your CPU and your GPU (either using CUDA or OpenCL for newer versions of GROMACS). The calculation of the short-range non-bonded interactions is performed on the GPU while long-range and bonded interactions are at the -same time calculated on the CPU. By varing the cut-off for short-range -interactions GROMACS can optimise the balance between GPU/CPU loading +same time calculated on the CPU. By varying the cut-off for short-range +interactions GROMACS can optimize the balance between GPU/CPU loading and obtain amazing performances. GROMACS patched with PLUMED takes into account PLUMED in its load-balancing, adding the PLUMED timings to the one resulting from bonded interactions and long- -range interactions. This means that the CPU/GPU balance will be optimised +range interactions. This means that the CPU/GPU balance will be optimized automatically to take into account PLUMED! It is important to notice that the optimal setup to use GROMACS alone @@ -143,20 +143,20 @@ Whenever you have a multicolvar action such as: COORDINATIONNUMBER SPECIES=1-100 SWITCH={RATIONAL R_0=1. D_MAX=3.0} MORE_THAN={RATIONAL R_0=6.0 NN=6 MM=12 D_0=0} \endplumedfile -You will get a collosal speedup by specifying the D_MAX keyword in all switching functions that act on distances. +You will get a colossal speedup by specifying the D_MAX keyword in all switching functions that act on distances. D_MAX tells PLUMED that the switching function is strictly zero if the distance is greater than this value. As a result -PLUMED knows that it does not need to calculate these zero terms in what are essentially sums with a very lage number of terms. +PLUMED knows that it does not need to calculate these zero terms in what are essentially sums with a very large number of terms. In fact when D_MAX is set PLUMED uses linked lists when calculating these coordination numbers, which is what gives you such a dramatic increase in performance.
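As a minimal sketch of this point (reusing the 100-atom SPECIES group and the switching-function parameters from the snippet above; the `slow` and `fast` labels are purely illustrative and the `.morethan` components follow the usual multicolvar naming), the only difference between a slow input and a fast one is the D_MAX entry in the switching function:
\plumedfile
# without D_MAX every pair of atoms in SPECIES is evaluated at every step
slow: COORDINATIONNUMBER SPECIES=1-100 SWITCH={RATIONAL R_0=1.} MORE_THAN={RATIONAL R_0=6.0 NN=6 MM=12 D_0=0}
# with D_MAX pairs further apart than 3.0 are skipped using linked lists
fast: COORDINATIONNUMBER SPECIES=1-100 SWITCH={RATIONAL R_0=1. D_MAX=3.0} MORE_THAN={RATIONAL R_0=6.0 NN=6 MM=12 D_0=0}
PRINT ARG=slow.morethan,fast.morethan FILE=colvar STRIDE=100
\endplumedfile
The two actions measure the same quantity up to the truncation of the switching function at D_MAX, but only the second benefits from the linked-list optimization described above.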
-\page Neighbour Neighbour Lists +\page Neighbour Neighbor Lists -Collective variables that can be speed up making us of neighbour lists: +Collective variables that can be sped up by making use of neighbor lists: - \ref COORDINATION - \ref DHENERGY - \ref PATHMSD -By tuning the cut-off for the neighbour list and the frequency for the recalculation of the list it is +By tuning the cut-off for the neighbor list and the frequency for the recalculation of the list it is possible to balance between accuracy and performances. Notice that for \ref COORDINATION and \ref DHENERGY using a neighbor list could imply that a smaller @@ -165,7 +165,7 @@ number of atoms are requested to the host MD engine. This is typically true when When the neighbor list is used, only the water atoms close to the ligand will be requested at each step. \warning -Notice that the calculation of the neighbour list is not not parallelized for \ref COORDINATION and \ref DHENERGY. +Notice that the calculation of the neighbor list is not parallelized for \ref COORDINATION and \ref DHENERGY. As a consequence, if you run with many processors and/or OpenMP threads, the neighbor list might even make the calculation slower. @@ -192,20 +192,20 @@ or as well mdrun -plumed -ntomp 8 \endverbatim -In the first case the number of OpenMP threads used by plumed is 8 while the one used by gromacs can be 1 or something else, this is usually suboptimal. +In the first case the number of OpenMP threads used by plumed is 8 while the one used by gromacs can be 1 or something else, this is usually sub-optimal. In the second case GROMACS and plumed will use the same number of OpenMP threads. Notice that: - This option is likely to improve the performance, but could also slow down the code in some case. -- Results could be slightly different because of numerical roundoff and +- Results could be slightly different because of numerical round-off and different order in summations. This should be harmless. - The optimum number of threads is not necessary "all of them", nor should be equal to the number of threads used to parallelize MD. -- Only a few CVs are parallelized with opemMP (currently, \ref COORDINATION and +- Only a few CVs are parallelized with OpenMP (currently, \ref COORDINATION and \ref DHENERGY). - You might want to tune also the environmental variable PLUMED_CACHELINE_SIZE, - by default 512, to set the size of cachelines on your machine. This is used + by default 512, to set the size of cache lines on your machine. This is used by PLUMED to decrease the number of threads to be used in each loop so as to avoid clashes in memory access. This variable is expected to affect performance only, not results. @@ -214,8 +214,8 @@ Notice that: \page Secondary Secondary Structure Secondary Structure collective variables (\ref ALPHARMSD, \ref PARABETARMSD and \ref ANTIBETARMSD) -can be particulary demanding if you want to calculate them for all the residues of a protein. -This is particularty true for the calculation of beta structures. +can be particularly demanding if you want to calculate them for all the residues of a protein. +This is particularly true for the calculation of beta structures. The FIRST thing to speed up \ref PARABETARMSD and \ref ANTIBETARMSD is to use the keyword STRANDS_CUTOFF (i.e.
STRANDS_CUTOFF=1), in this way only a subset of possible fragments, the one @@ -292,7 +292,7 @@ Notice the usage of `x2` as a variable for the switching function (see \ref swit Once you have prepared your plumed input file you can run a test simulation, or use driver, to see which collective variable, function, bias or analysis is consuming more time and can thus be the target for a different definition (use less atoms, change relevant parameters, -or just use somenthing else) +or just use something else). To have an accurate timing of your input you can use the \ref DEBUG DETAILED_TIMERS. diff --git a/user-doc/Regex.md b/user-doc/Regex.md index 29c610abb..34b596b15 100644 --- a/user-doc/Regex.md +++ b/user-doc/Regex.md @@ -22,7 +22,7 @@ will cause both the d1.x and d1.y components of the DISTANCE action to be printe Notice that selection does not happen in alphabetic order, nor in the order in which `[xy]` are listed, but rather in the order in which the two variables have been created by PLUMED. Also notice that the -`.` character must be escaped as `\.` in order to interpret it as a literal `.`. An unescaped dot is a wildcard which is matched by any character, +`.` character must be escaped as `\.` in order to interpret it as a literal `.`. An un-escaped dot is a wildcard which is matched by any character. So as an example \plumedfile d1: DISTANCE ATOMS=1,2 COMPONENTS @@ -63,7 +63,7 @@ PRINT ARG=(d1\.[xy]|t[0-9]) STRIDE=100 FILE=colvar FMT=%8.4f this selects the same set of arguments as the previous example. \note -Be careful you do not confuse regular expressions, which are triggered by the parethesis `()` and only available when +Be careful you do not confuse regular expressions, which are triggered by the parentheses `()` and only available when PLUMED has been compiled with the regex library, with the capability of PLUMED to use `*` as a wildcard in arguments: \plumedfile d1: DISTANCE ATOMS=1,2 COMPONENTS diff --git a/user-doc/VES.md b/user-doc/VES.md index 76fd7c1af..1156422c8 100644 --- a/user-doc/VES.md +++ b/user-doc/VES.md @@ -11,7 +11,7 @@ based on _Variationally Enhanced Sampling_ (VES) \cite Valsson-PRL-2014. The VES code is developed by [Omar Valsson](http://www.valsson.info), see the [homepage of the VES code](http://www.ves-code.org) for further information. -The VES code is an optional module that needes to be enabled when configuring the +The VES code is an optional module that needs to be enabled when configuring the compilation of PLUMED by using the '\-\-enable-modules=ves' (or '\-\-enable-modules=all') flag when running the 'configure' script. @@ -95,15 +95,15 @@ Lugano, February 14-17, 2017. Metadynamics: -[Enhancing Important Fluctuations: Rare Events and Metadynamics from a Conceptual Viewpoint](https://doi.org/10.1146/annurev-physchem-040215-112229), Annu. Rev. Phys. Chem. 2016 +[Enhancing Important Fluctuations: Rare Events and Metadynamics from a Conceptual Viewpoint](https://doi.org/10.1146/annurev-physchem-040215-112229), Annual Review of Physical Chemistry 2016 Variationally Enhanced Sampling: -[Variational Approach to Enhanced Sampling and Free Energy Calculations](https://doi.org/10.1103/PhysRevLett.113.090601), Phys. Rev. Lett. 2014 +[Variational Approach to Enhanced Sampling and Free Energy Calculations](https://doi.org/10.1103/PhysRevLett.113.090601), Physical Review Letters 2014 -[Variationally Optimized Free-Energy Flooding for Rate Calculation](https://doi.org/10.1103/PhysRevLett.115.070601), Phys. Rev. Lett.
2015 +[Variationally Optimized Free-Energy Flooding for Rate Calculation](https://doi.org/10.1103/PhysRevLett.115.070601), Physical Review Letters 2015 diff --git a/user-doc/bibliography.bib b/user-doc/bibliography.bib index 15561746e..16689de15 100644 --- a/user-doc/bibliography.bib +++ b/user-doc/bibliography.bib @@ -2808,5 +2808,16 @@ year = {2013}, doi = {10.1063/1.4818005} } +@article{dimer-metad, +author = {Marco Nava and Ferruccio Palazzesi and Claudio Perego and Michele Parrinello }, +title = {Dimer Metadynamics}, +journal = {Journal of Chemical Theory and Computation}, +volume = {13}, +number = {2}, +pages = {425-430}, +year = {2017}, +doi = {10.1021/acs.jctc.6b00691} +} + @Comment{jabref-meta: databaseType:bibtex;} diff --git a/user-doc/tutorials/others/isdb-1.txt b/user-doc/tutorials/others/isdb-1.txt index 7ffe98eba..64f951afc 100644 --- a/user-doc/tutorials/others/isdb-1.txt +++ b/user-doc/tutorials/others/isdb-1.txt @@ -1,9 +1,9 @@ /** -\page isdb-1 ISDB: setting up a Metadynamic Metainference simulation +\page isdb-1 ISDB: setting up a Metadynamics Metainference simulation \section isdb-1-aims Aims -The aim of this tutorial is to introduce the users to the ISDB module and in particular to Metadynamic Metainfenrence \cite Bonomi:2016ip \cite Bonomi:2016ge ensemble determination. +The aim of this tutorial is to introduce the users to the ISDB module and in particular to Metadynamics Metainference \cite Bonomi:2016ip \cite Bonomi:2016ge ensemble determination. We will reproduce the setup of the simulation for a simple system \cite Lohr:2017gc . For a general overview of the problem of ensembles determination please read \cite Bonomi:2017dn . \section isdb-1-objectives Objectives @@ -58,7 +58,7 @@ This can be run using gromacs (unfortunately recent versions of gromacs do not s gmx_mpi mdrun -s run.tpr -table table.xvg -tablep table.xvg -plumed plumed-eef1.dat -v \endverbatim -In order to have a converged sampling for this reference ensemble calculation it is usefull to setup a Metadynamics calculation. In particular we will use \ref PBMETAD because it is then a natural choice for Metadynamic Metainference later. The following input file is meant to be appended to the former. +In order to have a converged sampling for this reference ensemble calculation it is useful to set up a Metadynamics calculation. In particular we will use \ref PBMETAD because it is then a natural choice for Metadynamics Metainference later. The following input file is meant to be appended to the former. \plumedfile # CVs, Psi9, Phi1 are not defined @@ -116,7 +116,7 @@ In this case we are running a multiple-replica simulation where the sampling is mpiexec -np 14 gmx_mpi mdrun -s topolnew -multi 14 -plumed plumed-eef1-pbmetad.dat -table table.xvg -tablep table.xvg >& log.out & \endverbatim -\section isdb-1-m_m Metadynamic Metainference +\section isdb-1-m_m Metadynamics Metainference The former simulations should provide a converged (check for this) ensemble for the peptide. As shown in \cite Lohr:2017gc the agreement with the multiple avaible NMR experimental data is not perfect. In order to generate an ensemble compatible with most of the available experimetnal data it is possible to include them in the simulation using \ref METAINFERENCE . To do so the forward models for the data sets should be defined in the input file. In this case we have backbone chemical shifts, \ref CS2BACKBONE ; residual dipolar couplings for two bonds, \ref RDC ; and J-couplings for multiple atoms, \ref JCOUPLING.
Once the forward models are defined for the data sets, the calculated data together with the corresponding experimental values can be used to calculate the metainference score. The metainference score is additive so it can be splitted into multiple \ref METAINFERENCE entries. In this case we are using two metainference entries for the two sets of RDCs because these are compared with the experimental data modulo a constant that should be unique each data set. Then we use one metainference for all the jcouplings and another one for the chemical shifts. In this latter case we use a different noise model, i.e. NOISE=MOUTLIERS because the forward model for chemical shifts can result in systematic errors for some of them. @@ -332,6 +332,6 @@ mpiexec -np 14 gmx_mpi mdrun -s topolnew -multi 14 -plumed plumed-eef1-pbmetad-m link: @subpage isdb-1 -description: This tutorial show an example on how to use PLUMED-ISDB to run Metadynamic Metainference +description: This tutorial shows an example of how to use PLUMED-ISDB to run Metadynamics Metainference additional-files: isdb-1 -- GitLab