Switch to the new generalized interface to LBFGS. Bug fix in lbfgsb.py: eliminate duplicate calculation of function and gradients. Eradicate redundant instances of lbfgs classes in several modules.
pafonine committed Nov 21, 2024
1 parent a4bb792 commit b178f40
Showing 8 changed files with 127 additions and 310 deletions.
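
Both file diffs below move their local minimizer wrappers onto a single calculator-driven interface, scitbx.minimizers.lbfgs. The sketch that follows is not part of the commit; assuming the diffs show the full protocol, it only illustrates what such a calculator looks like: it holds the current parameters in self.x, exposes update(x), target() and gradients(), and carries bound_flags, lower_bound and upper_bound when it is intended for the bounded mode='lbfgsb' run. The example_calculator name and its quadratic objective are invented for illustration.

import scitbx.minimizers
from cctbx.array_family import flex

class example_calculator(object):
  # Illustrative calculator; method and attribute names follow the new
  # calculator in cctbx/maptbx/bcr/bcr.py shown below.

  def __init__(self, x, bound_flags=None, lower_bound=None, upper_bound=None):
    self.x = flex.double(x)
    # Only needed for mode='lbfgsb'; the diffs use flex.int(n, 2) per parameter.
    self.bound_flags = bound_flags
    self.lower_bound = lower_bound
    self.upper_bound = upper_bound

  def update(self, x):
    # The minimizer hands back the current parameter vector.
    self.x = x

  def target(self):
    # Illustrative objective: f(x) = sum_i (x_i - 1)^2
    return flex.sum(flex.pow2(self.x - 1.0))

  def gradients(self):
    # df/dx_i = 2*(x_i - 1)
    return 2.0 * (self.x - 1.0)

# Unbounded run, mirroring the mode='lbfgs' branch added in mosaic.py:
calc = example_calculator(x=[4.0, -3.0, 0.5])
res = scitbx.minimizers.lbfgs(mode='lbfgs', max_iterations=100, calculator=calc)
print(list(res.x))  # expected to approach [1.0, 1.0, 1.0]
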
62 changes: 19 additions & 43 deletions cctbx/maptbx/bcr/bcr.py
@@ -4,7 +4,7 @@
from libtbx import adopt_init_args

from cctbx.array_family import flex
from mmtbx.ncs import tncs
import scitbx.minimizers

# Adapted by Pavel Afonine. This is a verbatim copy of the original code
# supplied by A. Urzhumtsev on September 12, 2024; dec3D, version 7.6
@@ -48,47 +48,21 @@ def chi(B, R, r, b_iso):

class calculator(object):

def __init__(self, npeak,dens,yc,dist,nfmes, x, mdist,edist):
def __init__(self, npeak,dens,yc,dist,nfmes, x, mdist,edist,
bound_flags, lower_bound, upper_bound):
adopt_init_args(self, locals())
self.x = flex.double(x)

def target_and_gradients(self, x):
def update(self, x):
self.x = x
target = FuncFit(self.x, self.npeak, self.dens, self.yc, self.dist,
self.mdist,self.edist, self.nfmes)
gradients = GradFit(self.x, self.npeak, self.dens, self.yc, self.dist,

def gradients(self):
return flex.double(GradFit(self.x, self.npeak, self.dens, self.yc, self.dist,
self.mdist,self.edist, self.nfmes))

def target(self):
return FuncFit(self.x, self.npeak, self.dens, self.yc, self.dist,
self.mdist,self.edist, self.nfmes)
return target, flex.double(gradients)

class minimizer_bound(object):
def __init__(self,
calculator,
use_bounds,
lower_bound,
upper_bound,
max_iterations):
adopt_init_args(self, locals())
self.x = self.calculator.x
self.n = self.x.size()
self.n_func_evaluations = 0
self.max_iterations = max_iterations

def run(self):
self.minimizer = tncs.lbfgs_run(
target_evaluator = self,
use_bounds = self.use_bounds,
lower_bound = self.lower_bound,
upper_bound = self.upper_bound,
max_iterations = self.max_iterations)
self()
return self

def __call__(self):
self.n_func_evaluations += 1
f, g = self.calculator.target_and_gradients(x = self.x)
self.f = f
self.g = g
return self.x, self.f, self.g

#============================
def get_BCR(dens,dist,dmax,mxp,epsc,epsp=0.000,edist=1.0E-13,kpres=1,kprot=3,nfmes=None):
@@ -233,18 +207,20 @@ def RefineBCR(dens,dist,mdist,edist,bpeak,cpeak,rpeak,npeak,bmin,cmin,rmin,nfmes
for it in range(1,3):
if it > 1:
xc = res.x
CALC = calculator(npeak,dens,yc,dist,nfmes, xc, mdist,edist)
lbound = []
ubound = []
for b in bcrbounds:
lbound.append(b[0])
ubound.append(b[1])
res = minimizer_bound(
calculator = CALC,
use_bounds = 2,

CALC = calculator(npeak,dens,yc,dist,nfmes, xc, mdist,edist,
bound_flags = flex.int(len(xc), 2),
lower_bound = lbound,
upper_bound = ubound,
max_iterations = 500).run().calculator
upper_bound = ubound)

res = scitbx.minimizers.lbfgs(
mode='lbfgsb', max_iterations=500, calculator=CALC)

res.x = list(res.x)

if 0: # SciPy analogue. Works with Python 3 only.
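
For the bounded branch used in RefineBCR above, the old minimizer_bound wrapper (use_bounds = 2 plus explicit bound lists handed to tncs.lbfgs_run) is replaced by putting the bounds on the calculator itself and selecting mode='lbfgsb'. Below is a self-contained sketch of that calling convention; the bounded_quadratic class, its objective and the numeric bounds are invented, while bound_flags = flex.int(n, 2) and the plain-list bounds mirror the new RefineBCR code.

import scitbx.minimizers
from cctbx.array_family import flex

class bounded_quadratic(object):
  # Invented example; only the attribute/method names mirror bcr.py's calculator.
  def __init__(self, x, lower_bound, upper_bound):
    self.x = flex.double(x)
    # 2 for every parameter, as in RefineBCR: both a lower and an upper bound
    # (matching the old use_bounds = 2).
    self.bound_flags = flex.int(self.x.size(), 2)
    self.lower_bound = lower_bound   # plain Python lists, as in the diff above
    self.upper_bound = upper_bound

  def update(self, x):
    self.x = x

  def target(self):
    # f(x) = sum_i (x_i - 5)^2; the unconstrained minimum lies outside the bounds.
    return flex.sum(flex.pow2(self.x - 5.0))

  def gradients(self):
    return 2.0 * (self.x - 5.0)

CALC = bounded_quadratic(x=[0.0, 0.0], lower_bound=[-1.0, -1.0],
                         upper_bound=[2.0, 2.0])
res = scitbx.minimizers.lbfgs(mode='lbfgsb', max_iterations=500, calculator=CALC)
print(list(res.x))  # expected to end up at the upper bound, [2.0, 2.0]
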
79 changes: 17 additions & 62 deletions mmtbx/bulk_solvent/mosaic.py
@@ -11,7 +11,6 @@
asu_map_ext = bp.import_ext("cctbx_asymmetric_map_ext")
from libtbx import group_args
from mmtbx import bulk_solvent
from mmtbx.ncs import tncs
from collections import OrderedDict
import mmtbx.f_model
import sys
@@ -24,6 +23,8 @@
ext = bp.import_ext("mmtbx_masks_ext")
mosaic_ext = bp.import_ext("mmtbx_mosaic_ext")

from scitbx import minimizers

APPLY_SCALE_K1_TO_FOBS = False

def moving_average(x, n):
@@ -40,29 +41,6 @@ def moving_average(x, n):

# Utilities used by algorithm 2 ------------------------------------------------

class minimizer(object):
def __init__(self, max_iterations, calculator):
adopt_init_args(self, locals())
self.x = self.calculator.x
self.cntr=0
exception_handling_params = scitbx.lbfgs.exception_handling_parameters(
ignore_line_search_failed_step_at_lower_bound=True,
)
self.minimizer = scitbx.lbfgs.run(
target_evaluator=self,
exception_handling_params=exception_handling_params,
termination_params=scitbx.lbfgs.termination_parameters(
max_iterations=max_iterations))

def compute_functional_and_gradients(self):
self.cntr+=1
self.calculator.update_target_and_grads(x=self.x)
t = self.calculator.target()
g = self.calculator.gradients()
#print "step: %4d"%self.cntr, "target:", t, "params:", \
# " ".join(["%10.6f"%i for i in self.x]), math.log(t)
return t,g

class minimizer2(object):

def __init__(self, calculator, min_iterations=0, max_iterations=2000):
@@ -101,13 +79,17 @@ def __call__(self, requests_f_and_g, requests_diag):
return self.x, self.f, self.g, self.d

class tg(object):
def __init__(self, x, i_obs, F, use_curvatures):
def __init__(self, x, i_obs, F, use_curvatures,
bound_flags=None, lower_bound=None, upper_bound=None):
self.x = x
self.i_obs = i_obs
self.F = F
self.t = None
self.g = None
self.d = None
self.bound_flags=bound_flags
self.lower_bound=lower_bound
self.upper_bound=upper_bound
# Needed to do sums from small to large to prevent loss
s = flex.sort_permutation(self.i_obs.data())
self.i_obs = self.i_obs.select(s)
@@ -1094,15 +1076,11 @@ def algorithm_2(i_obs, F, x, use_curvatures, use_lbfgsb, macro_cycles=10,
elif(use_curvatures is False): # No curvatures at all
calculator = tg(i_obs = i_obs, F=F, x = x, use_curvatures=False)
if(use_lbfgsb is True):
m = tncs.minimizer(
potential = calculator,
use_bounds = 2,
lower_bound = lower,
upper_bound = upper,
max_iterations = max_iterations,
initial_values = x).run()
m = scitbx.minimizers.lbfgs(
mode='lbfgsb', max_iterations=max_iterations, calculator=calculator)
else:
m = minimizer(max_iterations=max_iterations, calculator=calculator)
m = scitbx.minimizers.lbfgs(
mode='lbfgs', max_iterations=max_iterations, calculator=calculator)
x = m.x
x = x.set_selected(x<0,1.e-6)
elif(use_curvatures is None):
@@ -1111,39 +1089,16 @@
max_iterations=max_iterations, calculator=calculator).run(use_curvatures=True)
x = m.x
x = x.set_selected(x<0,1.e-6)
calculator = tg(i_obs = i_obs, F=F, x = x, use_curvatures=False)
calculator = tg(i_obs = i_obs, F=F, x = x, use_curvatures=False,
bound_flags=flex.int(x.size(),2), lower_bound=lower, upper_bound=upper)
if(use_lbfgsb is True):
m = tncs.minimizer(
potential = calculator,
use_bounds = 2,
lower_bound = lower,
upper_bound = upper,
max_iterations = max_iterations,
initial_values = x).run()
m = scitbx.minimizers.lbfgs(
mode='lbfgsb', max_iterations=max_iterations, calculator=calculator)
else:
m = minimizer(max_iterations=max_iterations, calculator=calculator)
m = scitbx.minimizers.lbfgs(
mode='lbfgs', max_iterations=max_iterations, calculator=calculator)
x = m.x
x = x.set_selected(x<0,1.e-6)

# calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=True)
# m = minimizer2(max_iterations=100, calculator=calculator).run(use_curvatures=True)
#
# calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=False)
# m = tncs.minimizer(
# potential = calculator,
# use_bounds = 2,
# lower_bound = lower,
# upper_bound = upper,
# max_iterations = 100,
# initial_values = calculator.x).run()

#if(use_curvatures):
# for it in range(10):
# m = minimizer(max_iterations=100, calculator=calculator)
# calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures)
# m.x = m.x.set_selected(m.x<0,1.e-3)
# m = minimizer2(max_iterations=100, calculator=calculator).run(use_curvatures=True)
# calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures)
return m.x

def algorithm_3(i_obs, fc, f_masks):
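
In algorithm_2 the two previous wrappers (the module-local scitbx.lbfgs-based minimizer class and tncs.minimizer) collapse into the same generalized call; the only remaining decision is the mode string, with the bounds travelling on the tg calculator when L-BFGS-B is requested. A small distillation of that branch, assuming only what the hunks above show (the run_lbfgs helper name is invented):

import scitbx.minimizers

def run_lbfgs(calculator, use_lbfgsb, max_iterations):
  # Bounded (L-BFGS-B) when requested; the calculator must then carry
  # bound_flags, lower_bound and upper_bound. Otherwise plain L-BFGS.
  mode = 'lbfgsb' if use_lbfgsb else 'lbfgs'
  return scitbx.minimizers.lbfgs(
    mode=mode, max_iterations=max_iterations, calculator=calculator)
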
(Diffs for the remaining 6 changed files in this commit are not shown here.)
