1 """This is a helper module to make use of mcmc.py and mcmc_big.py.
2 It allows you to conveniently run a Monte-Carlo simulation of any
3 kind until it converges (L{stepper.run_to_bottom}) or until it
4 has explored a large chunk of parameter space (L{stepper.run_to_ergodic}).
5
6 It also helps you with logging the process.
7 """
8
9 import sys
10 import pypar as mpi
11 import operator
12 import random
13 from gmisclib import die
14 from gmisclib import Num
15 from gmisclib import mcmc
16 from gmisclib import mcmc_helper as MCH
17
18 from gmisclib.mcmc_helper import TooManyLoops, warnevery, test, logger_template
19
20
24
26 self.count += accepted
27
30
31
33 REDUCE = 2287
34 REDUCE2 = 228
35 q = mpi.broadcast(444, 0)
36 assert q == 444
37 mpi.send(x, root, tag=REDUCE)
38 s = -1
39 if mpi.rank() == 0:
40 for i in range(mpi.size()):
41 s += mpi.receive(i, tag=REDUCE)
42 return mpi.broadcast(s, root)
43
44
45
48 self.erg = 0.0
49 self.last_resetid = -1
50
52 """Increment an accumulator, but reset it to
53 zero if the underlying stepper has called reset().
54 The increment is by the ergodicity estimate, so
55 C{self.erg} should approximate how many times the
56 MCMC process had wandered across the probability
57 distribution.
58 """
59 tmp = x.reset_id()
60 if tmp != self.last_resetid:
61 self.erg = 0.0
62 self.last_resetid = tmp
63 else:
64 self.erg += x.ergodic()
65
66
69
70
72 - def __init__(self, x, maxloops=-1, logger=None):
77
78
80 assert maxloops == -1
81 self.maxloops = -1
82
83
103
104
105 MPIID = 1241
106
107
108
109
110
111
112
113
114
116 mytmp = nc.get() < ncw
117 self.note('_nye pre')
118 ntrue = Reduce(int(mytmp), 0)
119 self.note('_nye post')
120 return ntrue*2 >= mpi.size()
121
122
def _not_at_bottom(self, xchanged, nchg, es, dotchanged, ndot, update_T):
	"""Collective decision: has the MCMC optimization not yet converged?

	Each MPI process computes a local "still working" flag, the flags
	are summed across processes via Reduce(), and the run continues
	while enough processes are still unconverged.
	NOTE(review): the quorum here is a quarter of the processes
	(ntrue*4), while the ergodicity test elsewhere uses half — assumed
	deliberate; confirm against the original author's intent.
	"""
	# Local test: some component changed fewer than nchg times, the
	# ergodicity estimate is still below 1, too few dot-product
	# changes, or (when annealing) temperature still above 1.5.
	still_working = Num.sometrue(Num.less(xchanged, nchg))
	if not still_working:
		still_working = es < 1.0
	if not still_working:
		still_working = dotchanged < ndot
	if not still_working:
		still_working = bool(update_T and self.x.T > 1.5)
	self.note('_nab pre')
	ntrue = Reduce(int(still_working), 0)
	self.note('_nab post')
	# Keep going while at least a quarter of the processes vote "not done".
	return ntrue*4 >= mpi.size()
132
133
def join(self, id):
	"""Synchronization point for all MPI processes.

	Rank 0 broadcasts its id; every process asserts that the id it
	received matches its own, which verifies that all processes are
	at the same join point before proceeding.  Progress is logged
	via self.note() on either side of the broadcast.
	"""
	self.note('pre join %s' % id)
	id_from_root = mpi.broadcast(id, 0)
	assert id_from_root == id
	self.note('post join %s' % id)
139
144
145
147 """Does a parallel evaluation of logp for all items in lop.
148 """
149 nper = len(lop)//mpi.size()
150 r = mpi.rank()
151 mychunk = lop[r*nper:(r+1)*nper]
152 for p in mychunk:
153 q = p.logp()
154 print 'logp=', q, 'for rank', r
155 for r in range(mpi.size()):
156 nc = mpi.broadcast(mychunk, r)
157 lop[r*nper:(r+1)*nper] = nc
158 mpi.broadcast(0, 0)
159
160
163
166
167
169 def test_logp(x, c):
170
171 return -(x[0]-x[1]**2)**2 + 0.001*x[1]**2
172 x = mcmc.bootstepper(test_logp, Num.array([0.0,2.0]),
173 Num.array([[1.0,0],[0,2.0]]))
174 print 'rank=', mpi.rank()
175 thr = stepper(x)
176
177
178
179 for i in range(2):
180 print 'RTC'
181 thr.run_to_change(2)
182 print 'RTE'
183 thr.run_to_ergodic(1.0)
184 print 'DONE'
185 thr.close()
186
187
# Script entry point: run the MPI self-test when executed directly.
if __name__ == '__main__':
	test_opt()
190