Package gmisclib :: Module mcmc_idxr
[frames] | [no frames]

Source Code for Module gmisclib.mcmc_idxr

  1  import math 
  2  import random 
  3  import numpy 
  4   
  5  import mcmc     # from gmisclib 
  6  import mcmc_indexclass as IC 
class probdist_c(object):
    """Abstract base class for one-dimensional probability distributions
    used as MCMC priors.

    Subclasses must implement C{__call__} (draw a sample) and C{logdens}
    (log of the probability density at a point).
    """

    def __call__(self):
        """Sample from the probability distribution.
        @return: the sample
        @rtype: C{float}
        """
        # NotImplementedError is a subclass of RuntimeError, so callers
        # that caught the old RuntimeError still work.
        raise NotImplementedError("Virtual function")

    def logdens(self, x):
        """Return the log(density) of the probability distribution at x.
        @param x: the position where the density should be evaluated.
        @type x: C{float}
        @return: C{log(density)}
        @rtype: C{float}
        @raise mcmc.NotGoodPosition: when the density is not defined at C{x}.
        """
        raise NotImplementedError("Virtual function")
class Fixed(probdist_c):
    """This is essentially a delta-function probability distribution.
    Use this for fixed parameters.
    @note: Software that uses this should ignore C{logdens}.
    """

    def __init__(self, value):
        self.v = value

    def __call__(self):
        # A "sample" from a delta function is always the fixed value.
        return self.v

    # Marker that there is no meaningful density for a fixed parameter;
    # client code checks for None rather than calling it.
    logdens = None

    @property
    def __doc__(self):
        return "Fixed value %s" % str(self.v)
class Weibull(probdist_c):
    """Weibull distribution, in the usual shape/scale parameterization."""

    def __init__(self, scale, shape):
        """@param scale: scale parameter; must be strictly positive.
        @param shape: shape parameter; must be strictly positive.
        @raise ValueError: Bad parameters for the distribution.
        """
        # Strictly positive (the old test allowed zero, which divides
        # by zero in logdens and in random.weibullvariate).
        if not (shape > 0.0 and scale > 0.0):
            raise ValueError("Weibull: shape=%s; scale=%s" % (str(shape), str(scale)))
        self.shape = shape
        self.scale = scale

    def __call__(self):
        return random.weibullvariate(self.scale, self.shape)

    def logdens(self, x):
        """Log-density at C{x}.
        @raise mcmc.NotGoodPosition: when C{x <= 0}, where the density is
            zero (the old code asserted C{x >= 0} and then crashed in
            C{math.log} at C{x == 0}).
        """
        if not (x > 0.0):
            raise mcmc.NotGoodPosition("Weibull: x=%s" % str(x))
        z = x / self.scale
        return -z**self.shape + math.log(z)*(self.shape-1) + math.log(self.shape/self.scale)

    @property
    def __doc__(self):
        return "Weibull(%g,%s)" % (self.scale, self.shape)
class Expo(probdist_c):
    """Exponential distribution"""

    def __init__(self, lmbda):
        """@type lmbda: float
        @param lmbda: M{1/the_mean} (also M{1/the_standard_deviation} )
        @raise ValueError: Bad parameters for the distribution.
        """
        if not (lmbda > 0.0):
            raise ValueError("Expo: lmbda=%s" % str(lmbda))
        self.lmbda = lmbda

    def __call__(self):
        return random.expovariate(self.lmbda)

    def logdens(self, x):
        """Log-density at C{x}.
        @raise mcmc.NotGoodPosition: when C{x <= 0}, outside the support.
        """
        if not (x > 0.0):
            raise mcmc.NotGoodPosition("Expo: x=%s" % str(x))
        return -self.lmbda*x + math.log(self.lmbda)

    @property
    def __doc__(self):
        return "Exponential(%g)" % self.lmbda
class Normal(probdist_c):
    """Normal (Gaussian) distribution"""

    def __init__(self, mu, sigma):
        """@param mu: mean of the distribution.
        @param sigma: standard deviation; must be strictly positive.
        """
        # NOTE(review): sibling classes raise ValueError for bad
        # constructor arguments; this one historically raises
        # mcmc.NotGoodPosition.  Kept unchanged so existing except
        # clauses in callers still match -- confirm before unifying.
        if sigma <= 0.0:
            raise mcmc.NotGoodPosition("Normal: sigma=%s" % str(sigma))
        self.mu = mu
        self.sigma = sigma

    def __call__(self):
        return random.normalvariate(self.mu, self.sigma)

    def logdens(self, x):
        """Normalized log-density of N(mu, sigma) at C{x}."""
        return -(((x-self.mu)/self.sigma)**2)/2.0 - math.log(math.sqrt(2*math.pi)*self.sigma)

    @property
    def __doc__(self):
        return "Normal(%g+-%g)" % (self.mu, self.sigma)
class Uniform(probdist_c):
    """Uniform distribution on the interval [low, high].
    """

    def __init__(self, low, high):
        """
        @raise ValueError: Bad parameters for the distribution.
        """
        if not (low < high):
            raise ValueError("Uniform: low=%s >= high=%s" % (low, high))
        self.low = low
        self.high = high

    def __call__(self):
        return random.uniform(self.low, self.high)

    def logdens(self, x):
        """Log-density at C{x}.

        BUG FIX: the previous implementation returned the density
        C{1/(high-low)} instead of its logarithm, which is what every
        other C{logdens} in this module returns.
        @raise mcmc.NotGoodPosition: when C{x} lies outside [low, high],
            where the density is zero.
        """
        if x < self.low or x > self.high:
            raise mcmc.NotGoodPosition("Uniform: x=%s outside [%s, %s]" % (str(x), str(self.low), str(self.high)))
        return -math.log(self.high - self.low)

    @property
    def __doc__(self):
        return "Uniform(%g,%g)" % (self.low, self.high)
class LogNormal(probdist_c):
    """Lognormal distribution.  A negative C{mu} gives a distribution
    on the negative half-line.
    """

    def __init__(self, mu, sigma):
        """
        @param sigma: The standard deviation of the log of the resulting distribution.
        @param mu: The median of the resulting distribution, I{not} the log of the median.
        @type mu: float
        @type sigma: float
        @raise ValueError: Bad parameters for the distribution.
        """
        if sigma <= 0.0 or not (mu != 0.0):
            raise ValueError("LogNormal: sigma=%s, mu=%s" % (str(sigma), str(mu)))
        self.mu = mu
        self.sigma = sigma

    def __call__(self):
        return self.mu*math.exp(random.normalvariate(0.0, self.sigma))

    def logdens(self, x):
        """Log-density at C{x}.
        @raise mcmc.NotGoodPosition: when C{x} is zero or on the opposite
            side of zero from C{mu}.

        NOTE(review): this is the normal log-density of C{log(x/mu)};
        the lognormal Jacobian term C{-log(|x|)} is absent, so it is not
        the normalized density in x itself.  Preserved as-is because MCMC
        acceptance ratios may rely on the existing form -- confirm intent
        before changing.
        """
        if (x>0.0) != (self.mu>0.0) or not (x != 0.0):
            raise mcmc.NotGoodPosition("LogNormal: x=%s, mu=%s" % (str(x), str(self.mu)))
        z = math.log(x/self.mu)
        return -((z/self.sigma)**2)/2.0 - math.log(math.sqrt(2*math.pi)*self.sigma)

    @property
    def __doc__(self):
        return "Lognormal(%g+-%g)" % (self.mu, self.sigma)
def logp_prior_normalized(x, guess_prob_dist):
    """Log of the (normalized) prior probability of the parameters in C{x}.
    @type guess_prob_dist: list((str, probdist_c))
    @type x: newstem2.indexclass.index_base
    @rtype: float
    @return: the log of the prior probability.
    @raise mcmc.NotGoodPosition: When raised by any of the underlying
        probability distributions.
    """
    lp = 0.0
    gpd = IC.PriorProbDist(guess_prob_dist)
    for key in x.map.keys():
        v = x.p(*key)
        fkey = x._fmt(key)
        vv = gpd[fkey]
        try:
            logp = vv.logdens(v)
        except mcmc.NotGoodPosition as ex:
            # add_args() is presumed to return the annotated exception
            # (it did under the old "raise ex.add_args(...)" idiom) --
            # TODO confirm against mcmc.NotGoodPosition.
            raise ex.add_args("fkey=%s" % fkey, "v=%s" % str(v), "doc=%s" % vv.__doc__)
        lp += logp
    return lp
190 191 -class problem_definition(mcmc.problem_definition):
192 - def __init__(self ):
193 mcmc.problem_definition.__init__(self) 194 self.idxr = None 195 self.cached = -1e30 196 self.ckey = None
197 198
199 - def set_idxr(self, idxr):
200 self.idxr = idxr
201 202
203 - def plot(self, idxr, arg, pylab, inum):
204 raise RuntimeError, "Virtual Function"
205 206
207 - def do_print(self, idxr, arg, iter):
208 raise RuntimeError, "Virtual Function"
209 210
211 - def logp(self, x):
212 assert self.idxr is not None, "You need to call self.set_idxr()." 213 if self.ckey is not None and numpy.equal(self.ckey, x).all(): 214 return self.cached 215 self.idxr.clear() 216 self.idxr.set_prms(x) 217 prob, constraint, logprior = self.logp_guts(self.idxr) 218 self.cached = prob - constraint + logprior 219 self.ckey = numpy.array(self.idxr.get_prms(), copy=True) 220 return self.cached
221 222
223 - def logp_data_normalized(self, x):
224 """Only define this if you can compute the actual probability 225 density for the data given model & parameters, 226 not just something proportional to it! 227 To do this 228 function, you need to be able to do the full multidimensional 229 integral over the probability distribution! 230 231 NOTE that x is an indexer! 232 NOTE that this is not the full logp function: it doesn't contain the prior! 233 """ 234 prob, constraint, logprior = self.logp_guts(x) 235 return prob
236 237
238 - def fixer(self, x):
239 assert self.idxr is not None, "You need to call self.set_idxr()." 240 # print 'fixer in', hash(x.tostring()) 241 # print '\tPrms', ' '.join(['%.3f' % q for q in x]) 242 self.idxr.set_prms(x) 243 prob, constraint, logprior = self.logp_guts(self.idxr) 244 # print '\tProb, constraint', prob, constraint 245 self.cached = prob - constraint + logprior 246 self.ckey = numpy.array(self.idxr.get_prms(), copy=True) 247 # print 'fixer out', hash(self.ckey.tostring()) 248 return self.ckey
249 250
251 - def logp_prior_normalized(self, x):
253 254 PriorProbDist = None #: Data for guessing. 255
256 - def logp_guts(self, idxr, data_sink=None):
257 raise RuntimeError, "Virtual method"
258