
Source Code for Module Bio.MaxEntropy

# Copyright 2001 by Jeffrey Chang.  All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license.  Please see the LICENSE file that should have been included
# as part of this package.

"""Maximum Entropy code.

Uses Improved Iterative Scaling.
"""
# TODO Define terminology

from __future__ import print_function
from functools import reduce

from Bio._py3k import map

import numpy
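
# Terminology used below:
#   xs / training_set -- the list of observations (training instances).
#   ys / results      -- the list of class labels, parallel to xs.
#   classes           -- the sorted list of distinct class labels.
#   feature function  -- a callback f(observation, class) returning a number,
#                        typically 0 or 1.
#   alphas            -- one weight per feature function, learned by
#                        Improved Iterative Scaling (IIS).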

class MaxEntropy(object):
    """Holds information for a Maximum Entropy classifier.

    Members:
    classes      List of the possible classes of data.
    alphas       List of the weights for each feature.
    feature_fns  List of the feature functions.

    """
    def __init__(self):
        self.classes = []
        self.alphas = []
        self.feature_fns = []

def calculate(me, observation):
    """calculate(me, observation) -> list of log probs

    Calculate the log of the probability for each class.  me is a
    MaxEntropy object that has been trained.  observation is a vector
    representing the observed data.  The return value is a list of
    unnormalized log probabilities for each class.

    """
    scores = []
    assert len(me.feature_fns) == len(me.alphas)
    for klass in me.classes:
        lprob = 0.0
        for fn, alpha in zip(me.feature_fns, me.alphas):
            lprob += fn(observation, klass) * alpha
        scores.append(lprob)
    return scores
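
# The scores returned by calculate() are unnormalized log probabilities.
# A minimal sketch of turning them into normalized class probabilities;
# the helper name is hypothetical and not part of the original module.
def _normalized_probs(me, observation):
    """Return P(class | observation) for each class as a numpy array (sketch)."""
    scores = numpy.array(calculate(me, observation))
    # Subtract the maximum before exponentiating for numerical stability.
    probs = numpy.exp(scores - scores.max())
    return probs / probs.sum()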

def classify(me, observation):
    """classify(me, observation) -> class

    Classify an observation into a class.

    """
    scores = calculate(me, observation)
    max_score, klass = scores[0], me.classes[0]
    for i in range(1, len(scores)):
        if scores[i] > max_score:
            max_score, klass = scores[i], me.classes[i]
    return klass

def _eval_feature_fn(fn, xs, classes):
    """_eval_feature_fn(fn, xs, classes) -> dict of values

    Evaluate a feature function on every instance of the training set
    and every class.  fn is a callback function that takes two
    parameters: a training instance and a class.  Return a dictionary
    of (training set index, class index) -> non-zero value.  Values of
    0 are not stored in the dictionary.

    """
    values = {}
    for i in range(len(xs)):
        for j in range(len(classes)):
            f = fn(xs[i], classes[j])
            if f != 0:
                values[(i, j)] = f
    return values
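
# For example, a feature that fires only for class 'Yes', evaluated over two
# training instances with classes ['No', 'Yes'], would be cached as the
# sparse dict {(0, 1): 1, (1, 1): 1}: instance 0 with class index 1, and
# instance 1 with class index 1.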

def _calc_empirical_expects(xs, ys, classes, features):
    """_calc_empirical_expects(xs, ys, classes, features) -> list of expectations

    Calculate the expectation of each function from the data.  This is
    the constraint for the maximum entropy distribution.  Return a
    list of expectations, parallel to the list of features.

    """
    # E[f_i] = SUM_x,y P(x, y) f(x, y)
    #        = 1/N SUM_j f(x_j, y_j)   (summing over the N training pairs)
    class2index = {}
    for index, key in enumerate(classes):
        class2index[key] = index
    ys_i = [class2index[y] for y in ys]

    expect = []
    N = len(xs)
    for feature in features:
        s = 0
        for i in range(N):
            s += feature.get((i, ys_i[i]), 0)
        expect.append(float(s) / N)
    return expect
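
# For instance, with N = 4 training pairs and a feature equal to 1 on the
# observed (instance, class) pairs for instances 0 and 2 (and 0 elsewhere),
# its empirical expectation is (1 + 1) / 4 = 0.5.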

def _calc_model_expects(xs, classes, features, alphas):
    """_calc_model_expects(xs, classes, features, alphas) -> list of expectations.

    Calculate the expectation of each feature from the model.  This is
    not used in maximum entropy training, but provides a good function
    for debugging.

    """
    # SUM_X P(x) SUM_Y P(Y|X) F(X, Y)
    # = 1/N SUM_X SUM_Y P(Y|X) F(X, Y)
    p_yx = _calc_p_class_given_x(xs, classes, features, alphas)

    expects = []
    for feature in features:
        total = 0.0
        for (i, j), f in feature.items():
            total += p_yx[i][j] * f
        expects.append(total / len(xs))
    return expects

def _calc_p_class_given_x(xs, classes, features, alphas):
    """_calc_p_class_given_x(xs, classes, features, alphas) -> matrix

    Calculate P(y|x), where y is the class and x is an instance from
    the training set.  Return a len(xs) x len(classes) matrix of
    probabilities.

    """
    prob_yx = numpy.zeros((len(xs), len(classes)))

    # Calculate log P(y, x).
    assert len(features) == len(alphas)
    for feature, alpha in zip(features, alphas):
        for (x, y), f in feature.items():
            prob_yx[x][y] += alpha * f
    # Take an exponent to get P(y, x).
    prob_yx = numpy.exp(prob_yx)
    # Divide out the probability over each class, so we get P(y|x).
    for i in range(len(xs)):
        z = sum(prob_yx[i])
        prob_yx[i] = prob_yx[i] / z

    # Equivalent pure-Python implementation, kept for reference:
    # prob_yx = []
    # for i in range(len(xs)):
    #     z = 0.0  # Normalization factor for this x, over all classes.
    #     probs = [0.0] * len(classes)
    #     for j in range(len(classes)):
    #         log_p = 0.0  # log of the probability of f(x, y)
    #         for k in range(len(features)):
    #             log_p += alphas[k] * features[k].get((i, j), 0.0)
    #         probs[j] = numpy.exp(log_p)
    #         z += probs[j]
    #     # Normalize the probabilities for this x.
    #     probs = map(lambda x, z=z: x / z, probs)
    #     prob_yx.append(probs)
    return prob_yx
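
# In formula form, for instance x and class y:
#   P(y | x) = exp(SUM_i alpha_i * f_i(x, y)) /
#              SUM_y' exp(SUM_i alpha_i * f_i(x, y'))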

def _calc_f_sharp(N, nclasses, features):
    """_calc_f_sharp(N, nclasses, features) -> matrix of f sharp values."""
    # f#(x, y) = SUM_i feature_i(x, y)
    f_sharp = numpy.zeros((N, nclasses))
    for feature in features:
        for (i, j), f in feature.items():
            f_sharp[i][j] += f
    return f_sharp

def _iis_solve_delta(N, feature, f_sharp, empirical, prob_yx,
                     max_newton_iterations, newton_converge):
    """Solve the IIS update delta for one feature using Newton's method (PRIVATE)."""
    # Solve delta in:
    # E_empirical[f_i] - SUM_x P(x) SUM_c P(c|x) f_i(x, c) e^[delta_i * f#(x, c)] = 0
    delta = 0.0
    iters = 0
    while iters < max_newton_iterations:    # iterate for Newton's method
        f_newton = df_newton = 0.0          # evaluate the function and derivative
        for (i, j), f in feature.items():
            prod = prob_yx[i][j] * f * numpy.exp(delta * f_sharp[i][j])
            f_newton += prod
            df_newton += prod * f_sharp[i][j]
        f_newton, df_newton = empirical - f_newton / N, -df_newton / N

        ratio = f_newton / df_newton
        delta -= ratio
        if numpy.fabs(ratio) < newton_converge:    # converged
            break
        iters += 1
    else:
        raise RuntimeError("Newton's method did not converge")
    return delta

def _train_iis(xs, classes, features, f_sharp, alphas, e_empirical,
               max_newton_iterations, newton_converge):
    """Do one iteration of hill climbing to find better alphas (PRIVATE)."""
    # This is a good function to parallelize.

    # Pre-calculate P(y|x)
    p_yx = _calc_p_class_given_x(xs, classes, features, alphas)

    N = len(xs)
    newalphas = alphas[:]
    for i in range(len(alphas)):
        delta = _iis_solve_delta(N, features[i], f_sharp, e_empirical[i], p_yx,
                                 max_newton_iterations, newton_converge)
        newalphas[i] += delta
    return newalphas

def train(training_set, results, feature_fns, update_fn=None,
          max_iis_iterations=10000, iis_converge=1.0e-5,
          max_newton_iterations=100, newton_converge=1.0e-10):
    """Train a maximum entropy classifier and return a MaxEntropy object.

    Train a maximum entropy classifier on a training set.
    training_set is a list of observations.  results is a list of the
    class assignments for each observation.  feature_fns is a list of
    the features.  These are callback functions that take an
    observation and class and return a 1 or 0.  update_fn is a
    callback function that is called at each training iteration.  It is
    passed a MaxEntropy object that encapsulates the current state of
    the training.

    The maximum number of iterations and the convergence criterion for IIS
    are given by max_iis_iterations and iis_converge, respectively, while
    max_newton_iterations and newton_converge are the maximum number
    of iterations and the convergence criterion for Newton's method.
    """
    if not training_set:
        raise ValueError("No data in the training set.")
    if len(training_set) != len(results):
        raise ValueError("training_set and results should be parallel lists.")

    # Rename variables for convenience.
    xs, ys = training_set, results

    # Get a list of all the classes that need to be trained.
    classes = sorted(set(results))

    # Cache values for all features.
    features = [_eval_feature_fn(fn, training_set, classes)
                for fn in feature_fns]
    # Cache values for f#.
    f_sharp = _calc_f_sharp(len(training_set), len(classes), features)

    # Pre-calculate the empirical expectations of the features.
    e_empirical = _calc_empirical_expects(xs, ys, classes, features)

    # Now train the alpha parameters to weigh each feature.
    alphas = [0.0] * len(features)
    iters = 0
    while iters < max_iis_iterations:
        nalphas = _train_iis(xs, classes, features, f_sharp,
                             alphas, e_empirical,
                             max_newton_iterations, newton_converge)
        diff = map(lambda x, y: numpy.fabs(x - y), alphas, nalphas)
        diff = reduce(lambda x, y: x + y, diff, 0)
        alphas = nalphas

        me = MaxEntropy()
        me.alphas, me.classes, me.feature_fns = alphas, classes, feature_fns
        if update_fn is not None:
            update_fn(me)

        if diff < iis_converge:    # converged
            break
        iters += 1
    else:
        raise RuntimeError("IIS did not converge")

    return me

if __name__ == "__main__":
    # Car data from the Naive Bayes Classifier example by Eric Meisner,
    # November 22, 2003
    # http://www.inf.u-szeged.hu/~ormandi/teaching/mi2/02-naiveBayes-example.pdf

    xcar = [
        ['Red', 'Sports', 'Domestic'],
        ['Red', 'Sports', 'Domestic'],
        ['Red', 'Sports', 'Domestic'],
        ['Yellow', 'Sports', 'Domestic'],
        ['Yellow', 'Sports', 'Imported'],
        ['Yellow', 'SUV', 'Imported'],
        ['Yellow', 'SUV', 'Imported'],
        ['Yellow', 'SUV', 'Domestic'],
        ['Red', 'SUV', 'Imported'],
        ['Red', 'Sports', 'Imported']
    ]

    ycar = [
        'Yes',
        'No',
        'Yes',
        'No',
        'Yes',
        'No',
        'Yes',
        'No',
        'No',
        'Yes'
    ]

    # Requires some rules or features
    def udf1(ts, cl):
        if ts[0] == 'Red':
            return 0
        else:
            return 1

    def udf2(ts, cl):
        if ts[1] == 'Sports':
            return 0
        else:
            return 1

    def udf3(ts, cl):
        if ts[2] == 'Domestic':
            return 0
        else:
            return 1
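
    # The udf* features above ignore the class argument `cl`.  A minimal
    # sketch (hypothetical, not used by the demo below) of a feature that
    # also depends on the class, which is the usual form for maximum
    # entropy features:
    def udf4(ts, cl):
        # Fires only for red cars labelled 'Yes'.
        if ts[0] == 'Red' and cl == 'Yes':
            return 1
        else:
            return 0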

    user_functions = [udf1, udf2, udf3]  # must be an iterable type

    xe = train(xcar, ycar, user_functions)
    for xv, yv in zip(xcar, ycar):
        xc = classify(xe, xv)
        print('Pred: %s gives %s y is %s' % (xv, xc, yv))