import numpy
import urllib
import scipy.optimize
import random
from math import exp
from math import log

def parseData(fname):
    for l in urllib.urlopen(fname):
        yield eval(l)

print "Reading data..."
# The original stub leaves the data source unspecified ("Read in the data");
# the path below is a placeholder -- substitute your own dataset.
data = list(parseData("path/to/data.json"))
print "done"

def inner(x, y):
    return sum([x[i]*y[i] for i in range(len(x))])

def sigmoid(x):
    return 1.0 / (1 + exp(-x))

# NEGATIVE Log-likelihood
def f(theta, X, y, lam):
    loglikelihood = 0
    for i in range(len(X)):
        logit = inner(X[i], theta)
        loglikelihood -= log(1 + exp(-logit))
        if not y[i]:
            loglikelihood -= logit
    for k in range(len(theta)):
        loglikelihood -= lam * theta[k]*theta[k]
    print "ll =", loglikelihood
    return -loglikelihood

# NEGATIVE Derivative of log-likelihood
def fprime(theta, X, y, lam):
    dl = [0.0]*len(theta)
    for i in range(len(X)):
        logit = inner(X[i], theta)
        for k in range(len(theta)):
            # d/dtheta_k of -log(1 + exp(-logit)) is X[i][k] * (1 - sigmoid(logit))
            dl[k] += X[i][k] * (1 - sigmoid(logit))
            # Negative examples contribute an extra -logit term,
            # whose derivative is -X[i][k]
            if not y[i]:
                dl[k] -= X[i][k]
    # Derivative of the regularizer -lam * theta_k^2
    for k in range(len(theta)):
        dl[k] -= 2 * lam * theta[k]
    # Negate the return value: dl is the gradient of the log-likelihood,
    # but the optimizer *minimizes* f, the NEGATIVE log-likelihood
    return numpy.array([-x for x in dl])

# Extract features and labels from the data. The original stub leaves this
# blank; the field names below are placeholders (assumptions) -- replace them
# with fields from your own dataset.
def feature(datum):
    feat = [1]  # constant (bias) feature
    feat.append(datum['some_numeric_field'])  # placeholder field name
    return feat

X = [feature(d) for d in data]
y = [d['some_binary_label'] for d in data]  # placeholder label field

X_train = X[:len(X)/2]
X_test = X[len(X)/2:]
y_train = y[:len(y)/2]
y_test = y[len(y)/2:]

# If we wanted to split with a validation set:
#X_valid = X[len(X)/2:3*len(X)/4]
#X_test = X[3*len(X)/4:]

# Use a library function (L-BFGS) to minimize f (or implement gradient descent yourself!)
theta, l, info = scipy.optimize.fmin_l_bfgs_b(f, [0]*len(X[0]), fprime, args=(X_train, y_train, 1.0))
print "Final log likelihood =", -l

# Compute the accuracy on the test set: predict positive when the logit is
# positive, i.e., when sigmoid(logit) > 0.5
predictions = [inner(x, theta) > 0 for x in X_test]
correct = [(a == b) for (a, b) in zip(predictions, y_test)]
print "Accuracy =", sum(correct) * 1.0 / len(correct)
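
# Optional sanity check (not part of the original stub): compare fprime
# against a finite-difference approximation of f's gradient using
# scipy.optimize.check_grad. The 100-example slice and the zero starting
# point are arbitrary choices for this sketch; a result that is small
# relative to the gradient's magnitude suggests the analytic derivative
# is consistent with f.
err = scipy.optimize.check_grad(f, fprime,
                                numpy.array([0.0]*len(X[0])),
                                X_train[:100], y_train[:100], 1.0)
print "Gradient check error =", err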