#%pylab inline
import os
import gzip
import sys
import numpy as np
import pandas as pd
import csv


seed = 128
rng = np.random.RandomState(seed)

dSet = pd.read_csv(os.environ['LAV_DIR']+"/log/sociodemo.csv.gz", compression='gzip',header=0, sep=',', quotechar='"')
print '-----------splitting and removing tails------------------'

##----------------------------feature selection---------------------------
##dSet_y = dSet['gender'].map(str)# + ' ' + dSet['age_range'].map(str)
dSet_y = dSet.iloc[:,1:3]
dSet_x = dSet.iloc[:,3:]
del dSet
Nmin, Nmax = 25,75
dSum_r = dSet_x.sum(axis=1).fillna(0)
lim_r = np.percentile(dSum_r,[Nmin,Nmax])
rowSel = dSum_r[(dSum_r > lim_r[0]) & (dSum_r < lim_r[1])]
dSet_x = dSet_x.loc[rowSel.index]
dSet_y = dSet_y.loc[rowSel.index]
dSum_c = dSet_x.sum(axis=0).fillna(0)
lim_c = np.percentile(dSum_c,[Nmin,Nmax])
colSel = dSum_c[dSum_c > lim_c[0]] ## keep only features above the 25th-percentile total
dSet_x = dSet_x[colSel.index]
N = dSet_y.shape[0]
print dSet_x.shape

print '-------------write----------------------'

# with gzip.open(os.environ['LAV_DIR']+"/log/socio_y.csv.gz","w") as f :
#     csv_w = csv.writer(f)
#     for row in dSet_y :
#         csv_w.writerow(row)

dSet_x.to_csv(os.environ['LAV_DIR']+"/log/socio_x.csv.gz",compression='gzip',sep=',',quotechar='"')
dSet_y.to_csv(os.environ['LAV_DIR']+"/log/socio_y.csv.gz",compression='gzip',sep=',',quotechar='"')

Clean dataset

Trim users whose total activity falls outside the 25th–75th percentile band, drop the rarest features, and write the cleaned learning set to disk
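A minimal, self-contained sketch of the same percentile trimming on synthetic data (the column names are hypothetical): users whose total activity falls outside the central 25–75% band are dropped before learning.

import numpy as np
import pandas as pd

rng = np.random.RandomState(128)
## synthetic activity matrix: 100 users x 5 hypothetical feature columns
df = pd.DataFrame(rng.poisson(5,size=(100,5)),columns=['f%d' % i for i in range(5)])
row_sums = df.sum(axis=1)
lo, hi = np.percentile(row_sums,[25,75])
trimmed = df[(row_sums > lo) & (row_sums < hi)]  ## keep the central mass
print(trimmed.shape)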

#%pylab inline
import os
import gzip
import sys
import numpy as np
import pandas as pd
import scipy as sp
import random
import matplotlib.pyplot as plt
from sklearn.externals import joblib
from sklearn.model_selection import GridSearchCV

seed = 128
rng = np.random.RandomState(seed)
dSet_x = pd.read_csv(os.environ['LAV_DIR']+"/log/socio_x.csv.gz",compression='gzip',sep=',',quotechar='"',index_col=0)
dSet_x.fillna(0,inplace=True)
dSet_y = pd.read_csv(os.environ['LAV_DIR']+"/log/socio_y.csv.gz",compression='gzip',sep=',', quotechar='"')
dSet_y = dSet_y.iloc[:,[1]]
dSet_y = pd.get_dummies(dSet_y)
dSet_y = dSet_y.iloc[:,[0]]##M
# dSet_y.loc[dSet_y['gender'] == 'M',['gender']] = 1
# dSet_y.loc[dSet_y['gender'] == 'F',['gender']] = 0

N = dSet_y.shape[0]
shuffleL = random.sample(range(N),N)
partS = [0,int(N*.9),N]
y_train = np.asarray(dSet_y.iloc[shuffleL[partS[0]:partS[1]]],dtype=np.int32)
y_test = np.asarray(dSet_y.iloc[shuffleL[partS[1]:partS[2]]],dtype=np.int32)
x_train = np.asarray(dSet_x.iloc[shuffleL[partS[0]:partS[1]]],dtype=np.int32)
x_test = np.asarray(dSet_x.iloc[shuffleL[partS[1]:partS[2]]],dtype=np.int32)
joblib.dump(x_test,os.environ['LAV_DIR']+"/train/"+'x_test'+'.pkl',compress=1)
joblib.dump(y_test,os.environ['LAV_DIR']+"/train/"+'y_test'+'.pkl',compress=1)

import sklearn as sk
import sklearn.ensemble
import sklearn.tree
import sklearn.linear_model
import sklearn.neighbors
import sklearn.neural_network
import sklearn.svm
import sklearn.gaussian_process
import sklearn.naive_bayes
import sklearn.discriminant_analysis
import sklearn.dummy
import sklearn.metrics as skm
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF


sData = {'n_estimators':100,"random_state":10,"verbose":0}

classifiers = [
    ##random forest
    sk.ensemble.RandomForestClassifier(n_estimators=100,criterion='entropy',max_features='sqrt',max_depth=5,bootstrap=True,oob_score=True,n_jobs=12,random_state=33)
    ##random forest 2
    ,sk.ensemble.RandomForestClassifier(n_estimators=100,criterion='gini',n_jobs=12,max_depth=15,max_features='auto',min_samples_split=2,random_state=None)
    ##decision tree
    ,sk.tree.DecisionTreeClassifier(criterion="gini",random_state=100,max_depth=10,min_samples_leaf=5)
    ##dummy variables
    ,sk.dummy.DummyClassifier(strategy='stratified',random_state=10)
    ##logistic regression
    ,sk.linear_model.LogisticRegression(C=100.0,class_weight=None,dual=False,fit_intercept=True,intercept_scaling=1,max_iter=100, multi_class='ovr',n_jobs=12,penalty='l2',random_state=None,solver='liblinear',tol=0.0001,verbose=0,warm_start=False)
    ##gradient boost
    ,sk.ensemble.GradientBoostingClassifier(criterion='friedman_mse',init=None,learning_rate=0.1, loss='deviance', max_depth=3,max_features=None, max_leaf_nodes=None,min_impurity_split=1e-07, min_samples_leaf=1,min_samples_split=2, min_weight_fraction_leaf=0.0,n_estimators=100, presort='auto', random_state=10,subsample=1.0, verbose=0, warm_start=False)
    ##neural network
    ,sk.neural_network.MLPClassifier(activation='logistic', alpha=0.0001, batch_size='auto', beta_1=0.9,beta_2=0.999, early_stopping=False, epsilon=1e-08,hidden_layer_sizes=(100,), learning_rate='constant',learning_rate_init=0.001, max_iter=200, momentum=0.9,nesterovs_momentum=True, power_t=0.5, random_state=None,shuffle=True, solver='adam', tol=0.0001, validation_fraction=0.1,verbose=False, warm_start=False)
    ##support vector
    ,sk.svm.SVC(C=1.0,cache_size=200,class_weight=None,coef0=0.0,decision_function_shape=None,degree=3,gamma='auto',kernel='rbf',max_iter=-1,probability=True,random_state=0,shrinking=True,tol=0.001,verbose=False)
    ##support vector regression (a regressor: no predict_proba, so keep it disabled for the ROC comparison)
    ,sk.svm.SVR(kernel='rbf', degree=3, gamma='auto', coef0=0.0, tol=0.001, C=1.0, epsilon=0.1, shrinking=True, cache_size=200, verbose=False, max_iter=-1)
    ##k-neighbors
    ,sk.neighbors.KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',metric_params=None, n_jobs=1, n_neighbors=3, p=2,weights='uniform')
    ##gaussian process
    ,sk.gaussian_process.GaussianProcessClassifier(copy_X_train=True,kernel=1**2 * RBF(length_scale=1), max_iter_predict=100,multi_class='one_vs_rest', n_jobs=1, n_restarts_optimizer=0,optimizer='fmin_l_bfgs_b', random_state=None, warm_start=True)
    ##ada boost
    ,sk.ensemble.AdaBoostClassifier(algorithm='SAMME.R', base_estimator=None,learning_rate=1.0, n_estimators=50, random_state=None)
    ##naive bayesias
    ,sk.naive_bayes.GaussianNB(priors=None)
    ##quadratic discriminant
    ,sk.discriminant_analysis.QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,store_covariances=False, tol=0.0001)
]


modL = pd.read_csv(os.environ['LAV_DIR']+"/train/modelSelection.csv")
for index, row in modL.iterrows():
    if not row['state']:
        continue
    print row['model']
    clf = classifiers[index]
    model = clf.fit(x_train,y_train.ravel())
    joblib.dump(model,os.environ['LAV_DIR']+"/train/"+'lookAlike'+str(index)+'.pkl',compress=1)
    print 'done'
    

if False: ##single trial
    clf = sk.ensemble.GradientBoostingClassifier(criterion='friedman_mse',init=None,learning_rate=0.1,loss='deviance', max_depth=3,max_features=None,max_leaf_nodes=None,min_impurity_split=1e-07,min_samples_leaf=1,min_samples_split=2, min_weight_fraction_leaf=0.0,n_estimators=100, presort='auto', random_state=10,subsample=1.0, verbose=0, warm_start=False)
    model = clf.fit(x_train,y_train.ravel())
    fpr, tpr, _ = skm.roc_curve(y_test,model.predict_proba(x_test)[:,1])
    roc_auc = skm.auc(fpr,tpr)## = np.trapz(fpr,tpr)
    fsc = skm.f1_score(y_test,model.predict(x_test))
    acc = skm.accuracy_score(y_test,model.predict(x_test))
    plt.clf()
    plt.plot([0, 1],[0, 1],'k--',label="model | auc  fsc  acc")
    plt.plot(fpr,tpr,label='%s | %0.2f %0.2f %0.2f ' % ('gradient boost',roc_auc,fsc,acc))
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.0])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic')
    plt.legend(loc="lower right",prop={'size':12})#,'family':'monospace'})
    plt.show()



    
print 'grid grad boost'
## staged tuning: sweep one group of hyper-parameters at a time,
## starting with the tree count at a fixed learning rate
param_test1 = {'n_estimators':range(20,81,10)}
param_test2 = {'max_depth':range(5,16,2), 'min_samples_split':range(200,1001,200)}
param_test3 = {'min_samples_split':range(1000,2100,200),'min_samples_leaf':range(30,71,10)}
param_test4 = {'max_features':range(7,20,2)}
param_test5 = {'subsample':[0.6,0.7,0.75,0.8,0.85,0.9]}
gsearch1 = GridSearchCV(
    estimator=sk.ensemble.GradientBoostingClassifier(learning_rate=0.1,min_samples_split=500,min_samples_leaf=50,max_depth=8,max_features='sqrt',subsample=0.8,random_state=10)
    ,param_grid=param_test1,scoring='roc_auc',n_jobs=4,iid=False,cv=5)
# model = gsearch1.fit(x_train,y_train.ravel())
# joblib.dump(model,os.environ['LAV_DIR']+"/train/"+'lookAlike'+str(5)+'.pkl',compress=1)

    
from sklearn.model_selection import cross_val_score
cross_val_score(clf,x_train,y_train.ravel(),scoring='neg_log_loss')
print "Confusion matrix " , skm.confusion_matrix(y_test,model.predict(x_test))

from sklearn.metrics import fbeta_score, make_scorer
ftwo_scorer = make_scorer(fbeta_score,beta=2)
grid = GridSearchCV(sk.svm.LinearSVC(),param_grid={'C':[1,10]},scoring=ftwo_scorer)
def my_custom_loss_func(ground_truth,predictions):
    diff = np.abs(ground_truth - predictions).max()
    return np.log(1 + diff)
## this is a loss (smaller is better), so flip the sign with greater_is_better=False
score = make_scorer(my_custom_loss_func,greater_is_better=False)
print score(model,x_train,y_train.ravel())

Scikit-learn training

Training different models on the same dataset: random forest, SVM, decision tree, logistic regression, gradient boosting, and a dummy baseline
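The training loop above expects a modelSelection.csv with at least a 'model' and a 'state' column, one row per entry in the classifiers list. The actual file is not shown, so the following is only a hypothetical sketch of a file that would satisfy the loop.

import pandas as pd

## hypothetical contents: state=1 trains the classifier at that index, state=0 skips it
modL = pd.DataFrame({
    'model':['random forest','random forest 2','decision tree','dummy','logistic regression','gradient boost','neural network','svc','svr','k-neighbors','gaussian process','ada boost','naive bayes','quadratic discriminant'],
    'state':[1,1,1,1,1,1,1,1,0,1,0,1,1,1]})
modL.to_csv('modelSelection.csv',index=False)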

import os
import gzip
import sys
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
from scipy import interpolate
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, auc
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
## lasagne/nolearn for the second network below
import lasagne
from lasagne import layers
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet

seed = 128
rng = np.random.RandomState(seed)

dSet_x = pd.read_csv(os.environ['LAV_DIR']+"/log/socio_x.csv.gz", compression='gzip', sep=',',quotechar='"',index_col=0)
dSet_x.fillna(0,inplace=True)
dSet_y = pd.read_csv(os.environ['LAV_DIR']+"/log/socio_y.csv.gz", compression='gzip', sep=',', quotechar='"')
dSet_y = dSet_y.iloc[:,[1]]
dSet_y = pd.get_dummies(dSet_y)
dSet_y = dSet_y.iloc[:,[0]]##M
# dSet_y.loc[dSet_y['gender'] == 'M',['gender']] = 1
# dSet_y.loc[dSet_y['gender'] == 'F',['gender']] = 0

N = dSet_y.shape[0]
shuffleL = random.sample(range(N),N)

partS = [0,int(N*.8),int(N*.9),N]
y_train = np.asarray(dSet_y.iloc[shuffleL[partS[0]:partS[1]]],dtype=np.int32)
y_test = np.asarray(dSet_y.iloc[shuffleL[partS[1]:partS[2]]],dtype=np.int32)
y_valid = np.asarray(dSet_y.iloc[shuffleL[partS[2]:partS[3]]],dtype=np.int32)
x_train = np.asarray(dSet_x.iloc[shuffleL[partS[0]:partS[1]]],dtype=np.int32)
x_test = np.asarray(dSet_x.iloc[shuffleL[partS[1]:partS[2]]],dtype=np.int32)
x_valid = np.asarray(dSet_x.iloc[shuffleL[partS[2]:partS[3]]],dtype=np.int32)

Nent = y_train.shape[0]
Nneu = x_train.shape[1]
Ncat = y_train.shape[1]

model = Sequential()
model.add(Dense(Nneu,input_dim=Nneu,activation='relu'))
model.add(Dropout(0.2))  ## Dropout layers must be added to the model to take effect
model.add(Dense(50,activation='softmax'))
# model.add(Dropout(0.2))
# model.add(Dense(50,activation='softmax'))
model.add(Dropout(0.2))
model.add(Dense(Ncat,activation='sigmoid'))  ## one output column: sigmoid (softmax over a single unit is constant)
#model.compile(loss='categorical_crossentropy',optimizer="adam",metrics=['accuracy'])
#model.compile(loss='mean_squared_error',optimizer="adam",metrics=['accuracy'])
model.compile(loss='mean_squared_error',optimizer="sgd",metrics=['accuracy'])
model.fit(x_train,y_train,epochs=5,batch_size=128,validation_data=(x_test,y_test))

pred = model.predict_classes(x_valid)
print pred

net1 = NeuralNet(
    layers=[('input',layers.InputLayer),
            ('hidden',layers.DenseLayer),
            ('output',layers.DenseLayer),
    ],
    input_shape=(None,Nneu),  # one input unit per feature column
    hidden_num_units=3,  # number of units in 'hidden' layer
    output_nonlinearity=lasagne.nonlinearities.softmax,
    output_num_units=2,  # two classes: M/F
    # optimization method:
    update=nesterov_momentum,
    update_learning_rate=0.01,
    update_momentum=0.9,
    max_epochs=10,
    verbose=1,
)

net1.fit(x_train.astype(np.float32),y_train.ravel())
print("Predicted: %s" % str(net1.predict(x_test)))

Neural network training

Training neural networks on the same learning set, first with Keras, then with nolearn/Lasagne
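To put the Keras network on the same footing as the scikit-learn models, its ROC AUC can be computed on the held-out validation split. A short sketch, assuming the model, x_valid and y_valid from the script above (with the sigmoid output, predict returns one probability per user):

y_score = model.predict(x_valid)[:,0]  ## predicted probability of the positive (M) column
fpr, tpr, _ = roc_curve(y_valid.ravel(),y_score)
print(auc(fpr,tpr))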

import os
import gzip
import sys
import numpy as np
import pandas as pd
import scipy as sp
import scipy.interpolate
import random
import matplotlib.pyplot as plt
from sklearn.externals import joblib
from treeinterpreter import treeinterpreter as ti
import sklearn as sk
import sklearn.metrics as skm
import sklearn.tree as skt
from sklearn import model_selection, metrics

x_test = joblib.load(os.environ['LAV_DIR']+"/train/"+'x_test'+'.pkl')
y_test = joblib.load(os.environ['LAV_DIR']+"/train/"+'y_test'+'.pkl')

modL = pd.read_csv(os.environ['LAV_DIR']+"/train/modelSelection.csv")
model = dict()
plt.clf()
plt.plot([0, 1],[0, 1],'k--',label="model | auc  fsc  acc")
for index, row in modL.iterrows():
    if not row['state']:
        continue
    print row['model']
    fName = os.environ['LAV_DIR']+"/train/"+'lookAlike'+str(index)+'.pkl'
    if not os.path.isfile(fName):
        continue
    model[index] = joblib.load(fName)
    fpr, tpr, _ = skm.roc_curve(y_test,model[index].predict_proba(x_test)[:,1])
    roc_auc = skm.auc(fpr,tpr)## = np.trapz(fpr,tpr)
    fsc = skm.f1_score(y_test,model[index].predict(x_test))
    acc = skm.accuracy_score(y_test,model[index].predict(x_test))
    ##cv_score = model_selection.cross_val_score(model[index],x_train,y_train.ravel(),cv=5,scoring='roc_auc')
    plt.plot(fpr,tpr,label='%s | %0.2f %0.2f %0.2f ' % (row['model'],roc_auc,fsc,acc))
    
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right",prop={'size':12})#,'family':'monospace'})
plt.show()
    
y_prob = model[5].predict_proba(x_test)[:,1]
y_pred = np.zeros_like(y_prob,dtype=np.float32)
thMod = [x*0.05+0.5 for x in range(11)]
accL = []
for th in thMod:
    thSet = y_prob >= th
    if thSet.sum() == 0:
        continue
    y_pred[thSet] = 1
    y_pred[~thSet] = 0
    ## overall accuracy, accuracy on the targeted users, and reach
    ## (targeted share of the audience; the factor 2 assumes a ~50% male base rate)
    accL.append([th,skm.accuracy_score(y_test,y_pred),skm.accuracy_score(y_test[thSet],y_pred[thSet]),float(2*thSet.sum())/y_test.shape[0]])

## re-express accuracy as a function of reach via the common threshold axis
xnew = np.arange(min([x[0] for x in accL]),max([x[0] for x in accL]),0.005)
y_reach = sp.interpolate.interp1d([x[0] for x in accL],[x[3] for x in accL])(xnew)
f_reach_inv = sp.interpolate.interp1d([x[3] for x in accL],[x[0] for x in accL])(y_reach)
y_acc = sp.interpolate.interp1d([x[0] for x in accL],[x[2] for x in accL])(f_reach_inv)
plt.plot(100.*y_reach,y_acc,'-k',label='acc1')
plt.xlabel('reach %')
plt.ylabel('accuracy')
plt.title("male classification")
plt.legend()
plt.show()


Evaluate and compare

Compare the trained models on accuracy, F-score, and ROC curves, then study the accuracy/reach trade-off of the selected classifier
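The accuracy/reach sweep above is easier to read on a toy example: raising the decision threshold shrinks the targeted audience but raises the accuracy on it. A minimal sketch with synthetic scores (the threshold grid is chosen arbitrarily):

import numpy as np
from sklearn.metrics import accuracy_score

rng = np.random.RandomState(0)
y_true = rng.randint(0,2,1000)
## synthetic scores loosely correlated with the labels
y_prob = np.clip(0.5*y_true + 0.7*rng.rand(1000),0,1)
for th in [0.5,0.6,0.7,0.8]:
    sel = y_prob >= th                 ## users targeted as positives
    reach = sel.mean()                 ## share of the audience kept
    acc = accuracy_score(y_true[sel],np.ones(sel.sum(),dtype=int))
    print('th=%.2f reach=%.2f acc=%.2f' % (th,reach,acc))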