Commit

ok
tqchen committed May 16, 2014
1 parent c22df2b commit a7f3d7e
Showing 4 changed files with 107 additions and 0 deletions.
1 change: 1 addition & 0 deletions demo/kaggle-higgs/README.md
@@ -0,0 +1 @@
This folder gives an example of how to use XGBoost for the Kaggle Higgs Boson Machine Learning Challenge.
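To run the demo, download training.csv and test.csv from the competition's data page into a data/ subfolder, then execute run.sh, which trains a model and writes the ranked submission file higgs.pred.csv.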
50 changes: 50 additions & 0 deletions demo/kaggle-higgs/higgs-numpy.py
@@ -0,0 +1,50 @@
#!/usr/bin/python
# example script showing how to train xgboost on the Kaggle Higgs data
import sys
import numpy as np
# add path of xgboost python module
sys.path.append('../../python/')
import xgboost as xgb

test_size = 550000  # number of events in the Kaggle test set

# path to where the data lies
dpath = 'data'

# load in training data, directly use numpy
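# columns: 0 = EventId, 1-30 = features, 31 = Weight, 32 = Label ('s' -> 1, 'b' -> 0)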
dtrain = np.loadtxt( dpath+'/training.csv', delimiter=',', skiprows=1, converters={32: lambda x:int(x=='s') } )
label = dtrain[:,32]
data = dtrain[:,1:31]
# rescale weights so their sum matches the test set size, since the AMS metric depends on absolute weight sums
weight = dtrain[:,31] * float(test_size) / len(label)

sum_wpos = sum( weight[i] for i in xrange(len(label)) if label[i] == 1.0 )
sum_wneg = sum( weight[i] for i in xrange(len(label)) if label[i] == 0.0 )

# print weight statistics
print 'weight statistics: wpos=%g, wneg=%g, ratio=%g' % ( sum_wpos, sum_wneg, sum_wneg/sum_wpos )

# construct xgboost.DMatrix from numpy array, treat -999.0 as missing value
xtrain = xgb.DMatrix( data, label=label, missing = -999.0 )

# setup parameters for xgboost
param = {}
# use logistic regression loss
param['loss_type'] = 3
# scale weight of positive examples
param['scale_pos_weight'] = sum_wneg/sum_wpos
param['bst:eta'] = 0.1
param['bst:max_depth'] = 6
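# evaluate with approximate median significance (AMS) at the top 15% of predictions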
param['eval_metric'] = '[email protected]'
param['silent'] = 1
param['eval_train'] = 1
param['nthread'] = 16

# boost 120 trees
num_round = 120
print 'loading data end, start to boost trees'
bst = xgb.train( param, xtrain, num_round )
# save out model
bst.save_model('higgs.model')

print 'finish training'
52 changes: 52 additions & 0 deletions demo/kaggle-higgs/higgs-pred.py
@@ -0,0 +1,52 @@
#!/usr/bin/python
# example script showing how to make predictions with a trained xgboost model
import sys
import numpy as np
# add path of xgboost python module
sys.path.append('../../python/')
import xgboost as xgb

# path to where the data lies
dpath = 'data'

modelfile = 'higgs.model'
outfile = 'higgs.pred.csv'
# make top 15% as positive
threshold_ratio = 0.15

# load in test data, directly use numpy
dtest = np.loadtxt( dpath+'/test.csv', delimiter=',', skiprows=1 )
data = dtest[:,1:31]
idx = dtest[:,0]

# construct xgboost.DMatrix from the test features, treat -999.0 as missing value
xtest = xgb.DMatrix( data, missing = -999.0 )
bst = xgb.Booster()
bst.load_model( modelfile )

ypred = bst.predict( xtest )
res = [ ( int(idx[i]), ypred[i] ) for i in xrange(len(ypred)) ]

# rank events by descending prediction score (rank 1 = most signal-like)
rorder = {}
for k, v in sorted( res, key = lambda x:-x[1] ):
rorder[ k ] = len(rorder) + 1

# write out predictions
ntop = int( threshold_ratio * len(rorder) )
fo = open(outfile, 'w')
nhit = 0
ntot = 0
fo.write('EventId,RankOrder,Class\n')
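# events ranked in the top 15% are labeled 's' (signal), the rest 'b' (background)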
for k, v in res:
if rorder[k] <= ntop:
lb = 's'
nhit += 1
else:
lb = 'b'
fo.write('%s,%d,%s\n' % ( k, rorder[k], lb ) )
ntot += 1
fo.close()

print 'finished writing into prediction file'
4 changes: 4 additions & 0 deletions demo/kaggle-higgs/run.sh
@@ -0,0 +1,4 @@
#!/bin/bash

# train the model, then generate the ranked submission file
./higgs-numpy.py
./higgs-pred.py
