###############################################################################
#
# This file contains the settings for one experiment.
#
# The general idea is as follows:
#
# Suppose you have a machine learning algorithm you want to perform model
# selection with. Then for each different value of, for example, the parameter
# C of a C-SVM, this script generates a Run object (a subclass of dict) that
# stores the parameters of that run.
#
###############################################################################
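#
# Illustration (an assumed example only; the values below are not taken from
# the actual Configuration module): a single generated Run is simply a dict of
# settings, e.g.
#
#   run = Run()
#   run['C'] = 100.0
#   run['enable_quality_scores'] = True
#   run['enable_splice_signals'] = True
#   run['enable_intron_length'] = True
#   run['name'] = '+_quality_+_splicesignals_+_intron_len'
#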

import qpalma.Configuration as Conf
from Run import *
import pdb
import os
import os.path

def createRuns():
    # specify n for n-fold cross validation
    numFolds = 5

    # the main directory where all results are stored
    experiment_dir = '/fml/ag-raetsch/home/fabio/tmp/QPalma'

    assert os.path.exists(experiment_dir), 'toplevel dir for experiment does not exist!'

    # list of regularization parameters and additional flags for different runs
    # for example:
    # - with quality scores
    # - without quality scores
    #
    bool2str = ['-', '+']

    allRuns = []

    #dataset_filename = '/fml/ag-raetsch/home/fabio/svn/projects/QPalma/scripts/dataset_10k'
    dataset_filename = '/fml/ag-raetsch/home/fabio/svn/projects/QPalma/scripts/new_dataset_100'

    for QFlag in [True, False]:
        for SSFlag in [True, False]:
            for ILFlag in [True]:

                # create a new Run object
                currentRun = Run()

                # global settings for all runs
                currentRun['anzpath'] = Conf.anzpath
                currentRun['iter_steps'] = Conf.iter_steps
                currentRun['matchmatrixRows'] = Conf.sizeMatchmatrix[0]
                currentRun['matchmatrixCols'] = Conf.sizeMatchmatrix[1]
                currentRun['mode'] = Conf.mode
                currentRun['numFeatures'] = Conf.numFeatures
                currentRun['numConstraintsPerRound'] = Conf.numConstraintsPerRound

                currentRun['remove_duplicate_scores'] = Conf.remove_duplicate_scores
                currentRun['print_matrix'] = Conf.print_matrix
                currentRun['read_size'] = Conf.read_size

                currentRun['numLengthSuppPoints'] = Conf.numLengthSuppPoints
                currentRun['numDonSuppPoints'] = Conf.numDonSuppPoints
                currentRun['numAccSuppPoints'] = Conf.numAccSuppPoints

                currentRun['numQualPlifs'] = Conf.numQualPlifs
                currentRun['numQualSuppPoints'] = Conf.numQualSuppPoints
                currentRun['totalQualSuppPoints'] = Conf.totalQualSuppPoints

                # run-specific settings

                currentRun['training_begin'] = Conf.training_begin
                currentRun['training_end'] = Conf.training_end
                currentRun['prediction_begin'] = Conf.prediction_begin
                currentRun['prediction_end'] = Conf.prediction_end

                currentRun['enable_quality_scores'] = QFlag
                currentRun['enable_splice_signals'] = SSFlag
                currentRun['enable_intron_length'] = ILFlag

                currentName = '%s_quality_%s_splicesignals_%s_intron_len' %\
                    (bool2str[QFlag], bool2str[SSFlag], bool2str[ILFlag])

                currentRun['C'] = Conf.C

                currentRun['name'] = currentName
                currentRun['dataset_filename'] = dataset_filename
                currentRun['experiment_path'] = '/fml/ag-raetsch/home/fabio/tmp/QPalma'

                allRuns.append(currentRun)
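
    # Note: with two settings each for QFlag and SSFlag and a single setting for
    # ILFlag, the loops above generate 2 * 2 * 1 = 4 parameter combinations (runs).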

    #
    # check for valid paths / options etc
    #
    for currentRun in allRuns:

        assert 0 < currentRun['anzpath'] < 100
        assert 0 <= currentRun['training_begin'] < currentRun['training_end']
        assert currentRun['training_end'] <= currentRun['prediction_begin'] < currentRun['prediction_end']

        assert currentRun['iter_steps']

        #assert currentRun['matchmatrixCols']
        #assert currentRun['matchmatrixRows']

        assert currentRun['mode'] in ['normal', 'using_quality_scores']

        #assert currentRun['numConstraintsPerRound']

        assert 0 < currentRun['numFeatures'] < 10000

        #assert currentRun['numLengthSuppPoints']
        #assert currentRun['numDonSuppPoints']
        #assert currentRun['numAccSuppPoints']
        #assert currentRun['numQualPlifs']
        #assert currentRun['numQualSuppPoints']

        assert currentRun['print_matrix'] in [True, False]
        assert 0 < currentRun['read_size'] < 100
        assert currentRun['remove_duplicate_scores'] in [True, False]

        assert currentRun['enable_quality_scores'] in [True, False]
        assert currentRun['enable_splice_signals'] in [True, False]
        assert currentRun['enable_intron_length'] in [True, False]

        #assert currentRun['totalQualSuppPoints']
        assert os.path.exists(currentRun['dataset_filename'])
        assert os.path.exists(currentRun['experiment_path'])

    return allRuns

if __name__ == '__main__':
    allRuns = createRuns()
    # drop into the debugger so the generated Run objects can be inspected interactively
    pdb.set_trace()
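
# -----------------------------------------------------------------------------
# Sketch (an assumption, not part of the original script): one way the generated
# runs could be consumed by a model-selection driver.  train_on_run() and
# evaluate_on_run() are hypothetical placeholders for whichever QPalma pipeline
# step actually performs training and evaluation.
#
#   best_run, best_score = None, None
#   for run in createRuns():
#       model = train_on_run(run)            # hypothetical: train with this run's parameters
#       score = evaluate_on_run(model, run)  # hypothetical: score on held-out data
#       if best_score is None or score > best_score:
#           best_run, best_score = run, score
#   print 'best parameter setting:', best_run['name']
# -----------------------------------------------------------------------------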