# scripts/Experiment.py
###############################################################################
#
# This file contains the settings for one experiment.
#
# The general idea is as follows:
#
# Suppose you have a machine learning algorithm you want to perform model
# selection with. Then for each parameter setting (for example each value of C
# for a C-SVM) this script generates a Run object, a subclass of dict, that
# stores the parameters of that run.
#
###############################################################################

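# A minimal usage sketch (train_one_run is hypothetical; the actual QPalma
# training entry point may differ):
#
#   for run in createRuns():
#       train_one_run(run)   # one training job per parameter combination
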
import qpalma.Configuration as Conf
from Run import *
import pdb
import os
import os.path

def createRuns():
    # specify n for n-fold cross validation
    numFolds = 5

    # the main directory where all results are stored
    experiment_dir = '/fml/ag-raetsch/home/fabio/tmp/QPalmaTraining'

    assert os.path.exists(experiment_dir), 'toplevel dir for experiment does not exist!'

    # flags controlling the different runs, for example:
    # - with quality scores
    # - without quality scores
    #
    # bool2str maps a boolean flag to '-'/'+' when building the run name below
    bool2str = ['-','+']

    allRuns = []

    dataset_filename = '/fml/ag-raetsch/home/fabio/svn/projects/QPalma/scripts/dataset_remapped_test_new'

    for QFlag in [True,False]:
        for SSFlag in [True,False]:
            for ILFlag in [True,False]:

                # create a new Run object
                currentRun = Run()

                # global settings for all runs
                currentRun['anzpath'] = Conf.anzpath
                currentRun['iter_steps'] = Conf.iter_steps
                currentRun['matchmatrixRows'] = Conf.sizeMatchmatrix[0]
                currentRun['matchmatrixCols'] = Conf.sizeMatchmatrix[1]
                currentRun['mode'] = Conf.mode
                currentRun['numConstraintsPerRound'] = Conf.numConstraintsPerRound

                currentRun['remove_duplicate_scores'] = Conf.remove_duplicate_scores
                currentRun['print_matrix'] = Conf.print_matrix
                currentRun['read_size'] = Conf.read_size

                # number of support points for the piecewise linear functions (plifs)
                currentRun['numLengthSuppPoints'] = 10 #Conf.numLengthSuppPoints

                # if we are not using an intron length model at all, we do not
                # need the support points
                if not ILFlag:
                    currentRun['numLengthSuppPoints'] = 2 #Conf.numLengthSuppPoints

                currentRun['numDonSuppPoints'] = 10
                currentRun['numAccSuppPoints'] = 10

                currentRun['numQualPlifs'] = Conf.numQualPlifs
                currentRun['numQualSuppPoints'] = 10
                currentRun['totalQualSuppPoints'] = currentRun['numQualPlifs']*currentRun['numQualSuppPoints']

                # total length of the parameter vector: intron length plif,
                # donor and acceptor plifs, match matrix and all quality plifs
                currentRun['numFeatures'] = currentRun['numLengthSuppPoints']\
                    + currentRun['numDonSuppPoints'] + currentRun['numAccSuppPoints']\
                    + currentRun['matchmatrixRows'] * currentRun['matchmatrixCols']\
                    + currentRun['totalQualSuppPoints']

                # run-specific settings
                currentRun['training_begin'] = Conf.training_begin
                currentRun['training_end'] = Conf.training_end
                currentRun['prediction_begin'] = Conf.prediction_begin
                currentRun['prediction_end'] = Conf.prediction_end

                currentRun['enable_quality_scores'] = QFlag
                currentRun['enable_splice_signals'] = SSFlag
                currentRun['enable_intron_length'] = ILFlag

                # e.g. 'run_+_quality_-_splicesignals_+_intron_len'
                currentName = 'run_%s_quality_%s_splicesignals_%s_intron_len' %\
                    (bool2str[QFlag],bool2str[SSFlag],bool2str[ILFlag])

                # regularization parameter (fixed here; vary it to perform
                # model selection over C)
                currentRun['C'] = 100

                currentRun['name'] = currentName
                currentRun['dataset_filename'] = dataset_filename
                currentRun['experiment_path'] = experiment_dir

                currentRun['min_intron_len'] = 20
                currentRun['max_intron_len'] = 2000

                #currentRun['min_intron_len'] = 10
                #currentRun['max_intron_len'] = 100

                currentRun['min_svm_score'] = 0.0
                currentRun['max_svm_score'] = 1.0

                currentRun['min_qual'] = -5
                currentRun['max_qual'] = 40

                currentRun['dna_flat_files'] = Conf.dna_flat_fn

                allRuns.append(currentRun)

    #
    # check for valid paths / options etc.
    #
    for currentRun in allRuns:

        assert 0 < currentRun['anzpath'] < 100
        assert 0 <= currentRun['training_begin'] < currentRun['training_end']
        assert currentRun['training_end'] <= currentRun['prediction_begin'] < currentRun['prediction_end']

        assert currentRun['iter_steps']

        #assert currentRun['matchmatrixCols']
        #assert currentRun['matchmatrixRows']

        assert currentRun['mode'] in ['normal','using_quality_scores']

        #assert currentRun['numConstraintsPerRound']

        assert 0 < currentRun['numFeatures'] < 10000

        #assert currentRun['numLengthSuppPoints']
        #assert currentRun['numDonSuppPoints']
        #assert currentRun['numAccSuppPoints']
        #assert currentRun['numQualPlifs']
        #assert currentRun['numQualSuppPoints']
        #assert numQualPlifs >= 0
        #assert numDonSuppPoints > 1
        #assert numAccSuppPoints > 1
        #assert numLengthSuppPoints > 1
        #assert numQualSuppPoints > 1

        assert currentRun['print_matrix'] in [True,False]
        assert 0 < currentRun['read_size'] < 100
        assert currentRun['remove_duplicate_scores'] in [True,False]

        assert currentRun['enable_quality_scores'] in [True,False]
        assert currentRun['enable_splice_signals'] in [True,False]
        assert currentRun['enable_intron_length'] in [True,False]

        #assert currentRun['totalQualSuppPoints']
        assert os.path.exists(currentRun['dataset_filename'])
        assert os.path.exists(currentRun['experiment_path'])

    return allRuns

if __name__ == '__main__':
    allRuns = createRuns()
    # drop into the debugger so the generated runs can be inspected interactively
    pdb.set_trace()
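
# A quick sanity check without the debugger (sketch only; prints the name and
# feature count of each generated run):
#
#   for run in createRuns():
#       print run['name'], run['numFeatures']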