+ Added two scripts for the psl2gff step.
[qpalma.git] / scripts / grid_predict.py
index 50657c9..3d962f7 100644 (file)
@@ -43,10 +43,10 @@ def makeJobs(run,dataset_fn,chunks,param):
 
    jobs=[]
 
-   for c_name,current_chunk in chunks[:1]:
+   for c_name,current_chunk in chunks:
       current_job = KybJob(grid_predict.g_predict,[run,dataset_fn,current_chunk,param,c_name])
-      current_job.h_vmem = '5.0G'
-      current_job.express = 'True'
+      current_job.h_vmem = '30.0G'
+      #current_job.express = 'True'
 
       print "job #1: ", current_job.nativeSpecification
 
@@ -62,19 +62,21 @@ def create_and_submit():
 
    jp = os.path.join
 
-   run_dir = '/fml/ag-raetsch/home/fabio/tmp/newest_run/alignment/run_enable_quality_scores_+_enable_splice_signals_+_enable_intron_length_+'
+   run_dir = '/fml/ag-raetsch/home/fabio/tmp/newest_run/alignment/saved_run'
 
    run   = cPickle.load(open(jp(run_dir,'run_obj.pickle')))
+   run['name'] = 'saved_run'
+
    param = cPickle.load(open(jp(run_dir,'param_526.pickle')))
 
-   dataset_fn           = '/fml/ag-raetsch/home/fabio/svn/projects/QPalma/scripts/dataset_12_05_08.test.pickle'
-   prediction_keys_fn   = '/fml/ag-raetsch/home/fabio/svn/projects/QPalma/scripts/dataset_12_05_08.test_keys.pickle'
+   dataset_fn           = '/fml/ag-raetsch/home/fabio/tmp/transcriptome_data/dataset_transcriptome_run_1.pickle'
+   prediction_keys_fn   = '/fml/ag-raetsch/home/fabio/tmp/transcriptome_data/dataset_transcriptome_run_1.keys.pickle'
 
    prediction_keys = cPickle.load(open(prediction_keys_fn))
 
    print 'Found %d keys for prediction.' % len(prediction_keys)
 
-   num_splits = 12
+   num_splits = 25
    slices = get_slices(len(prediction_keys),num_splits)
    chunks = []
    for idx,slice in enumerate(slices):