+ svn file moving
author: fabio <fabio@e1793c9e-67f9-0310-80fc-b846ff1f7b36>
Wed, 23 Jan 2008 11:17:17 +0000 (11:17 +0000)
committer: fabio <fabio@e1793c9e-67f9-0310-80fc-b846ff1f7b36>
Wed, 23 Jan 2008 11:17:17 +0000 (11:17 +0000)
git-svn-id: http://svn.tuebingen.mpg.de/ag-raetsch/projects/QPalma@7540 e1793c9e-67f9-0310-80fc-b846ff1f7b36

qpalma/DataProc.py [new file with mode: 0644]
qpalma/paths_load_data.py
qpalma/paths_load_data_pickle.py
scripts/qpalma.py [deleted file]
scripts/qpalma_predict.py
scripts/qpalma_train.py [new file with mode: 0644]

diff --git a/qpalma/DataProc.py b/qpalma/DataProc.py
new file mode 100644 (file)
index 0000000..f5af3f5
--- /dev/null
@@ -0,0 +1,266 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from numpy.matlib import mat,zeros,ones,inf
+import cPickle
+import pdb
+import Configuration as Conf
+from tools.PyGff import *
+import io_pickle
+import scipy.io
+import pdb
+
+
+def paths_load_data_solexa(expt,genome_info,PAR):
+   # expt can be 'training','validation' or 'test'
+   assert expt in ['training','validation','test']
+
+   dna_filename = Conf.dna_filename
+   est_filename = Conf.est_filename
+   tair7_seq_filename = Conf.tair7_seq_filename 
+
+   tair7_seq = cPickle.load(open(tair7_seq_filename))
+   allGenes  = cPickle.load(open(dna_filename))
+
+   Sequences = []
+   Acceptors = []
+   Donors = []
+   Exons = []
+   Ests = []
+   Qualities = []
+
+   for line in open(est_filename):
+      line = line.strip()
+      chr,strand,seq, splitpos, length, prb, cal, chastity, gene_id, exon_idx = line.split()
+      splitpos = int(splitpos)
+      length = int(length)
+      prb = [ord(elem)-50 for elem in prb]
+      cal = [ord(elem)-64 for elem in cal]
+      chastity = [ord(elem)+10 for elem in chastity]
+
+      assert len(prb) == len(seq)
+
+      currentGene = allGenes[gene_id]
+      seq = seq.lower()
+
+      try:
+         currentSeq = tair7_seq[gene_id+'.1']['sequence'].lower()
+      except:
+         continue
+         
+      #assert currentSize == len(Sequences[-1]), 'gene_size %d / seq size %d' % (currentSize,len(Sequences[-1]))
+      #Acceptors.append([-inf]*currentSize)
+      #Donors.append([-inf]*currentSize)
+
+      exon_idx = int(exon_idx)
+      currentExons = zeros((len(currentGene.exons),2))
+      #for idx in range(len(currentGene.exons)):
+      #   currentExons[idx,0] = currentGene.exons[idx][0]-currentGene.start
+      #   currentExons[idx,1] = currentGene.exons[idx][1]-currentGene.start
+
+      currentExons[0,0] = currentGene.exons[exon_idx-1][0]-currentGene.start
+      currentExons[0,1] = currentGene.exons[exon_idx-1][1]-currentGene.start
+
+      currentExons[1,0] = currentGene.exons[exon_idx][0]-currentGene.start
+      currentExons[1,1] = currentGene.exons[exon_idx][1]-currentGene.start
+
+      cut_offset = 500
+      up_cut   = currentExons[0,0] - cut_offset
+      down_cut = currentExons[1,1] + cut_offset
+
+      if up_cut < 0:
+         up_cut = 0
+
+      if down_cut > len(currentSeq):
+         down_cut = len(currentSeq)
+
+      currentSeq = currentSeq[up_cut:down_cut]
+
+      Sequences.append(currentSeq)
+
+      currentSize = len(Sequences[-1])
+      Acceptors.append([0]*currentSize)
+      Donors.append([0]*currentSize)
+
+      Exons.append(currentExons)
+      Ests.append(seq)
+      Qualities.append(prb)
+
+      SplitPositions.append(int(splitpos))
+
+      if len(Sequences[-1]) == 2755:
+         Sequences = Sequences[:-1]
+         Acceptors = Acceptors[:-1]
+         Donors    = Donors[:-1]
+         Exons     = Exons[:-1]
+         Ests      = Ests[:-1]
+         Qualities = Qualities[:-1]
+   
+   print 'found %d examples' % len(Sequences)
+
+   return Sequences, Acceptors, Donors, Exons, Ests, Qualities
+
+
+def paths_load_data_pickle(expt,genome_info,PAR):
+   """
+      
+   """
+
+   # function [Sequences, Acceptors, Donors, Exons, Ests, Noises] = paths_load_data(expt,genome_info,PAR)
+   # Load the relevant file and return the alignment data
+
+   # expt can be 'training','validation' or 'test'
+
+   assert expt in ['training','validation','test']
+
+   tmp_dir = '/fml/ag-raetsch/home/fabio/tmp'
+
+   Noises = [];
+
+   if expt == 'training':
+      if PAR.microexon:
+         if PAR.LOCAL_ALIGN: # local version
+
+            train_data = '%s/microexon_train_data.pickle' % genome_info.basedir
+            data = cPickle.load(open(train_data))
+
+         else: # global version
+            pass
+
+
+      else:
+         train_data = '%s/exons_train_local.pickle' % genome_info.basedir
+         data = cPickle.load(open(train_data))
+
+      print 'train_data is %s' % train_data
+
+      Sequences = data['Train']       # dna sequences
+      Acceptors = data['TrainAcc']    # acceptor scores
+      Donors    = data['TrainDon']    # donor scores
+      Exons     = data['TrainExon']   # exon boundaries
+      Ests      = data['TrainEsts']   # est sequences
+     
+   # Lower all indices by one to convert matlab 
+   # to python indices
+
+   Exons -= 1
+
+   return Sequences, Acceptors, Donors, Exons, Ests, Noises
+
+def paths_load_data(expt,genome_info,PAR):
+   """
+      
+   """
+
+   # function [Sequences, Acceptors, Donors, Exons, Ests, Noises] = paths_load_data(expt,genome_info,PAR)
+   # Load the relevant file and return the alignment data
+
+   # expt can be 'training','validation' or 'test'
+
+   assert expt in ['training','validation','test']
+
+   tmp_dir = '/fml/ag-raetsch/home/fabio/tmp'
+
+   Noises = [];
+
+   if expt == 'training':
+      if PAR.microexon:
+         if PAR.LOCAL_ALIGN: # local version
+
+            train_data = '%s/microexon_train_data_cut_local.mat' % genome_info.basedir
+            train_data = '%s/microexon_train_data.mat' % genome_info.basedir
+            #train_data_pickle = '%s/microexon_train_data_cut_local.pickle'% tmp_dir
+            #io_pickle.convert_v6(train_data,train_data_pickle)
+            #train_data = io_pickle.load(train_data_pickle)
+            data = scipy.io.loadmat(train_data)
+
+         else: # global version
+
+            train_data = '%s/microexon_train_data_cut_ip=%1.3f_dp=%1.3f_mp=%1.3f.mat' %\
+               (genome_info.basedir, PAR.insertion_prob, PAR.deletion_prob, PAR.mutation_prob)
+
+            train_data = '%s/microexon_train_data.mat' % genome_info.basedir
+            #train_data_pickle = '%s/microexon_train_data_cut_ip=%1.3f_dp=%1.3f_mp=%1.3f.pickle' %\
+            #   (tmp_dir, PAR.insertion_prob, PAR.deletion_prob, PAR.mutation_prob)
+
+            #io_pickle.convert_v6(train_data,train_data_pickle)
+            #train_data = io_pickle.load(train_data_pickle)
+            data = scipy.io.loadmat(train_data)
+            Noises = data['TrainNoise'] # substitution matrix
+
+      else:
+         train_data = '%s/exons_train_local.mat' % genome_info.basedir
+         #train_data_pickle = '%s/exons_train_local.pickle'% tmp_dir
+         #io_pickle.convert_v6(train_data,train_data_pickle)
+         #microexon_train_data = io_pickle.load(train_data_pickle)
+         data = scipy.io.loadmat(train_data)
+
+      print 'train_data is %s' % train_data
+
+      Sequences = data['Train']       # dna sequences
+      Acceptors = data['TrainAcc']    # acceptor scores
+      Donors    = data['TrainDon']    # donor scores
+      Exons     = data['TrainExon']   # exon boundaries
+      Ests      = data['TrainEsts']   # est sequences
+     
+   #elif expt == 'validation':
+   #  print('Loading validation data\n') ;
+   #  if PAR.microexon,
+   #    if PAR.LOCAL_ALIGN
+   #      %local version
+   #      load(sprintf('%s/microexon_val_data_cut_local_ip=%1.3f_dp=%1.3f_mp=%1.3f.mat', ...
+   #         genome_info.basedir, PAR.insertion_prob, PAR.deletion_prob, PAR.mutation_prob),  ...
+   #      'ValEsts', 'ValExon', 'Val', 'ValAcc', 'ValDon') ;
+   #    else
+   #      %global version
+   #      load(sprintf('%s/microexon_val_data_cut_ip=%1.3f_dp=%1.3f_mp=%1.3f.mat', ...
+   #         genome_info.basedir, PAR.insertion_prob, PAR.deletion_prob, PAR.mutation_prob),  ...
+   #      'ValEsts', 'ValExon', 'Val', 'ValAcc', 'ValDon') ;
+   #    end
+   #  else
+   #    load(sprintf('%s/exons_val_ip=%1.3f_dp=%1.3f_mp=%1.3f.mat', ...
+   #       genome_info.basedir, PAR.insertion_prob, PAR.deletion_prob, PAR.mutation_prob),  ...
+   #    'ValEsts', 'ValExon', 'Val', 'ValAcc', 'ValDon') ;
+   #  end
+   #  
+   #  Sequences = Val ;      % dna sequences
+   #  Acceptors = ValAcc ;   % acceptor scores
+   #  Donors    = ValDon ;   % donor scores
+   #  Exons     = ValExon ;  % exon boundaries
+   #  Ests      = ValEsts ;  % est sequences
+
+
+
+   #elif expt == 'test':
+   #  fprintf('Loading test data\n') ;
+   #  if PAR.microexon,
+   #    if PAR.LOCAL_ALIGN
+   #      %local version
+   #      load(sprintf('%s/microexon_test_data_cut_local_ip=%1.3f_dp=%1.3f_mp=%1.3f.mat', ...
+   #         genome_info.basedir, PAR.insertion_prob, PAR.deletion_prob, PAR.mutation_prob), ...
+   #      'TestEsts', 'TestExon', 'Test', 'TestAcc', 'TestDon') ;
+   #    else
+   #      %global version
+   #      load(sprintf('%s/microexon_test_data_cut_ip=%1.3f_dp=%1.3f_mp=%1.3f.mat', ...
+   #         genome_info.basedir, PAR.insertion_prob, PAR.deletion_prob, PAR.mutation_prob),  ...
+   #      'TestEsts', 'TestExon', 'Test','TestAcc', 'TestDon', 'TestNoise') ;
+   #      Noises    = TestNoise ; % substitution matrix
+   #      end
+   #      else
+   #    load(sprintf('%s/exons_test_ip=%1.3f_dp=%1.3f_mp=%1.3f.mat', ...
+   #       genome_info.basedir, PAR.insertion_prob, PAR.deletion_prob, PAR.mutation_prob), ...
+   #    'TestEsts', 'TestExon', 'Test', 'TestAcc', 'TestDon') ;
+   #  end
+   #  
+   #  Sequences = Test ;      % dna sequences
+   #  Acceptors = TestAcc ;   % acceptor scores
+   #  Donors    = TestDon ;   % donor scores
+   #  Exons     = TestExon ;  % exon boundaries
+   #  Ests      = TestEsts ;  % est sequences
+
+   # Lower all indices by one to convert matlab 
+   # to python indices
+
+   Exons -= 1
+
+   return Sequences, Acceptors, Donors, Exons, Ests, Noises
index fef599c..586a5fd 100644 (file)
@@ -1,124 +1,4 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-import io_pickle
-import scipy.io
-import pdb
 
-def paths_load_data(expt,genome_info,PAR):
-   """
-      
-   """
-
-   # function [Sequences, Acceptors, Donors, Exons, Ests, Noises] = paths_load_data(expt,genome_info,PAR)
-   # Load the relevant file and return the alignment data
-
-   # expt can be 'training','validation' or 'test'
-
-   assert expt in ['training','validation','test']
-
-   tmp_dir = '/fml/ag-raetsch/home/fabio/tmp'
-
-   Noises = [];
-
-   if expt == 'training':
-      if PAR.microexon:
-         if PAR.LOCAL_ALIGN: # local version
-
-            train_data = '%s/microexon_train_data_cut_local.mat' % genome_info.basedir
-            train_data = '%s/microexon_train_data.mat' % genome_info.basedir
-            #train_data_pickle = '%s/microexon_train_data_cut_local.pickle'% tmp_dir
-            #io_pickle.convert_v6(train_data,train_data_pickle)
-            #train_data = io_pickle.load(train_data_pickle)
-            data = scipy.io.loadmat(train_data)
-
-         else: # global version
-
-            train_data = '%s/microexon_train_data_cut_ip=%1.3f_dp=%1.3f_mp=%1.3f.mat' %\
-               (genome_info.basedir, PAR.insertion_prob, PAR.deletion_prob, PAR.mutation_prob)
-
-            train_data = '%s/microexon_train_data.mat' % genome_info.basedir
-            #train_data_pickle = '%s/microexon_train_data_cut_ip=%1.3f_dp=%1.3f_mp=%1.3f.pickle' %\
-            #   (tmp_dir, PAR.insertion_prob, PAR.deletion_prob, PAR.mutation_prob)
-
-            #io_pickle.convert_v6(train_data,train_data_pickle)
-            #train_data = io_pickle.load(train_data_pickle)
-            data = scipy.io.loadmat(train_data)
-            Noises = data['TrainNoise'] # substitution matrix
-
-      else:
-         train_data = '%s/exons_train_local.mat' % genome_info.basedir
-         #train_data_pickle = '%s/exons_train_local.pickle'% tmp_dir
-         #io_pickle.convert_v6(train_data,train_data_pickle)
-         #microexon_train_data = io_pickle.load(train_data_pickle)
-         data = scipy.io.loadmat(train_data)
-
-      print 'train_data is %s' % train_data
-
-      Sequences = data['Train']       # dna sequences
-      Acceptors = data['TrainAcc']    # acceptor scores
-      Donors    = data['TrainDon']    # donor scores
-      Exons     = data['TrainExon']   # exon boundaries
-      Ests      = data['TrainEsts']   # est sequences
-     
-   #elif expt == 'validation':
-   #  print('Loading validation data\n') ;
-   #  if PAR.microexon,
-   #    if PAR.LOCAL_ALIGN
-   #      %local version
-   #      load(sprintf('%s/microexon_val_data_cut_local_ip=%1.3f_dp=%1.3f_mp=%1.3f.mat', ...
-   #         genome_info.basedir, PAR.insertion_prob, PAR.deletion_prob, PAR.mutation_prob),  ...
-   #      'ValEsts', 'ValExon', 'Val', 'ValAcc', 'ValDon') ;
-   #    else
-   #      %global version
-   #      load(sprintf('%s/microexon_val_data_cut_ip=%1.3f_dp=%1.3f_mp=%1.3f.mat', ...
-   #         genome_info.basedir, PAR.insertion_prob, PAR.deletion_prob, PAR.mutation_prob),  ...
-   #      'ValEsts', 'ValExon', 'Val', 'ValAcc', 'ValDon') ;
-   #    end
-   #  else
-   #    load(sprintf('%s/exons_val_ip=%1.3f_dp=%1.3f_mp=%1.3f.mat', ...
-   #       genome_info.basedir, PAR.insertion_prob, PAR.deletion_prob, PAR.mutation_prob),  ...
-   #    'ValEsts', 'ValExon', 'Val', 'ValAcc', 'ValDon') ;
-   #  end
-   #  
-   #  Sequences = Val ;      % dna sequences
-   #  Acceptors = ValAcc ;   % acceptor scores
-   #  Donors    = ValDon ;   % donor scores
-   #  Exons     = ValExon ;  % exon boundaries
-   #  Ests      = ValEsts ;  % est sequences
-
-
-
-   #elif expt == 'test':
-   #  fprintf('Loading test data\n') ;
-   #  if PAR.microexon,
-   #    if PAR.LOCAL_ALIGN
-   #      %local version
-   #      load(sprintf('%s/microexon_test_data_cut_local_ip=%1.3f_dp=%1.3f_mp=%1.3f.mat', ...
-   #         genome_info.basedir, PAR.insertion_prob, PAR.deletion_prob, PAR.mutation_prob), ...
-   #      'TestEsts', 'TestExon', 'Test', 'TestAcc', 'TestDon') ;
-   #    else
-   #      %global version
-   #      load(sprintf('%s/microexon_test_data_cut_ip=%1.3f_dp=%1.3f_mp=%1.3f.mat', ...
-   #         genome_info.basedir, PAR.insertion_prob, PAR.deletion_prob, PAR.mutation_prob),  ...
-   #      'TestEsts', 'TestExon', 'Test','TestAcc', 'TestDon', 'TestNoise') ;
-   #      Noises    = TestNoise ; % substitution matrix
-   #      end
-   #      else
-   #    load(sprintf('%s/exons_test_ip=%1.3f_dp=%1.3f_mp=%1.3f.mat', ...
-   #       genome_info.basedir, PAR.insertion_prob, PAR.deletion_prob, PAR.mutation_prob), ...
-   #    'TestEsts', 'TestExon', 'Test', 'TestAcc', 'TestDon') ;
-   #  end
-   #  
-   #  Sequences = Test ;      % dna sequences
-   #  Acceptors = TestAcc ;   % acceptor scores
-   #  Donors    = TestDon ;   % donor scores
-   #  Exons     = TestExon ;  % exon boundaries
-   #  Ests      = TestEsts ;  % est sequences
-
-   # Lower all indices by one to convert matlab 
-   # to python indices
-
-   Exons -= 1
-
-   return Sequences, Acceptors, Donors, Exons, Ests, Noises
index 112ec5f..586a5fd 100644 (file)
@@ -1,50 +1,4 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-import cPickle
 
-def paths_load_data_pickle(expt,genome_info,PAR):
-   """
-      
-   """
-
-   # function [Sequences, Acceptors, Donors, Exons, Ests, Noises] = paths_load_data(expt,genome_info,PAR)
-   # Load the relevant file and return the alignment data
-
-   # expt can be 'training','validation' or 'test'
-
-   assert expt in ['training','validation','test']
-
-   tmp_dir = '/fml/ag-raetsch/home/fabio/tmp'
-
-   Noises = [];
-
-   if expt == 'training':
-      if PAR.microexon:
-         if PAR.LOCAL_ALIGN: # local version
-
-            train_data = '%s/microexon_train_data.pickle' % genome_info.basedir
-            data = cPickle.load(open(train_data))
-
-         else: # global version
-            pass
-
-
-      else:
-         train_data = '%s/exons_train_local.pickle' % genome_info.basedir
-         data = cPickle.load(open(train_data))
-
-      print 'train_data is %s' % train_data
-
-      Sequences = data['Train']       # dna sequences
-      Acceptors = data['TrainAcc']    # acceptor scores
-      Donors    = data['TrainDon']    # donor scores
-      Exons     = data['TrainExon']   # exon boundaries
-      Ests      = data['TrainEsts']   # est sequences
-     
-   # Lower all indices by one to convert matlab 
-   # to python indices
-
-   Exons -= 1
-
-   return Sequences, Acceptors, Donors, Exons, Ests, Noises
diff --git a/scripts/qpalma.py b/scripts/qpalma.py
deleted file mode 100644 (file)
index 412a67b..0000000
+++ /dev/null
@@ -1,444 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-###########################################################
-#
-# 
-#
-###########################################################
-
-import sys
-import subprocess
-import scipy.io
-import pdb
-import os.path
-
-from numpy.matlib import mat,zeros,ones,inf
-from numpy.linalg import norm
-
-import QPalmaDP
-from SIQP_CPX import SIQPSolver
-
-from paths_load_data_pickle import *
-from paths_load_data_solexa import *
-
-from generateEvaluationData import *
-
-from computeSpliceWeights import *
-from set_param_palma import *
-from computeSpliceAlignWithQuality import *
-from penalty_lookup_new import *
-from compute_donacc import *
-from TrainingParam import Param
-from export_param import *
-
-import Configuration
-from Plif import Plf
-from Helpers import *
-
-def getQualityFeatureCounts(qualityPlifs):
-   weightQuality = qualityPlifs[0].penalties
-   for currentPlif in qualityPlifs[1:]:
-      weightQuality = numpy.vstack([weightQuality, currentPlif.penalties])
-
-   return weightQuality 
-
-class QPalma:
-   """
-   A training method for the QPalma project
-   """
-   
-   def __init__(self):
-      self.ARGS = Param()
-
-      self.logfh = open('qpalma.log','w+')
-      gen_file= '%s/genome.config' % self.ARGS.basedir
-
-      ginfo_filename = 'genome_info.pickle'
-      self.genome_info = fetch_genome_info(ginfo_filename)
-
-      self.plog('genome_info.basedir is %s\n'%self.genome_info.basedir)
-
-      #self.ARGS.train_with_splicesitescoreinformation = False
-
-   def plog(self,string):
-      self.logfh.write(string)
-      self.logfh.flush()
-
-   def run(self):
-      # Load the whole dataset 
-      if Configuration.mode == 'normal':
-         #Sequences, Acceptors, Donors, Exons, Ests, Noises = paths_load_data_pickle('training',self.genome_info,self.ARGS)
-         Sequences, Acceptors, Donors, Exons, Ests, Qualities = loadArtificialData(1000)
-         use_quality_scores = False
-      elif Configuration.mode == 'using_quality_scores':
-         Sequences, Acceptors, Donors, Exons, Ests, Qualities = paths_load_data_solexa('training',self.genome_info,self.ARGS)
-
-         #Sequences, Acceptors, Donors, Exons, Ests, Qualities = loadArtificialData(1000)
-
-         #Sequences_, Acceptors_, Donors_, Exons_, Ests_, Qualities_ = generateData(100)
-
-         #Sequences, Acceptors, Donors, Exons, Ests, Noises = paths_load_data_pickle('training',self.genome_info,self.ARGS)
-         #Qualities = []
-         #for i in range(len(Ests)):
-         #   Qualities.append([40]*len(Ests[i]))
-         use_quality_scores = True
-      else:
-         assert(False)
-
-      # number of training instances
-      N = len(Sequences) 
-      self.numExamples = N
-      assert N == len(Acceptors) and N == len(Acceptors) and N == len(Exons)\
-      and N == len(Ests), 'The Seq,Accept,Donor,.. arrays are of different lengths'
-      self.plog('Number of training examples: %d\n'% N)
-      print 'Number of features: %d\n'% Configuration.numFeatures
-
-      iteration_steps         = Configuration.iter_steps ; #upper bound on iteration steps
-      remove_duplicate_scores = Configuration.remove_duplicate_scores 
-      print_matrix            = Configuration.print_matrix 
-      anzpath                 = Configuration.anzpath 
-
-      # Initialize parameter vector  / param = numpy.matlib.rand(126,1)
-      param = Configuration.fixedParam 
-
-      # Set the parameters such as limits penalties for the Plifs
-      [h,d,a,mmatrix,qualityPlifs] = set_param_palma(param,self.ARGS.train_with_intronlengthinformation)
-
-      # delete splicesite-score-information
-      if not self.ARGS.train_with_splicesitescoreinformation:
-         for i in range(len(Acceptors)):
-            if Acceptors[i] > -20:
-               Acceptors[i] = 1
-            if Donors[i] >-20:
-               Donors[i] = 1
-
-      # Initialize solver 
-      if Configuration.USE_OPT:
-         self.plog('Initializing problem...\n')
-         solver = SIQPSolver(Configuration.numFeatures,self.numExamples,Configuration.C,self.logfh)
-
-      # stores the number of alignments done for each example (best path, second-best path etc.)
-      num_path = [anzpath]*N 
-      # stores the gap for each example
-      gap      = [0.0]*N
-
-      #############################################################################################
-      # Training
-      #############################################################################################
-      self.plog('Starting training...\n')
-
-      donSP       = Configuration.numDonSuppPoints
-      accSP       = Configuration.numAccSuppPoints
-      lengthSP    = Configuration.numLengthSuppPoints
-      mmatrixSP   = Configuration.sizeMatchmatrix[0]\
-      *Configuration.sizeMatchmatrix[1]
-      numq        = Configuration.numQualSuppPoints
-      totalQualSP = Configuration.totalQualSuppPoints
-
-      currentPhi = zeros((Configuration.numFeatures,1))
-      totalQualityPenalties = zeros((totalQualSP,1))
-
-      iteration_nr = 0
-      param_idx = 0
-      const_added_ctr = 0
-      while True:
-         if iteration_nr == iteration_steps:
-            break
-
-         for exampleIdx in range(self.numExamples):
-
-            if (exampleIdx%10) == 0:
-               print 'Current example nr %d' % exampleIdx
-
-            dna = Sequences[exampleIdx] 
-            est = Ests[exampleIdx] 
-
-            if Configuration.mode == 'normal':
-               quality = [40]*len(est)
-
-            if Configuration.mode == 'using_quality_scores':
-               quality = Qualities[exampleIdx]
-
-            exons = Exons[exampleIdx] 
-            # NoiseMatrix = Noises[exampleIdx] 
-            don_supp = Donors[exampleIdx] 
-            acc_supp = Acceptors[exampleIdx] 
-
-            if exons[-1,1] > len(dna):
-               continue
-
-            # Berechne die Parameter des wirklichen Alignments (but with untrained d,a,h ...)    
-            trueSpliceAlign, trueWeightMatch, trueWeightQuality = computeSpliceAlignWithQuality(dna, exons)
-            
-            # Calculate the weights
-            trueWeightDon, trueWeightAcc, trueWeightIntron = computeSpliceWeights(d, a, h, trueSpliceAlign, don_supp, acc_supp)
-            trueWeight = numpy.vstack([trueWeightIntron, trueWeightDon, trueWeightAcc, trueWeightMatch, trueWeightQuality])
-
-            currentPhi[0:donSP]                                               = mat(d.penalties[:]).reshape(donSP,1)
-            currentPhi[donSP:donSP+accSP]                                     = mat(a.penalties[:]).reshape(accSP,1)
-            currentPhi[donSP+accSP:donSP+accSP+lengthSP]                      = mat(h.penalties[:]).reshape(lengthSP,1)
-            currentPhi[donSP+accSP+lengthSP:donSP+accSP+lengthSP+mmatrixSP]   = mmatrix[:]
-
-            if Configuration.mode == 'using_quality_scores':
-               totalQualityPenalties = param[-totalQualSP:]
-               currentPhi[donSP+accSP+lengthSP+mmatrixSP:]                    = totalQualityPenalties[:]
-
-            # Calculate w'phi(x,y) the total score of the alignment
-            trueAlignmentScore = (trueWeight.T * currentPhi)[0,0]
-
-            # The allWeights vector is supposed to store the weight parameter
-            # of the true alignment as well as the weight parameters of the
-            # num_path[exampleIdx] other alignments
-            allWeights = zeros((Configuration.numFeatures,num_path[exampleIdx]+1))
-            allWeights[:,0] = trueWeight[:,0]
-
-            AlignmentScores = [0.0]*(num_path[exampleIdx]+1)
-            AlignmentScores[0] = trueAlignmentScore
-
-            ################## Calculate wrong alignment(s) ######################
-
-            # Compute donor, acceptor with penalty_lookup_new
-            # returns two double lists
-            donor, acceptor = compute_donacc(don_supp, acc_supp, d, a)
-
-            #myalign wants the acceptor site on the g of the ag
-            acceptor = acceptor[1:]
-            acceptor.append(-inf)
-
-            # for now we don't use donor/acceptor scores
-
-            #donor = [-inf] * len(donor)
-            #acceptor = [-inf] * len(donor)
-
-            dna = str(dna)
-            est = str(est)
-            dna_len = len(dna)
-            est_len = len(est)
-
-            ps = h.convert2SWIG()
-
-            prb = QPalmaDP.createDoubleArrayFromList(quality)
-            chastity = QPalmaDP.createDoubleArrayFromList([.0]*est_len)
-
-            matchmatrix = QPalmaDP.createDoubleArrayFromList(mmatrix.flatten().tolist()[0])
-            mm_len = Configuration.sizeMatchmatrix[0]*Configuration.sizeMatchmatrix[1]
-
-            d_len = len(donor)
-            donor = QPalmaDP.createDoubleArrayFromList(donor)
-            a_len = len(acceptor)
-            acceptor = QPalmaDP.createDoubleArrayFromList(acceptor)
-
-            # Create the alignment object representing the interface to the C/C++ code.
-            currentAlignment = QPalmaDP.Alignment(Configuration.numQualPlifs,Configuration.numQualSuppPoints, use_quality_scores)
-
-            c_qualityPlifs = QPalmaDP.createPenaltyArrayFromList([elem.convert2SWIG() for elem in qualityPlifs])
-
-            #print 'Calling myalign...'
-            # calculates SpliceAlign, EstAlign, weightMatch, Gesamtscores, dnaest
-            currentAlignment.myalign( num_path[exampleIdx], dna, dna_len,\
-             est, est_len, prb, chastity, ps, matchmatrix, mm_len, donor, d_len,\
-             acceptor, a_len, c_qualityPlifs, remove_duplicate_scores,
-             print_matrix)
-
-            #print 'After calling myalign...'
-            #print 'Calling getAlignmentResults...'
-
-            c_SpliceAlign       = QPalmaDP.createIntArrayFromList([0]*(dna_len*num_path[exampleIdx]))
-            c_EstAlign          = QPalmaDP.createIntArrayFromList([0]*(est_len*num_path[exampleIdx]))
-            c_WeightMatch       = QPalmaDP.createIntArrayFromList([0]*(mm_len*num_path[exampleIdx]))
-            c_DPScores   = QPalmaDP.createDoubleArrayFromList([.0]*num_path[exampleIdx])
-
-            c_qualityPlifsFeatures = QPalmaDP.createDoubleArrayFromList([.0]*(Configuration.totalQualSuppPoints*num_path[exampleIdx]))
-
-            currentAlignment.getAlignmentResults(c_SpliceAlign, c_EstAlign,\
-            c_WeightMatch, c_DPScores, c_qualityPlifsFeatures)
-
-            #print 'After calling getAlignmentResults...'
-
-            newSpliceAlign = zeros((num_path[exampleIdx]*dna_len,1))
-            newEstAlign    = zeros((est_len*num_path[exampleIdx],1))
-            newWeightMatch = zeros((num_path[exampleIdx]*mm_len,1))
-            newDPScores    = zeros((num_path[exampleIdx],1))
-            newQualityPlifsFeatures = zeros((Configuration.totalQualSuppPoints*num_path[exampleIdx],1))
-
-            #print 'newSpliceAlign'
-            for i in range(dna_len*num_path[exampleIdx]):
-               newSpliceAlign[i] = c_SpliceAlign[i]
-            #   print '%f' % (spliceAlign[i])
-
-            #print 'newEstAlign'
-            for i in range(est_len*num_path[exampleIdx]):
-               newEstAlign[i] = c_EstAlign[i]
-            #   print '%f' % (spliceAlign[i])
-
-            #print 'weightMatch'
-            for i in range(mm_len*num_path[exampleIdx]):
-               newWeightMatch[i] = c_WeightMatch[i]
-            #   print '%f' % (weightMatch[i])
-
-            #print 'ViterbiScores'
-            for i in range(num_path[exampleIdx]):
-               newDPScores[i] = c_DPScores[i]
-
-
-            if use_quality_scores:
-               for i in range(Configuration.totalQualSuppPoints*num_path[exampleIdx]):
-                  newQualityPlifsFeatures[i] = c_qualityPlifsFeatures[i]
-
-            #  equals palma up to here
-
-            #print "Calling destructors"
-            del c_SpliceAlign
-            del c_EstAlign
-            del c_WeightMatch
-            del c_DPScores
-            del c_qualityPlifsFeatures
-            del currentAlignment
-
-            newSpliceAlign = newSpliceAlign.reshape(num_path[exampleIdx],dna_len)
-            newWeightMatch = newWeightMatch.reshape(num_path[exampleIdx],mm_len)
-            # Calculate weights of the respective alignments Note that we are
-            # calculating n-best alignments without hamming loss, so we
-            # have to keep track which of the n-best alignments correspond to
-            # the true one in order not to incorporate a true alignment in the
-            # constraints. To keep track of the true and false alignments we
-            # define an array true_map with a boolean indicating the
-            # equivalence to the true alignment for each decoded alignment.
-            true_map = [0]*(num_path[exampleIdx]+1)
-            true_map[0] = 1
-            path_loss   = [0]*(num_path[exampleIdx])
-
-            for pathNr in range(num_path[exampleIdx]):
-               weightDon, weightAcc, weightIntron = computeSpliceWeights(d, a, h, newSpliceAlign[pathNr,:].flatten().tolist()[0], don_supp, acc_supp)
-
-               decodedQualityFeatures = zeros((Configuration.totalQualSuppPoints,1))
-               for qidx in range(Configuration.totalQualSuppPoints):
-                  decodedQualityFeatures[qidx] = newQualityPlifsFeatures[(pathNr*Configuration.totalQualSuppPoints)+qidx]
-
-               #pdb.set_trace()
-
-               path_loss[pathNr] = 0
-               # sum up positionwise loss between alignments
-               for alignPosIdx in range(newSpliceAlign[pathNr,:].shape[1]):
-                  if newSpliceAlign[pathNr,alignPosIdx] != trueSpliceAlign[alignPosIdx]:
-                     path_loss[pathNr] += 1
-
-               # Gewichte in restliche Zeilen der Matrix speichern
-               wp = numpy.vstack([weightIntron, weightDon, weightAcc, newWeightMatch[pathNr,:].T, decodedQualityFeatures])
-               allWeights[:,pathNr+1] = wp
-
-               hpen = mat(h.penalties).reshape(len(h.penalties),1)
-               dpen = mat(d.penalties).reshape(len(d.penalties),1)
-               apen = mat(a.penalties).reshape(len(a.penalties),1)
-               features = numpy.vstack([hpen, dpen, apen, mmatrix[:], totalQualityPenalties])
-
-               AlignmentScores[pathNr+1] = (allWeights[:,pathNr+1].T * features)[0,0]
-
-               # Check wether scalar product + loss equals viterbi score
-               print 'Example nr.: %d, path nr. %d, scores: %f vs %f' % (exampleIdx,pathNr,newDPScores[pathNr,0], AlignmentScores[pathNr+1])
-
-               distinct_scores = False
-               if math.fabs(AlignmentScores[pathNr] - AlignmentScores[pathNr+1]) > 1e-5:
-                  distinct_scores = True
-               
-               #if not math.fabs(newDPScores[pathNr,0] - AlignmentScores[pathNr+1]) + [0,1][distinct_scores and (pathNr>0)] <= 1e-5:
-               if not math.fabs(newDPScores[pathNr,0] - AlignmentScores[pathNr+1]) <= 1e-5:
-                  pdb.set_trace()
-
-               #  # if the pathNr-best alignment is very close to the true alignment consider it as true
-               if norm( allWeights[:,0] - allWeights[:,pathNr+1] ) < 1e-5:
-                  true_map[pathNr+1] = 1
-
-               # assert AlignmentScores[0] > max(AlignmentScores[1:]) + 1e-6, pdb.set_trace()
-
-               # the true label sequence should not have a larger score than the maximal one WHYYYYY?
-               # this means that all n-best paths are to close to each other 
-               # we have to extend the n-best search to a (n+1)-best
-               if len([elem for elem in true_map if elem == 1]) == len(true_map):
-                  num_path[exampleIdx] = num_path[exampleIdx]+1
-
-               # Choose true and first false alignment for extending A
-               firstFalseIdx = -1
-               for map_idx,elem in enumerate(true_map):
-                  if elem == 0:
-                     firstFalseIdx = map_idx
-                     break
-
-               # if there is at least one useful false alignment add the
-               # corresponding constraints to the optimization problem
-               if firstFalseIdx != -1:
-                  trueWeights       = allWeights[:,0]
-                  firstFalseWeights = allWeights[:,firstFalseIdx]
-                  differenceVector  = trueWeights - firstFalseWeights
-                  #pdb.set_trace()
-
-                  if Configuration.USE_OPT:
-                     const_added = solver.addConstraint(differenceVector, exampleIdx)
-                     const_added_ctr += 1
-               #
-               # end of one example processing 
-               #
-
-            # call solver every nth example //added constraint
-            if exampleIdx != 0 and exampleIdx % 20 == 0 and Configuration.USE_OPT:
-               objValue,w,self.slacks = solver.solve()
-      
-               print "objValue is %f" % objValue
-
-               sum_xis = 0
-               for elem in self.slacks:
-                  sum_xis +=  elem
-   
-               for i in range(len(param)):
-                  param[i] = w[i]
-
-               #pdb.set_trace()
-               cPickle.dump(param,open('param_%d.pickle'%param_idx,'w+'))
-               param_idx += 1
-               [h,d,a,mmatrix,qualityPlifs] = set_param_palma(param,self.ARGS.train_with_intronlengthinformation)
-
-         #
-         # end of one iteration through all examples
-         #
-         iteration_nr += 1
-
-      #
-      # end of optimization 
-      #  
-      print 'Training completed'
-
-      pa = para()
-      pa.h = h
-      pa.d = d
-      pa.a = a
-      pa.mmatrix = mmatrix
-      pa.qualityPlifs = qualityPlifs
-
-      cPickle.dump(param,open('param_%d.pickle'%param_idx,'w+'))
-      #cPickle.dump(pa,open('elegans.param','w+'))
-      self.logfh.close()
-
-def fetch_genome_info(ginfo_filename):
-   if not os.path.exists(ginfo_filename):
-      cmd = ['']*4
-      cmd[0] = 'addpath /fml/ag-raetsch/home/fabio/svn/tools/utils'
-      cmd[1] = 'addpath /fml/ag-raetsch/home/fabio/svn/tools/genomes'
-      cmd[2] = 'genome_info = init_genome(\'%s\')' % gen_file
-      cmd[3] = 'save genome_info.mat genome_info'  
-      full_cmd = "matlab -nojvm -nodisplay -r \"%s; %s; %s; %s; exit\"" % (cmd[0],cmd[1],cmd[2],cmd[3])
-
-      obj = subprocess.Popen(full_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
-      out,err = obj.communicate()
-      assert err == '', 'An error occured!\n%s'%err
-
-      ginfo = scipy.io.loadmat('genome_info.mat')
-      cPickle.dump(self.genome_info,open(ginfo_filename,'w+'))
-      return ginfo['genome_info']
-
-   else:
-      return cPickle.load(open(ginfo_filename))
-
-if __name__ == '__main__':
-   qpalma = QPalma()
-   qpalma.run()
index db3c1cc..771d4d6 100644 (file)
@@ -19,8 +19,7 @@ from numpy.linalg import norm
 import QPalmaDP
 from SIQP_CPX import SIQPSolver
 
-from paths_load_data_pickle import *
-from paths_load_data_solexa import *
+from qpalma.DataProc import *
 
 from generateEvaluationData import *
 
diff --git a/scripts/qpalma_train.py b/scripts/qpalma_train.py
new file mode 100644 (file)
index 0000000..766ca0e
--- /dev/null
@@ -0,0 +1,443 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+###########################################################
+#
+# qpalma_train.py -- training procedure for the QPalma spliced-alignment model
+#
+###########################################################
+
+import sys
+import subprocess
+import scipy.io
+import pdb
+import os.path
+
+from numpy.matlib import mat,zeros,ones,inf
+from numpy.linalg import norm
+
+import QPalmaDP
+from SIQP_CPX import SIQPSolver
+
+from qpalma.DataProc import *
+
+from generateEvaluationData import *
+
+from computeSpliceWeights import *
+from set_param_palma import *
+from computeSpliceAlignWithQuality import *
+from penalty_lookup_new import *
+from compute_donacc import *
+from TrainingParam import Param
+from export_param import *
+
+import Configuration
+from Plif import Plf
+from Helpers import *
+
+def getQualityFeatureCounts(qualityPlifs):
+   weightQuality = qualityPlifs[0].penalties
+   for currentPlif in qualityPlifs[1:]:
+      weightQuality = numpy.vstack([weightQuality, currentPlif.penalties])
+
+   return weightQuality 
+
+class QPalma:
+   """
+   A training method for the QPalma project
+   """
+   
+   def __init__(self):
+      self.ARGS = Param()
+
+      self.logfh = open('qpalma.log','w+')
+      gen_file= '%s/genome.config' % self.ARGS.basedir
+
+      ginfo_filename = 'genome_info.pickle'
+      self.genome_info = fetch_genome_info(ginfo_filename)
+
+      self.plog('genome_info.basedir is %s\n'%self.genome_info.basedir)
+
+      #self.ARGS.train_with_splicesitescoreinformation = False
+
+   def plog(self,string):
+      self.logfh.write(string)
+      self.logfh.flush()
+
+   def run(self):
+      # Load the whole dataset 
+      if Configuration.mode == 'normal':
+         #Sequences, Acceptors, Donors, Exons, Ests, Noises = paths_load_data_pickle('training',self.genome_info,self.ARGS)
+         Sequences, Acceptors, Donors, Exons, Ests, Qualities = loadArtificialData(1000)
+         use_quality_scores = False
+      elif Configuration.mode == 'using_quality_scores':
+         Sequences, Acceptors, Donors, Exons, Ests, Qualities = paths_load_data_solexa('training',self.genome_info,self.ARGS)
+
+         #Sequences, Acceptors, Donors, Exons, Ests, Qualities = loadArtificialData(1000)
+
+         #Sequences_, Acceptors_, Donors_, Exons_, Ests_, Qualities_ = generateData(100)
+
+         #Sequences, Acceptors, Donors, Exons, Ests, Noises = paths_load_data_pickle('training',self.genome_info,self.ARGS)
+         #Qualities = []
+         #for i in range(len(Ests)):
+         #   Qualities.append([40]*len(Ests[i]))
+         use_quality_scores = True
+      else:
+         assert(False)
+
+      # number of training instances
+      N = len(Sequences) 
+      self.numExamples = N
+      assert N == len(Acceptors) and N == len(Donors) and N == len(Exons)\
+      and N == len(Ests), 'The Seq,Accept,Donor,.. arrays are of different lengths'
+      self.plog('Number of training examples: %d\n'% N)
+      print 'Number of features: %d\n'% Configuration.numFeatures
+
+      iteration_steps         = Configuration.iter_steps ; #upper bound on iteration steps
+      remove_duplicate_scores = Configuration.remove_duplicate_scores 
+      print_matrix            = Configuration.print_matrix 
+      anzpath                 = Configuration.anzpath 
+
+      # Initialize parameter vector  / param = numpy.matlib.rand(126,1)
+      param = Configuration.fixedParam 
+
+      # Set the parameters such as limits penalties for the Plifs
+      [h,d,a,mmatrix,qualityPlifs] = set_param_palma(param,self.ARGS.train_with_intronlengthinformation)
+
+      # delete splicesite-score-information
+      if not self.ARGS.train_with_splicesitescoreinformation:
+         for i in range(len(Acceptors)):
+            if Acceptors[i] > -20:
+               Acceptors[i] = 1
+            if Donors[i] >-20:
+               Donors[i] = 1
+
+      # Initialize solver 
+      if Configuration.USE_OPT:
+         self.plog('Initializing problem...\n')
+         solver = SIQPSolver(Configuration.numFeatures,self.numExamples,Configuration.C,self.logfh)
+
+      # stores the number of alignments done for each example (best path, second-best path etc.)
+      num_path = [anzpath]*N 
+      # stores the gap for each example
+      gap      = [0.0]*N
+
+      #############################################################################################
+      # Training
+      #############################################################################################
+      self.plog('Starting training...\n')
+
+      donSP       = Configuration.numDonSuppPoints
+      accSP       = Configuration.numAccSuppPoints
+      lengthSP    = Configuration.numLengthSuppPoints
+      mmatrixSP   = Configuration.sizeMatchmatrix[0]\
+      *Configuration.sizeMatchmatrix[1]
+      numq        = Configuration.numQualSuppPoints
+      totalQualSP = Configuration.totalQualSuppPoints
+
+      currentPhi = zeros((Configuration.numFeatures,1))
+      totalQualityPenalties = zeros((totalQualSP,1))
+
+      iteration_nr = 0
+      param_idx = 0
+      const_added_ctr = 0
+      while True:
+         if iteration_nr == iteration_steps:
+            break
+
+         for exampleIdx in range(self.numExamples):
+
+            if (exampleIdx%10) == 0:
+               print 'Current example nr %d' % exampleIdx
+
+            dna = Sequences[exampleIdx] 
+            est = Ests[exampleIdx] 
+
+            if Configuration.mode == 'normal':
+               quality = [40]*len(est)
+
+            if Configuration.mode == 'using_quality_scores':
+               quality = Qualities[exampleIdx]
+
+            exons = Exons[exampleIdx] 
+            # NoiseMatrix = Noises[exampleIdx] 
+            don_supp = Donors[exampleIdx] 
+            acc_supp = Acceptors[exampleIdx] 
+
+            if exons[-1,1] > len(dna):
+               continue
+
+            # Berechne die Parameter des wirklichen Alignments (but with untrained d,a,h ...)    
+            trueSpliceAlign, trueWeightMatch, trueWeightQuality = computeSpliceAlignWithQuality(dna, exons)
+            
+            # Calculate the weights
+            trueWeightDon, trueWeightAcc, trueWeightIntron = computeSpliceWeights(d, a, h, trueSpliceAlign, don_supp, acc_supp)
+            trueWeight = numpy.vstack([trueWeightIntron, trueWeightDon, trueWeightAcc, trueWeightMatch, trueWeightQuality])
+
+            currentPhi[0:donSP]                                               = mat(d.penalties[:]).reshape(donSP,1)
+            currentPhi[donSP:donSP+accSP]                                     = mat(a.penalties[:]).reshape(accSP,1)
+            currentPhi[donSP+accSP:donSP+accSP+lengthSP]                      = mat(h.penalties[:]).reshape(lengthSP,1)
+            currentPhi[donSP+accSP+lengthSP:donSP+accSP+lengthSP+mmatrixSP]   = mmatrix[:]
+
+            if Configuration.mode == 'using_quality_scores':
+               totalQualityPenalties = param[-totalQualSP:]
+               currentPhi[donSP+accSP+lengthSP+mmatrixSP:]                    = totalQualityPenalties[:]
+
+            # Calculate w'phi(x,y) the total score of the alignment
+            trueAlignmentScore = (trueWeight.T * currentPhi)[0,0]
+
+            # The allWeights vector is supposed to store the weight parameter
+            # of the true alignment as well as the weight parameters of the
+            # num_path[exampleIdx] other alignments
+            allWeights = zeros((Configuration.numFeatures,num_path[exampleIdx]+1))
+            allWeights[:,0] = trueWeight[:,0]
+
+            AlignmentScores = [0.0]*(num_path[exampleIdx]+1)
+            AlignmentScores[0] = trueAlignmentScore
+
+            ################## Calculate wrong alignment(s) ######################
+
+            # Compute donor, acceptor with penalty_lookup_new
+            # returns two double lists
+            donor, acceptor = compute_donacc(don_supp, acc_supp, d, a)
+
+            #myalign wants the acceptor site on the g of the ag
+            acceptor = acceptor[1:]
+            acceptor.append(-inf)
+
+            # for now we don't use donor/acceptor scores
+
+            #donor = [-inf] * len(donor)
+            #acceptor = [-inf] * len(donor)
+
+            dna = str(dna)
+            est = str(est)
+            dna_len = len(dna)
+            est_len = len(est)
+
+            ps = h.convert2SWIG()
+
+            prb = QPalmaDP.createDoubleArrayFromList(quality)
+            chastity = QPalmaDP.createDoubleArrayFromList([.0]*est_len)
+
+            matchmatrix = QPalmaDP.createDoubleArrayFromList(mmatrix.flatten().tolist()[0])
+            mm_len = Configuration.sizeMatchmatrix[0]*Configuration.sizeMatchmatrix[1]
+
+            d_len = len(donor)
+            donor = QPalmaDP.createDoubleArrayFromList(donor)
+            a_len = len(acceptor)
+            acceptor = QPalmaDP.createDoubleArrayFromList(acceptor)
+
+            # Create the alignment object representing the interface to the C/C++ code.
+            currentAlignment = QPalmaDP.Alignment(Configuration.numQualPlifs,Configuration.numQualSuppPoints, use_quality_scores)
+
+            c_qualityPlifs = QPalmaDP.createPenaltyArrayFromList([elem.convert2SWIG() for elem in qualityPlifs])
+
+            #print 'Calling myalign...'
+            # calculates SpliceAlign, EstAlign, weightMatch, Gesamtscores, dnaest
+            currentAlignment.myalign( num_path[exampleIdx], dna, dna_len,\
+             est, est_len, prb, chastity, ps, matchmatrix, mm_len, donor, d_len,\
+             acceptor, a_len, c_qualityPlifs, remove_duplicate_scores,
+             print_matrix)
+
+            #print 'After calling myalign...'
+            #print 'Calling getAlignmentResults...'
+
+            c_SpliceAlign       = QPalmaDP.createIntArrayFromList([0]*(dna_len*num_path[exampleIdx]))
+            c_EstAlign          = QPalmaDP.createIntArrayFromList([0]*(est_len*num_path[exampleIdx]))
+            c_WeightMatch       = QPalmaDP.createIntArrayFromList([0]*(mm_len*num_path[exampleIdx]))
+            c_DPScores   = QPalmaDP.createDoubleArrayFromList([.0]*num_path[exampleIdx])
+
+            c_qualityPlifsFeatures = QPalmaDP.createDoubleArrayFromList([.0]*(Configuration.totalQualSuppPoints*num_path[exampleIdx]))
+
+            currentAlignment.getAlignmentResults(c_SpliceAlign, c_EstAlign,\
+            c_WeightMatch, c_DPScores, c_qualityPlifsFeatures)
+
+            #print 'After calling getAlignmentResults...'
+
+            newSpliceAlign = zeros((num_path[exampleIdx]*dna_len,1))
+            newEstAlign    = zeros((est_len*num_path[exampleIdx],1))
+            newWeightMatch = zeros((num_path[exampleIdx]*mm_len,1))
+            newDPScores    = zeros((num_path[exampleIdx],1))
+            newQualityPlifsFeatures = zeros((Configuration.totalQualSuppPoints*num_path[exampleIdx],1))
+
+            #print 'newSpliceAlign'
+            for i in range(dna_len*num_path[exampleIdx]):
+               newSpliceAlign[i] = c_SpliceAlign[i]
+            #   print '%f' % (spliceAlign[i])
+
+            #print 'newEstAlign'
+            for i in range(est_len*num_path[exampleIdx]):
+               newEstAlign[i] = c_EstAlign[i]
+            #   print '%f' % (spliceAlign[i])
+
+            #print 'weightMatch'
+            for i in range(mm_len*num_path[exampleIdx]):
+               newWeightMatch[i] = c_WeightMatch[i]
+            #   print '%f' % (weightMatch[i])
+
+            #print 'ViterbiScores'
+            for i in range(num_path[exampleIdx]):
+               newDPScores[i] = c_DPScores[i]
+
+
+            if use_quality_scores:
+               for i in range(Configuration.totalQualSuppPoints*num_path[exampleIdx]):
+                  newQualityPlifsFeatures[i] = c_qualityPlifsFeatures[i]
+
+            #  equals palma up to here
+
+            #print "Calling destructors"
+            del c_SpliceAlign
+            del c_EstAlign
+            del c_WeightMatch
+            del c_DPScores
+            del c_qualityPlifsFeatures
+            del currentAlignment
+
+            newSpliceAlign = newSpliceAlign.reshape(num_path[exampleIdx],dna_len)
+            newWeightMatch = newWeightMatch.reshape(num_path[exampleIdx],mm_len)
+            # Calculate weights of the respective alignments Note that we are
+            # calculating n-best alignments without hamming loss, so we
+            # have to keep track which of the n-best alignments correspond to
+            # the true one in order not to incorporate a true alignment in the
+            # constraints. To keep track of the true and false alignments we
+            # define an array true_map with a boolean indicating the
+            # equivalence to the true alignment for each decoded alignment.
+            true_map = [0]*(num_path[exampleIdx]+1)
+            true_map[0] = 1
+            path_loss   = [0]*(num_path[exampleIdx])
+
+            for pathNr in range(num_path[exampleIdx]):
+               weightDon, weightAcc, weightIntron = computeSpliceWeights(d, a, h, newSpliceAlign[pathNr,:].flatten().tolist()[0], don_supp, acc_supp)
+
+               decodedQualityFeatures = zeros((Configuration.totalQualSuppPoints,1))
+               for qidx in range(Configuration.totalQualSuppPoints):
+                  decodedQualityFeatures[qidx] = newQualityPlifsFeatures[(pathNr*Configuration.totalQualSuppPoints)+qidx]
+
+               #pdb.set_trace()
+
+               path_loss[pathNr] = 0
+               # sum up positionwise loss between alignments
+               for alignPosIdx in range(newSpliceAlign[pathNr,:].shape[1]):
+                  if newSpliceAlign[pathNr,alignPosIdx] != trueSpliceAlign[alignPosIdx]:
+                     path_loss[pathNr] += 1
+
+               # Gewichte in restliche Zeilen der Matrix speichern
+               wp = numpy.vstack([weightIntron, weightDon, weightAcc, newWeightMatch[pathNr,:].T, decodedQualityFeatures])
+               allWeights[:,pathNr+1] = wp
+
+               hpen = mat(h.penalties).reshape(len(h.penalties),1)
+               dpen = mat(d.penalties).reshape(len(d.penalties),1)
+               apen = mat(a.penalties).reshape(len(a.penalties),1)
+               features = numpy.vstack([hpen, dpen, apen, mmatrix[:], totalQualityPenalties])
+
+               AlignmentScores[pathNr+1] = (allWeights[:,pathNr+1].T * features)[0,0]
+
+               # Check wether scalar product + loss equals viterbi score
+               print 'Example nr.: %d, path nr. %d, scores: %f vs %f' % (exampleIdx,pathNr,newDPScores[pathNr,0], AlignmentScores[pathNr+1])
+
+               distinct_scores = False
+               if math.fabs(AlignmentScores[pathNr] - AlignmentScores[pathNr+1]) > 1e-5:
+                  distinct_scores = True
+               
+               #if not math.fabs(newDPScores[pathNr,0] - AlignmentScores[pathNr+1]) + [0,1][distinct_scores and (pathNr>0)] <= 1e-5:
+               if not math.fabs(newDPScores[pathNr,0] - AlignmentScores[pathNr+1]) <= 1e-5:
+                  pdb.set_trace()
+
+               #  # if the pathNr-best alignment is very close to the true alignment consider it as true
+               if norm( allWeights[:,0] - allWeights[:,pathNr+1] ) < 1e-5:
+                  true_map[pathNr+1] = 1
+
+               # assert AlignmentScores[0] > max(AlignmentScores[1:]) + 1e-6, pdb.set_trace()
+
+               # the true label sequence should not have a larger score than the maximal one WHYYYYY?
+               # this means that all n-best paths are too close to each other 
+               # we have to extend the n-best search to a (n+1)-best
+               if len([elem for elem in true_map if elem == 1]) == len(true_map):
+                  num_path[exampleIdx] = num_path[exampleIdx]+1
+
+               # Choose true and first false alignment for extending A
+               firstFalseIdx = -1
+               for map_idx,elem in enumerate(true_map):
+                  if elem == 0:
+                     firstFalseIdx = map_idx
+                     break
+
+               # if there is at least one useful false alignment add the
+               # corresponding constraints to the optimization problem
+               if firstFalseIdx != -1:
+                  trueWeights       = allWeights[:,0]
+                  firstFalseWeights = allWeights[:,firstFalseIdx]
+                  differenceVector  = trueWeights - firstFalseWeights
+                  #pdb.set_trace()
+
+                  if Configuration.USE_OPT:
+                     const_added = solver.addConstraint(differenceVector, exampleIdx)
+                     const_added_ctr += 1
+               #
+               # end of one example processing 
+               #
+
+            # call solver every nth example //added constraint
+            if exampleIdx != 0 and exampleIdx % 20 == 0 and Configuration.USE_OPT:
+               objValue,w,self.slacks = solver.solve()
+      
+               print "objValue is %f" % objValue
+
+               sum_xis = 0
+               for elem in self.slacks:
+                  sum_xis +=  elem
+   
+               for i in range(len(param)):
+                  param[i] = w[i]
+
+               #pdb.set_trace()
+               cPickle.dump(param,open('param_%d.pickle'%param_idx,'w+'))
+               param_idx += 1
+               [h,d,a,mmatrix,qualityPlifs] = set_param_palma(param,self.ARGS.train_with_intronlengthinformation)
+
+         #
+         # end of one iteration through all examples
+         #
+         iteration_nr += 1
+
+      #
+      # end of optimization 
+      #  
+      print 'Training completed'
+
+      pa = para()  # FIXME(review): 'para' is not defined or imported anywhere in this file
+      pa.h = h
+      pa.d = d
+      pa.a = a
+      pa.mmatrix = mmatrix
+      pa.qualityPlifs = qualityPlifs
+
+      cPickle.dump(param,open('param_%d.pickle'%param_idx,'w+'))
+      #cPickle.dump(pa,open('elegans.param','w+'))
+      self.logfh.close()
+
+def fetch_genome_info(ginfo_filename):
+   if not os.path.exists(ginfo_filename):
+      cmd = ['']*4
+      cmd[0] = 'addpath /fml/ag-raetsch/home/fabio/svn/tools/utils'
+      cmd[1] = 'addpath /fml/ag-raetsch/home/fabio/svn/tools/genomes'
+      cmd[2] = 'genome_info = init_genome(\'%s\')' % gen_file  # FIXME(review): gen_file is undefined here (it is only a local of QPalma.__init__)
+      cmd[3] = 'save genome_info.mat genome_info'  
+      full_cmd = "matlab -nojvm -nodisplay -r \"%s; %s; %s; %s; exit\"" % (cmd[0],cmd[1],cmd[2],cmd[3])
+
+      obj = subprocess.Popen(full_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
+      out,err = obj.communicate()
+      assert err == '', 'An error occured!\n%s'%err
+
+      ginfo = scipy.io.loadmat('genome_info.mat')
+      cPickle.dump(ginfo['genome_info'],open(ginfo_filename,'w+'))
+      return ginfo['genome_info']
+
+   else:
+      return cPickle.load(open(ginfo_filename))
+
+if __name__ == '__main__':
+   qpalma = QPalma()
+   qpalma.run()