+ added raw framework of workflow
author fabio <fabio@e1793c9e-67f9-0310-80fc-b846ff1f7b36>
Thu, 21 Aug 2008 12:56:52 +0000 (12:56 +0000)
committer fabio <fabio@e1793c9e-67f9-0310-80fc-b846ff1f7b36>
Thu, 21 Aug 2008 12:56:52 +0000 (12:56 +0000)
git-svn-id: http://svn.tuebingen.mpg.de/ag-raetsch/projects/QPalma@10392 e1793c9e-67f9-0310-80fc-b846ff1f7b36

scripts/qpalma_pipeline.py

index 0e2d215..f7614f8 100644 (file)
@@ -9,7 +9,6 @@
 # Written (W) 2008 Fabio De Bona
 # Copyright (C) 2008 Max-Planck-Society
 
-
 #
 # This file contains the main interface to the QPalma pipeline.
 #
@@ -19,9 +18,8 @@
 
 from optparse import OptionParser
 
-
-from qpalma.gridtools import *
-
+from qpalma.gridtools import ApproximationTask,PreprocessingTask
+from qpalma.gridtools import AlignmentTask,PostprocessingTask
 
 def create_option_parser():
    parser = OptionParser()
@@ -50,47 +48,55 @@ class System:
    - setting up the different pipeline modules
    - run the experiment and report the results
 
-   
    """
 
-   # Before creating a candidate spliced read dataset we have to first filter
-   # the matches from the first seed finding run.
+   def __init__(self):
+      """
+      """
+      parser = create_option_parser()
+      (options, args) = parser.parse_args()
+
+
+   def run(self):
+
+      # Before creating a candidate spliced read dataset we have to first filter
+      # the matches from the first seed finding run.
 
-   grid_heuristic()
+      grid_heuristic()
 
-   # approx_task = ApproximationTask(...)
-   # approx_task.createJobs()
-   # approx_task.submit()
-   # approx_task.checkIfTaskFinished()
+      # approx_task = ApproximationTask(...)
+      # approx_task.createJobs()
+      # approx_task.submit()
+      # approx_task.checkIfTaskFinished()
 
-   # After filtering combine the filtered matches from the first run and the
-   # found matches from the second run to a full dataset
+      # After filtering combine the filtered matches from the first run and the
+      # found matches from the second run to a full dataset
 
-   createNewDataset
-   
-   # pre_task = PreprocessingTask(...)
-   # pre_task.createJobs()
-   # pre_task.submit()
+      createNewDataset
+      
+      # pre_task = PreprocessingTask(...)
+      # pre_task.createJobs()
+      # pre_task.submit()
 
-   # Now that we have a dataset we can perform the accurate alignments for this
-   # data
+      # Now that we have a dataset we can perform the accurate alignments for this
+      # data
 
-   grid_predict()
+      grid_predict()
 
-   # align_task = AlignmentTask(...)
-   # align_task.createJobs()
-   # align_task.submit()
+      # align_task = AlignmentTask(...)
+      # align_task.createJobs()
+      # align_task.submit()
 
-   # The results of the above alignment step can be converted to a data format
-   # needed for further postprocessing
+      # The results of the above alignment step can be converted to a data format
+      # needed for further postprocessing
 
-   grid_alignment()
+      grid_alignment()
 
-   # post_task = PostprocessingTask(...)
-   # post_task.createJobs()
-   # post_task.submit()
+      # post_task = PostprocessingTask(...)
+      # post_task.createJobs()
+      # post_task.submit()
 
 
 if __name__ == '__main__':
-   parser = create_option_parser()
-   (options, args) = parser.parse_args()
+   system_obj = System() 
+   system_obj.run()